repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ArchiveTeam/spuf-grab | pipeline.py | 1 | 11245 | # encoding=utf8
import datetime
from distutils.version import StrictVersion
import hashlib
import os.path
import random
from seesaw.config import realize, NumberConfigValue
from seesaw.externalprocess import ExternalProcess
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.task import SimpleTask, LimitConcurrent
from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \
UploadWithTracker, SendDoneToTracker
import shutil
import socket
import subprocess
import sys
import time
import string
import seesaw
from seesaw.externalprocess import WgetDownload
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.util import find_executable
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.8.5"):
raise Exception("This pipeline needs seesaw version 0.8.5 or higher.")
###########################################################################
# Find a useful Wget+Lua executable.
#
# WGET_LUA will be set to the first path that
# 1. does not crash with --version, and
# 2. prints the required version string
WGET_LUA = find_executable(
"Wget+Lua",
["GNU Wget 1.14.lua.20130523-9a5c", "GNU Wget 1.14.lua.20160530-955376b"],
[
"./wget-lua",
"./wget-lua-warrior",
"./wget-lua-local",
"../wget-lua",
"../../wget-lua",
"/home/warrior/wget-lua",
"/usr/bin/wget-lua"
]
)
if not WGET_LUA:
raise Exception("No usable Wget+Lua found.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20170615.01"
USER_AGENT = 'ArchiveTeam'
TRACKER_ID = 'spuf'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckIP")
self._counter = 0
def process(self, item):
# NEW for 2014! Check if we are behind firewall/proxy
if self._counter <= 0:
item.log_output('Checking IP address.')
ip_set = set()
ip_set.add(socket.gethostbyname('twitter.com'))
ip_set.add(socket.gethostbyname('facebook.com'))
ip_set.add(socket.gethostbyname('youtube.com'))
ip_set.add(socket.gethostbyname('microsoft.com'))
ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))
ip_set.add(socket.gethostbyname('archiveteam.org'))
if len(ip_set) != 6:
item.log_output('Got IP addresses: {0}'.format(ip_set))
item.log_output(
'Are you behind a firewall/proxy? That is a big no-no!')
raise Exception(
'Are you behind a firewall/proxy? That is a big no-no!')
# Check only occasionally
if self._counter <= 0:
self._counter = 10
else:
self._counter -= 1
class PrepareDirectories(SimpleTask):
def __init__(self, warc_prefix):
SimpleTask.__init__(self, "PrepareDirectories")
self.warc_prefix = warc_prefix
def process(self, item):
item_name = item["item_name"]
escaped_item_name = item_name.replace(':', '_').replace('/', '_').replace('~', '_')
dirname = "/".join((item["data_dir"], escaped_item_name))
if os.path.isdir(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
item["item_dir"] = dirname
item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix, escaped_item_name,
time.strftime("%Y%m%d-%H%M%S"))
open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "MoveFiles")
def process(self, item):
# NEW for 2014! Check if wget was compiled with zlib support
if os.path.exists("%(item_dir)s/%(warc_file_base)s.warc" % item):
raise Exception('Please compile wget with zlib support!')
os.rename("%(item_dir)s/%(warc_file_base)s.warc.gz" % item,
"%(data_dir)s/%(warc_file_base)s.warc.gz" % item)
shutil.rmtree("%(item_dir)s" % item)
def get_hash(filename):
with open(filename, 'rb') as in_file:
return hashlib.sha1(in_file.read()).hexdigest()
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
LUA_SHA1 = get_hash(os.path.join(CWD, 'spuf.lua'))
def stats_id_function(item):
# NEW for 2014! Some accountability hashes and stats.
d = {
'pipeline_hash': PIPELINE_SHA1,
'lua_hash': LUA_SHA1,
'python_version': sys.version,
}
return d
class WgetArgs(object):
def realize(self, item):
wget_args = [
WGET_LUA,
"-U", USER_AGENT,
"-nv",
"--load-cookies", "cookies.txt",
#"--no-cookies",
"--lua-script", "spuf.lua",
"-o", ItemInterpolation("%(item_dir)s/wget.log"),
"--no-check-certificate",
"--output-document", ItemInterpolation("%(item_dir)s/wget.tmp"),
"--truncate-output",
"-e", "robots=off",
"--rotate-dns",
"--recursive", "--level=inf",
"--no-parent",
"--page-requisites",
"--timeout", "30",
"--tries", "inf",
"--domains", "steampowered.com",
"--span-hosts",
"--waitretry", "30",
"--warc-file", ItemInterpolation("%(item_dir)s/%(warc_file_base)s"),
"--warc-header", "operator: Archive Team",
"--warc-header", "steam-users-forum-dld-script-version: " + VERSION,
"--warc-header", ItemInterpolation("steam-users-forum-item: %(item_name)s"),
]
item_name = item['item_name']
assert ':' in item_name
item_type, item_value = item_name.split(':', 1)
item['item_type'] = item_type
item['item_value'] = item_value
for tries in range(10):
if os.path.isfile('login.php?do=login'):
os.remove('login.php?do=login')
os.system("wget --save-cookies cookies.txt --user-agent 'ArchiveTeam' --keep-session-cookies --post-data 'vb_login_username=archiveTeam&cookieuser=1&vb_login_password=&s=&securitytoken=guest&do=login&vb_login_md5password=9aa65d84012ee50e456c4e6916089636&vb_login_md5password_utf=9aa65d84012ee50e456c4e6916089636' --referer http://forums.steampowered.com/forums/ http://forums.steampowered.com/forums/login.php?do=login")
if not os.path.isfile('login.php?do=login'):
continue
with open('login.php?do=login') as f:
if 'alt="Forum Database Error"' in f.read():
continue
break
else:
raise Exception('Could not log in.')
wget_args.append('http://forums.steampowered.com/forums/showthread.php')
if item_type == 'threads':
start, stop = item_value.split('-')
for i in range(int(start), int(stop)+1):
wget_args.extend(['--warc-header', 'steam-users-forum-thread: {i}'.format(i=i)])
wget_args.append('http://forums.steampowered.com/forums/showthread.php?t={i}'.format(i=i))
elif item_type == 'forums':
start, stop = item_value.split('-')
for i in range(int(start), int(stop)+1):
wget_args.extend(['--warc-header', 'steam-users-forum-forum: {i}'.format(i=i)])
wget_args.append('http://forums.steampowered.com/forums/forumdisplay.php?f={i}&daysprune=-1'.format(i=i))
wget_args.append('http://forums.steampowered.com/forums/forumdisplay.php?f={i}'.format(i=i))
elif item_type == 'members':
start, stop = item_value.split('-')
for i in range(int(start), int(stop)+1):
wget_args.extend(['--warc-header', 'steam-users-forum-member: {i}'.format(i=i)])
wget_args.append('http://forums.steampowered.com/forums/member.php?u={i}'.format(i=i))
else:
raise Exception('Unknown item')
if 'bind_address' in globals():
wget_args.extend(['--bind-address', globals()['bind_address']])
print('')
print('*** Wget will bind address at {0} ***'.format(
globals()['bind_address']))
print('')
return realize(wget_args, item)
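# Illustrative note (not part of the original pipeline): for a hypothetical
# item_name such as 'threads:100-102', realize() above would emit one
# 'steam-users-forum-thread' WARC header and one showthread.php URL per id,
# roughly:
#   --warc-header 'steam-users-forum-thread: 100'
#   http://forums.steampowered.com/forums/showthread.php?t=100
#   ... and likewise for 101 and 102, after the shared showthread.php seed URL.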
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title = "Steam Users' Forum",
project_html = """
<img class="project-logo" alt="Steam Logo" src="http://archiveteam.org/images/thumb/4/48/Steam_Icon_2014.png/100px-Steam_Icon_2014.png" />
<h2>Steam Users' Forum <span class="links"><a href="http://forums.steampowered.com/forums">Website</a> · <a href="http://tracker.archiveteam.org/spuf/">Leaderboard</a></span></h2>
<p>Getting killed June 5th.</p>
""",
utc_deadline = datetime.datetime(2017, 6, 4, 23, 59, 0)
)
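# Note (added for clarity): 'downloader' referenced below is not defined in
# this file; in standard ArchiveTeam projects it is injected into the module
# globals by the seesaw runner when the pipeline is loaded (assumption based
# on how seesaw executes pipeline files).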
pipeline = Pipeline(
CheckIP(),
GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
VERSION),
PrepareDirectories(warc_prefix="spuf"),
WgetDownload(
WgetArgs(),
max_tries=2,
accept_on_exit_code=[0, 4, 8],
env={
"item_dir": ItemValue("item_dir"),
"item_value": ItemValue("item_value"),
"item_type": ItemValue("item_type"),
"warc_file_base": ItemValue("warc_file_base"),
}
),
PrepareStatsForTracker(
defaults={"downloader": downloader, "version": VERSION},
file_groups={
"data": [
ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz")
]
},
id_function=stats_id_function,
),
MoveFiles(),
LimitConcurrent(NumberConfigValue(min=1, max=4, default="1",
name="shared:rsync_threads", title="Rsync threads",
description="The maximum number of concurrent uploads."),
UploadWithTracker(
"http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
downloader=downloader,
version=VERSION,
files=[
ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz")
],
rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
rsync_extra_args=[
"--recursive",
"--partial",
"--partial-dir", ".rsync-tmp",
]
),
),
SendDoneToTracker(
tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
stats=ItemValue("stats")
)
)
| unlicense | -1,336,016,031,714,418,700 | 35.868852 | 432 | 0.574033 | false | 3.543965 | false | false | false |
Encrylize/flask-blogger | app/utils/helpers.py | 1 | 1218 | from urllib.parse import urljoin, urlparse
from flask import request
def get_or_create(model, **kwargs):
"""
Gets or creates an instance of model.
Args:
model: SQLAlchemy model
**kwargs: Model properties
Returns:
An instance of model and True if it was created, False if it was not.
"""
instance = model.query.filter_by(**kwargs).first()
if instance:
return instance, False
else:
instance = model(**kwargs)
return instance, True
def is_safe_url(target):
"""
Checks if a URL is safe.
Args:
target: The URL to check
Returns:
True if the URL is safe, False if it is not.
"""
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http',
'https') and ref_url.netloc == test_url.netloc
def get_redirect_target():
"""
Gets a safe redirect target.
Returns:
The first safe redirect target.
"""
for target in request.args.get('next'), request.referrer:
if not target:
continue
elif is_safe_url(target):
return target
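# Usage sketch (illustrative, not part of the original module). It assumes a
# hypothetical SQLAlchemy 'Tag' model, a Flask-SQLAlchemy 'db' object and an
# 'index' endpoint; only get_or_create and get_redirect_target come from this
# file.
#
#     from flask import redirect, url_for
#
#     @app.route('/tags/<name>')
#     def add_tag(name):
#         tag, created = get_or_create(Tag, name=name)
#         if created:
#             db.session.add(tag)
#             db.session.commit()
#         # Fall back to the index if no safe 'next' target was supplied.
#         return redirect(get_redirect_target() or url_for('index'))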
| mit | 7,296,754,981,301,055,000 | 20 | 77 | 0.591954 | false | 4.2 | false | false | false |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/aio/operations/_images_operations.py | 1 | 29335 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ImagesOperations:
"""ImagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.Image",
**kwargs: Any
) -> "_models.Image":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'Image')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Image', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.Image",
**kwargs: Any
) -> AsyncLROPoller["_models.Image"]:
"""Create or update an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param parameters: Parameters supplied to the Create Image operation.
:type parameters: ~azure.mgmt.compute.v2019_12_01.models.Image
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_12_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
image_name=image_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.ImageUpdate",
**kwargs: Any
) -> "_models.Image":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ImageUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Image', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.ImageUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.Image"]:
"""Update an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param parameters: Parameters supplied to the Update Image operation.
:type parameters: ~azure.mgmt.compute.v2019_12_01.models.ImageUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_12_01.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
image_name=image_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
image_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
image_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an Image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
image_name=image_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
async def get(
self,
resource_group_name: str,
image_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Image":
"""Gets an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Image, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2019_12_01.models.Image
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ImageListResult"]:
"""Gets the list of images under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ImageListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_12_01.models.ImageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ImageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ImageListResult"]:
"""Gets the list of Images in the subscription. Use nextLink property in the response to get the
next page of Images. Do this till nextLink is null to fetch all the Images.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ImageListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_12_01.models.ImageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ImageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images'} # type: ignore
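# Usage sketch (illustrative, not part of the generated module). It assumes
# the azure-identity package and the async management client from
# azure.mgmt.compute.aio; subscription, group and image names are placeholders.
#
#     import asyncio
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.compute.aio import ComputeManagementClient
#
#     async def main():
#         credential = DefaultAzureCredential()
#         client = ComputeManagementClient(credential, "<subscription-id>")
#         # Fetch one image, then page through every image in a resource group.
#         image = await client.images.get("my-resource-group", "my-image")
#         async for img in client.images.list_by_resource_group("my-resource-group"):
#             print(img.name)
#         await client.close()
#         await credential.close()
#
#     asyncio.run(main())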
| mit | -7,266,428,311,589,017,000 | 47.407591 | 181 | 0.634907 | false | 4.309534 | true | false | false |
pradyu1993/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 1 | 34415 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# License: BSD style
import numpy as np
from scipy import linalg, optimize, rand
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import array2d, check_random_state
from ..utils import deprecated
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
def solve_triangular(x, y, lower=True):
return linalg.solve(x, y)
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = array2d(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) / 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij.astype(np.int)
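# Worked example (added for illustration, not in the original source):
# for X = [[0.], [1.], [3.]] the function returns
#     ij = [[0, 1], [0, 2], [1, 2]]
#     D  = [[1.], [3.], [2.]]
# i.e. D[k] = |X[ij[k, 0]] - X[ij[k, 1]]| for each of the
# n_samples * (n_samples - 1) / 2 unordered sample pairs.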
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
`theta_`: array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
`reduced_likelihood_function_value_`: array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
# Run input checks
self._check_params()
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) with the observations of the
scalar output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X = array2d(X)
y = np.asarray(y).ravel()[:, np.newaxis]
# Check shapes of DOE & observations
n_samples_X, n_features = X.shape
n_samples_y = y.shape[0]
if n_samples_X != n_samples_y:
raise ValueError("X and y must have the same number of rows.")
else:
n_samples = n_samples_X
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if np.min(np.sum(D, axis=1)) == 0. \
and self.corr != correlation.pure_nugget:
raise Exception("Multiple input features cannot have the same"
" value")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
+ "likely something is going wrong with the "
+ "regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
+ "n_samples=%d must be greater than the "
+ "regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
+ "autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
+ "Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
+ "Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
+ "Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like
An array with shape (n_eval, ) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) with the Mean Squared Error at x.
"""
# Check input shapes
X = array2d(X)
n_eval, n_features_X = X.shape
n_samples, n_features = self.X.shape
# Run input checks
self._check_params(n_samples)
if n_features_X != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
+ "should match the sample size used for fit() "
+ "which is %d.") % (n_features_X, n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
+ "at instanciation. Need to recompute "
+ "autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T)
else:
# Ordinary Kriging
u = np.zeros(y.shape)
MSE = self.sigma2 * (1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
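# Usage sketch (illustrative, consistent with the class docstring example):
#     gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1.)
#     gp.fit(X_train, y_train)
#     y_pred, mse = gp.predict(X_test, eval_MSE=True)
#     sigma = np.sqrt(mse)  # pointwise predictive standard deviation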
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if np.min(np.sum(D, axis=1)) == 0. \
and self.corr != correlation.pure_nugget:
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
+ "of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
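# Note (added for clarity, not in the original source): with C the Cholesky
# factor of R, Ft = C^-1 F, Yt = C^-1 y and rho = Yt - Ft beta, the code above
# computes
#     sigma2 = ||rho||^2 / n_samples
#     detR   = prod(diag(C))^(2 / n_samples) = det(R)^(1 / n_samples)
# and returns -sigma2 * detR, i.e. minus the concentrated (reduced) likelihood
# criterion that is maximized over theta.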
@deprecated("to be removed in 0.14, access ``self.theta_`` etc. directly "
" after fit.")
def arg_max_reduced_likelihood_function(self):
return self._arg_max_reduced_likelihood_function()
@property
@deprecated('``theta`` is deprecated and will be removed in 0.14, '
'please use ``theta_`` instead.')
def theta(self):
return self.theta_
@property
@deprecated("``reduced_likelihood_function_value`` is deprecated and will"
"be removed in 0.14, please use "
"``reduced_likelihood_function_value_`` instead.")
def reduced_likelihood_function_value(self):
return self.reduced_likelihood_function_value_
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print "The chosen optimizer is: " + str(self.optimizer)
if self.random_start > 1:
print str(self.random_start) + " random starts are required."
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(theta=10.
** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t: \
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t: \
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = np.log10(self.thetaL) \
+ rand(self.theta0.size).reshape(self.theta0.shape) \
* np.log10(self.thetaU / self.thetaL)
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0), constraints, iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_minus_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
optimal_rlf_value = - optimal_minus_rlf_value
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
                        print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = array2d(self.theta0.min())
self.thetaL = array2d(self.thetaL.min())
self.thetaU = array2d(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
                    print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = array2d(theta_iso)
self.thetaL = array2d(thetaL[0, i])
self.thetaU = array2d(thetaU[0, i])
def corr_cut(t, d):
return corr(array2d(np.hstack([
optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i + 1)::]])), d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError(("This optimizer ('%s') is not "
+ "implemented yet. Please contribute!")
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError(("regr should be one of %s or callable, "
+ "%s was given.")
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = array2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError(("corr should be one of %s or callable, "
+ "%s was given.")
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
+ "'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = array2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = array2d(self.thetaL)
self.thetaU = array2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
+ "same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
                raise ValueError("The bounds must satisfy 0 < thetaL <= "
+ "thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
+ "neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
        if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause | -5,101,911,511,660,186,000 | 37.366778 | 79 | 0.555165 | false | 4.222181 | false | false | false |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/core/internals.py | 1 | 151884 | import copy
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull,
_NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like,
ABCSparseSeries, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalent, _maybe_convert_string_to_object,
is_categorical)
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
import pandas.core.common as com
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timestamp, Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.lib import BlockPlacement
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
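    # Illustrative example (informal, values made up): a 2-d Block holding
    #
    #   vals = np.array([[1., 2., 3.],
    #                    [4., 5., 6.]])   # 2 items x 3 rows
    #
    # with mgr_locs [0, 2] backs items 0 and 2 of the owning BlockManager;
    # len(values) must always equal len(mgr_locs) (checked in __init__ below).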
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
len(self.values), len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
    def is_datelike(self):
        """ return True if I am a datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, copy=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if copy:
values = values.copy()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, len(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.get_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
                raise AssertionError("dtypes as dict is not supported yet")
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
        raise on an exception if raise_on_error is True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(self.values.ravel(), dtype, copy=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
                raise TypeError("cannot set astype for copy = [%s] for dtype "
                                "(%s [%s]) with smaller itemsize than current "
                                "(%s [%s])" % (copy, self.dtype.name,
                                               self.itemsize, newb.dtype.name,
                                               newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return [self.copy()] if copy else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
        we may have roundtripped through object in the meantime """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
    def replace(self, to_replace, value, inplace=False, filter=None,
                regex=False):
        """ replace the to_replace value with value; this may create new
        blocks here, but it is just a call to putmask. regex is not used
        here; it is used in ObjectBlocks and is kept in the signature for API
        compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(convert_numeric=False)
return block
except (ValueError, TypeError) as detail:
raise
except Exception as detail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
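        # e.g. putting floats into an IntBlock cannot be done in place: the
        # affected columns are upcast below via com._maybe_promote and
        # returned as new blocks.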
# may need to align the new
if hasattr(new, 'reindex_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if isinstance(new, np.ndarray) and new.ndim == self.ndim - 1:
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
n = new[i] if isinstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
                        # we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.copy()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def get_values(self, dtype=None):
return self.values
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block, the result of the func
"""
values = self.values
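        # e.g. with block values shaped (n_items, n_rows) and ``other`` shaped
        # (n_rows, n_items), the shapes compare as reversed below and the
        # values are transposed before ``func`` is applied.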
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
                    # this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
        if not isinstance(result, np.ndarray):
            # differentiate between an invalid ndarray-ndarray comparison
            # and an invalid type comparison
            if isinstance(values, np.ndarray) and is_list_like(other):
                raise ValueError('Invalid broadcasting comparison [%s] '
                                 'with block values' % repr(other))
            raise TypeError('Could not compare [%s] with block values'
                            % repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block(s), the result of the func
"""
values = self.values
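        # e.g. when this block cannot hold NA and ``cond`` is not uniform
        # across items, the result is split further below into separate
        # blocks using a per-item mask.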
# see if we can align other
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
                # if it is symmetric we are ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).all():
pass
                # pseudo broadcast (it's a 2d vs 1d say and where needs it in
                # a specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(detail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.append(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
return array_equivalent(self.values, other.values)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
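    # e.g. SparseBlock and CategoricalBlock build on this mixin: each such
    # block wraps a single ``_holder`` instance (SparseArray / Categorical)
    # and is never merged with other blocks during consolidation.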
def __init__(self, values, placement,
ndim=None, fastpath=False,):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def get(self, item):
if self.ndim == 1:
loc = self.items.get_loc(item)
return self.values[loc]
else:
return self.values
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
formatter = None
if float_format and decimal != '.':
formatter = lambda v : (float_format % v).replace('.',decimal,1)
elif decimal != '.':
formatter = lambda v : ('%g' % v).replace('.',decimal,1)
elif float_format:
formatter = lambda v : float_format % v
if formatter is None and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
if formatter:
imask = (~mask).ravel()
values.flat[imask] = np.array(
[formatter(val) for val in values.ravel()[imask]])
return values
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
return (isinstance(element, (float, int, complex, np.float_, np.int_)) and
                not isinstance(element, (bool, np.bool_)))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
elif isinstance(value, Timedelta):
value = value.value
elif isinstance(value, np.timedelta64):
pass
elif com.is_integer(value):
# coerce to seconds of timedelta
value = np.timedelta64(int(value * 1e9))
elif isinstance(value, timedelta):
value = np.timedelta64(value)
return value
def _try_coerce_args(self, values, other):
""" Coerce values and other to float64, with null values converted to
NaN. values is always ndarray-like, other may not be """
def masker(v):
mask = isnull(v)
v = v.astype('float64')
v[mask] = np.nan
return v
values = masker(values)
if is_null_datelike_scalar(other):
other = np.nan
elif isinstance(other, (np.timedelta64, Timedelta, timedelta)):
other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
if other == tslib.iNaT:
other = np.nan
elif lib.isscalar(other):
other = np.float64(other)
else:
other = masker(other)
return values, other
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, np.integer):
result = lib.Timedelta(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
#### FIXME ####
# should use the core.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
def get_values(self, dtype=None):
# return object dtypes as Timedelta
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timedelta
).reshape(self.values.shape)
return self.values
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
placement=None):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
def convert(self, convert_dates=True, convert_numeric=True, convert_timedeltas=True,
copy=True, by_item=True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
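        # e.g. an object column holding datetime.datetime values may come
        # back as a DatetimeBlock, and numeric-looking objects as a numeric
        # block, which is why multiple blocks can be returned.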
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.iget(i)
values = com._possibly_convert_objects(
values.ravel(), convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values,
ndim=self.ndim, placement=[rl])
blocks.append(newb)
else:
values = com._possibly_convert_objects(
self.values.ravel(), convert_dates=convert_dates,
convert_numeric=convert_numeric
).reshape(self.values.shape)
blocks.append(make_block(values,
ndim=self.ndim, placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape),dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
result_blocks = []
for blk in blocks:
result_blocks.extend(blk.convert(convert_dates=True,
convert_numeric=False))
return result_blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
blk = [self]
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
if not either_list and com.is_re(to_replace):
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=True)
elif not (either_list or regex):
blk = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex)
elif both_lists:
for to_rep, v in zip(to_replace, value):
blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex)
elif to_rep_is_list and regex:
for to_rep in to_replace:
blk[0], = blk[0]._replace_single(to_rep, value,
inplace=inplace,
filter=filter, regex=regex)
else:
blk[0], = blk[0]._replace_single(to_replace, value,
inplace=inplace, filter=filter,
regex=regex)
return blk
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
result = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter,
regex=regex)
if not isinstance(result, list):
result = [result]
return result
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isnull(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
            # value is guaranteed to be a string here; s can be either a
            # string or null, and if it's null it gets returned unchanged
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
return [self if inplace else
make_block(new_values,
fastpath=True, placement=self.mgr_locs)]
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement,
fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True, placement=placement,
**kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
@property
def shape(self):
return (len(self.mgr_locs), len(self.values))
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.fillna(value=value,
limit=limit),
placement=self.mgr_locs)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(values=values.fillna(fill_value=fill_value,
method=method,
limit=limit),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routine
        # if it's REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new_values[mask] = new
return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None):
"""
Coerce to the new type (if copy=True, return a new copy)
        raise on an exception if raise_on_error is True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return make_block(values,
ndim=self.ndim,
placement=self.mgr_locs)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1,len(values))
class DatetimeBlock(Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement,
fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values,
fastpath=True, placement=placement,
**kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or
isinstance(element, datetime) or
isnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_args(self, values, other):
""" Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be """
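        # e.g. datetime(2000, 1, 1) is coerced below to its i8 value
        # 946684800000000000 (nanoseconds since the epoch).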
values = values.view('i8')
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif isinstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
other = np.array(other, dtype='i8')
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.datetime64)):
result = lib.Timestamp(result)
return result
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
value = tslib.iNaT
return value
def fillna(self, value, limit=None,
inplace=False, downcast=None):
# straight putmask here
values = self.values if inplace else self.values.copy()
mask = isnull(self.values)
value = self._try_fill(value)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
            mask[mask.cumsum(self.ndim-1) > limit] = False
np.putmask(values, mask, value)
return [self if inplace else
make_block(values,
fastpath=True, placement=self.mgr_locs)]
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
from pandas.core.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(values.view('i8').ravel(),
tz=None,
format=format,
na_rep=na_rep).reshape(values.shape)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
def get_values(self, dtype=None):
# return object dtype as Timestamps
if dtype == object:
return lib.map_infer(self.values.ravel(), lib.Timestamp)\
.reshape(self.values.shape)
return self.values
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
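    # Note: in practice a SparseBlock holds exactly one SparseArray (one
    # item); the BlockManager asserts that a sparse block refers to a single
    # item.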
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
#return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
copy=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def copy(self, deep=True):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement,
sparse_index=None, kind=None, dtype=None,
fill_value=None, copy=False, fastpath=True):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None:
fill_value = self.values.fill_value
# if not isinstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return make_block(np.empty(values.shape, dtype=dtype),
placement, fastpath=True,)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
return make_block(new_values, ndim=self.ndim,
fastpath=fastpath, placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = com.interpolate_2d(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
if issubclass(self.dtype.type, np.floating):
value = float(value)
values = self.values if inplace else self.values.copy()
return [self.make_block_same_class(values=values.get_values(value),
fill_value=value,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None,
dtype=None, fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if isinstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
klass = DatetimeBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement)
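# --- Illustrative sketch (added for exposition; not part of the original
# pandas source): make_block dispatches on the values' dtype to pick a Block
# subclass.  The shapes and placements below are arbitrary assumptions, and
# this helper is never called by library code.
def _example_make_block_dispatch():  # pragma: no cover
    import numpy as np
    float_blk = make_block(np.zeros((1, 3)), placement=slice(0, 1))
    int_blk = make_block(np.arange(3).reshape(1, 3), placement=slice(1, 2))
    # float64 values land in a FloatBlock, integer values in an IntBlock
    return type(float_blk).__name__, type(int_blk).__name__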
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if len(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple items")
else:
if self.ndim != block.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [_ensure_index([])] + [
_ensure_index(a) for a in self.axes[1:]
]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' % (old_len, new_len))
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper))
return obj
def add_prefix(self, prefix):
f = (str(prefix) + '%s').__mod__
return self.rename_axis(f, axis=0)
def add_suffix(self, suffix):
f = ('%s' + str(suffix)).__mod__
return self.rename_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, len(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return com.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return com.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values,
mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.astype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (isinstance(state, tuple) and len(state) >= 4
and '0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(
unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [self.axes[0].get_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self):
return len(self.items)
def __unicode__(self):
output = com.pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % com.pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if not block.is_sparse and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.format(len(self.items),
tot_items))
def apply(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager integrity check
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if f == 'where' and kwargs.get('align', True):
align_copy = True
align_keys = ['other', 'cond']
elif f == 'putmask' and kwargs.get('align', True):
align_copy = False
align_keys = ['new', 'mask']
elif f == 'eval':
align_copy = False
align_keys = ['other']
elif f == 'fillna':
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ['value']
else:
align_keys = []
aligned_args = dict((k, kwargs[k]) for k in align_keys
if hasattr(kwargs[k], 'reindex_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = getattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindex_axis(b_items, axis=axis,
copy=align_copy)
applied = getattr(b, f)(**kwargs)
if isinstance(applied, list):
result_blocks.extend(applied)
else:
result_blocks.append(applied)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
def isnull(self, **kwargs):
return self.apply('apply', **kwargs)
def where(self, **kwargs):
return self.apply('where', **kwargs)
def eval(self, **kwargs):
return self.apply('eval', **kwargs)
def setitem(self, **kwargs):
return self.apply('setitem', **kwargs)
def putmask(self, **kwargs):
return self.apply('putmask', **kwargs)
def diff(self, **kwargs):
return self.apply('diff', **kwargs)
def interpolate(self, **kwargs):
return self.apply('interpolate', **kwargs)
def shift(self, **kwargs):
return self.apply('shift', **kwargs)
def fillna(self, **kwargs):
return self.apply('fillna', **kwargs)
def downcast(self, **kwargs):
return self.apply('downcast', **kwargs)
def astype(self, dtype, **kwargs):
return self.apply('astype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply('convert', **kwargs)
def replace(self, **kwargs):
return self.apply('replace', **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replace """
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isnull(s):
return isnull(values)
return _possibly_compare(values, getattr(s, 'asm8', s),
operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
for blk in self.blocks:
        # it's possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
result = b.replace(s, d, inplace=inplace,
regex=regex)
if isinstance(result, list):
new_rb.extend(result)
else:
new_rb.append(result)
else:
# get our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.any():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.apply('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
        Return True if the blocks are consolidated, i.e. at most one block per ftype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_items = self.items.take(indexer)
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = com.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0,
allow_fill=False)
new_blocks.append(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(new_blocks, new_axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
        deep : boolean or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == 'all':
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [ copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply('copy', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].get_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workaround for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
def xs(self, key, axis=1, copy=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d'
% axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
# we must copy here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.append(newb)
elif len(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(values=vals, placement=block.mgr_locs,
klass=block.__class__, fastpath=True,)]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
get a cross sectional for a given location in the
items ; handle dups
        return the result; it *could* be a view in the case of a
single block
"""
if len(self.blocks) == 1:
return self.blocks[0].values[:, loc]
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isnull(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isnull(self.items)]
# allow a single nan location indexer
if not np.isscalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc, fastpath=fastpath)
else:
if isnull(item):
raise ValueError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(new_axis=self.items[indexer],
indexer=indexer, axis=0, allow_dups=True)
def iget(self, i, fastpath=True):
"""
        Return the data as a SingleBlockManager if fastpath=True and possible.
        Otherwise return it as an ndarray.
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
if not fastpath or block.is_sparse or values.ndim != 1:
return values
        # fastpath shortcut for selecting a single dim from a 2-dim BM
return SingleBlockManager([ block.make_block_same_class(values,
placement=slice(0, len(values)),
ndim=1,
fastpath=True) ],
self.axes[1])
def get_scalar(self, tup):
"""
Retrieve single item
"""
full_loc = list(ax.get_loc(x)
for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
full_loc[0] = self._blklocs[full_loc[0]]
# FIXME: this may return non-upcasted types?
return blk.values[tuple(full_loc)]
def delete(self, item):
"""
Delete selected item (items if non-unique) in-place.
"""
indexer = self.items.get_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
if not is_blk_deleted[blkno])
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_sparse = isinstance(value, SparseArray)
value_is_cat = is_categorical(value)
value_is_nonconsolidatable = value_is_sparse or value_is_cat
if value_is_sparse:
# sparse
assert self.ndim == 2
def value_getitem(placement):
return value
elif value_is_cat:
# categorical
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs), check=check)
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks -
len(removed_blknos))
self._blknos = com.take_1d(new_blknos, self._blknos, axis=0,
allow_fill=False)
self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
if i not in set(removed_blknos))
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_nonconsolidatable:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(values=value.copy(), ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1))
for mgr_loc in unfit_mgr_locs)
self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
len(self.blocks))
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(values=value_getitem(unfit_val_items),
ndim=self.ndim, placement=unfit_mgr_locs))
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc, item, value, allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError('cannot insert %s, already exists' % item)
if not isinstance(loc, int):
raise TypeError("loc must be int")
block = make_block(values=value,
ndim=self.ndim,
placement=slice(loc, loc+1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = self.items.insert(loc, item)
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
def reindex_axis(self, new_index, axis, method=None, limit=None,
fill_value=None, copy=True):
"""
Conform block manager to new index.
"""
new_index = _ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(
new_index, method=method, limit=limit)
return self.reindex_indexer(new_index, indexer, axis=axis,
fill_value=fill_value, copy=copy)
def reindex_indexer(self, new_axis, indexer, axis, fill_value=None,
allow_dups=False, copy=True):
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
axis : int
fill_value : object
allow_dups : bool
pandas-indexer with -1's only.
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(
indexer, fill_tuple=(fill_value,))
else:
new_blocks = [blk.take_nd(indexer, axis=axis,
fill_tuple=(fill_value if fill_value is not None else
blk.fill_value,))
for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.getitem_block(slobj,
new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = com._maybe_promote(blk.dtype)
fill_tuple = (fill_value,)
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = com.take_1d(self._blknos, slobj, fill_value=-1,
allow_fill=allow_fill)
blklocs = com.take_1d(self._blklocs, slobj, fill_value=-1,
allow_fill=allow_fill)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
#
# FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.append(self._make_na_block(
placement=mgr_locs, fill_value=fill_value))
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's only one item
# and each mgr loc is a copy of that single item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(blk.take_nd(
blklocs[mgr_locs.indexer], axis=0,
new_mgr_locs=mgr_locs, fill_tuple=None))
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64') if isinstance(indexer, slice) \
else np.asanyarray(indexer, dtype='int64')
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True)
def merge(self, other, lsuffix='', rsuffix=''):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to merge managers')
l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
right=other.items, rsuffix=rsuffix)
new_items = _concat_indexes([l, r])
new_blocks = [blk.copy(deep=False)
for blk in self.blocks]
offset = self.shape[0]
for blk in other.blocks:
blk = blk.copy(deep=False)
blk.mgr_locs = blk.mgr_locs.add(offset)
new_blocks.append(blk)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(_consolidate(new_blocks), new_axes)
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError(('Number of dimensions must agree '
'got %d and %d') % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all (ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if len(self.blocks) != len(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.tolist())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return all(block.equals(oblock) for block, oblock in
zip(self_blocks, other_blocks))
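# --- Illustrative sketch (added for exposition; not part of the original
# pandas source): assembling a BlockManager by hand from two blocks.  The
# labels, dtypes and placements are made-up assumptions; real code reaches
# this class through the DataFrame constructor.
def _example_blockmanager_by_hand():  # pragma: no cover
    import numpy as np
    items = Index(['a', 'b', 'c'])
    major = Index([0, 1])
    float_blk = make_block(np.zeros((2, 2)), placement=[0, 2])
    int_blk = make_block(np.arange(2).reshape(1, 2), placement=[1])
    mgr = BlockManager([float_blk, int_blk], [items, major])
    # a deep copy goes through apply('copy') and compares equal block-by-block
    return mgr.equals(mgr.copy(deep=True))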
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
if isinstance(axis, list):
if len(axis) != 1:
raise ValueError(
"cannot create SingleBlockManager with more than 1 axis")
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if isinstance(block, list):
# empty block
if len(block) == 0:
block = [np.array([])]
elif len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
else:
self.axes = [_ensure_index(axis)]
# create the block here
if isinstance(block, list):
# provide consolidation to the interleaved_dtype
if len(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.astype(dtype) for b in block]
block = _consolidate(block)
if len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
if not isinstance(block, Block):
block = make_block(block,
placement=slice(0, len(axis)),
ndim=1, fastpath=True)
self.blocks = [block]
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
limit=None, copy=True):
# if we are the same and don't copy, just return
if self.index.equals(new_axis):
if copy:
return self.copy(deep=True)
else:
return self
values = self._block.get_values()
if indexer is None:
indexer = self.items.get_indexer_for(new_axis)
if fill_value is None:
# FIXME: is fill_value used correctly in sparse blocks?
if not self._block.is_sparse:
fill_value = self._block.fill_value
else:
fill_value = np.nan
new_values = com.take_1d(values, indexer,
fill_value=fill_value)
# fill if needed
if method is not None or limit is not None:
new_values = com.interpolate_2d(new_values, method=method,
limit=limit, fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
block = make_block(new_values, copy=copy,
placement=slice(0, len(new_axis)))
mgr = SingleBlockManager(block, new_axis)
mgr._consolidate_inplace()
return mgr
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(self._block._slice(slobj),
self.index[slobj], fastpath=True)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
kwargs['by_item'] = False
return self.apply('convert', **kwargs)
@property
def dtype(self):
return self._values.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def get_dtype_counts(self):
return {self.dtype.name: 1}
def get_ftype_counts(self):
return {self.ftype: 1}
def get_dtypes(self):
return np.array([self._block.dtype])
def get_ftypes(self):
return np.array([self._block.ftype])
@property
def values(self):
return self._values.view()
def get_values(self):
""" return a dense type view """
return np.array(self._block.to_dense(),copy=False)
@property
def itemsize(self):
return self._values.itemsize
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.get_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
return self._block.values[loc]
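# --- Illustrative sketch (added for exposition; not part of the original
# pandas source): SingleBlockManager is the 1-dimensional manager behind
# Series.  The index and values below are arbitrary assumptions.
def _example_single_block_manager():  # pragma: no cover
    import numpy as np
    mgr = SingleBlockManager(np.arange(3.0), Index(['x', 'y', 'z']))
    # exactly one block is wrapped, and its dtype is exposed directly
    return mgr.dtype, mgr.get_values()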
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
implied = tuple(map(int, [len(ax) for ax in axes]))
if passed == implied and e is not None:
raise e
raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
passed,implied))
def create_block_manager_from_blocks(blocks, axes):
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement is
# basically "all items", but if there're many, don't bother
# converting, it's an error anyway.
blocks = [make_block(values=blocks[0],
placement=slice(0, len(axes[0])))]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
blocks = [getattr(b, 'values', b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
construction_error(len(arrays), arrays[0].shape, axes, e)
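# --- Illustrative sketch (added for exposition; not part of the original
# pandas source): building a manager from per-column arrays, the path taken
# by the DataFrame(dict-of-arrays) constructor.  Column names and values are
# made-up assumptions.
def _example_manager_from_arrays():  # pragma: no cover
    import numpy as np
    arrays = [np.arange(3), np.arange(3.0)]
    names = Index(['ints', 'floats'])
    axes = [names, Index([10, 20, 30])]
    mgr = create_block_manager_from_arrays(arrays, names, axes)
    # form_blocks grouped the columns by dtype: one int block, one float block
    return mgr.get_dtype_counts()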
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
sparse_items = []
datetime_items = []
cat_items = []
extra_locs = []
names_idx = Index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
k = names[name_idx]
v = arrays[name_idx]
if isinstance(v, (SparseArray, ABCSparseSeries)):
sparse_items.append((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.append((i, k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.append((i, k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if hasattr(v, 'tz') and v.tz is not None:
object_items.append((i, k, v))
else:
datetime_items.append((i, k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
if (v > 2 ** 63 - 1).any():
object_items.append((i, k, v))
continue
int_items.append((i, k, v))
elif v.dtype == np.bool_:
bool_items.append((i, k, v))
elif is_categorical(v):
cat_items.append((i, k, v))
else:
object_items.append((i, k, v))
blocks = []
if len(float_items):
float_blocks = _multi_blockify(float_items)
blocks.extend(float_blocks)
if len(complex_items):
complex_blocks = _simple_blockify(
complex_items, np.complex128)
blocks.extend(complex_blocks)
if len(int_items):
int_blocks = _multi_blockify(int_items)
blocks.extend(int_blocks)
if len(datetime_items):
datetime_blocks = _simple_blockify(
datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(bool_items):
bool_blocks = _simple_blockify(
bool_items, np.bool_)
blocks.extend(bool_blocks)
if len(object_items) > 0:
object_blocks = _simple_blockify(
object_items, np.object_)
blocks.extend(object_blocks)
if len(sparse_items) > 0:
sparse_blocks = _sparse_blockify(sparse_items)
blocks.extend(sparse_blocks)
if len(cat_items) > 0:
cat_blocks = [ make_block(array,
klass=CategoricalBlock,
fastpath=True,
placement=[i]
) for i, names, array in cat_items ]
blocks.extend(cat_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.append(na_block)
return blocks
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(
list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.append(block)
return new_blocks
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(
array, klass=SparseBlock, fastpath=True,
placement=[i])
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x.values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return len(x),
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks):
if not len(blocks):
return None
counts = defaultdict(lambda: [])
for x in blocks:
counts[type(x)].append(x)
def _lcd_dtype(l):
""" find the lowest dtype that can accomodate the given types """
m = l[0].dtype
for x in l[1:]:
if x.dtype.itemsize > m.itemsize:
m = x.dtype
return m
have_int = len(counts[IntBlock]) > 0
have_bool = len(counts[BoolBlock]) > 0
have_object = len(counts[ObjectBlock]) > 0
have_float = len(counts[FloatBlock]) > 0
have_complex = len(counts[ComplexBlock]) > 0
have_dt64 = len(counts[DatetimeBlock]) > 0
have_td64 = len(counts[TimeDeltaBlock]) > 0
have_cat = len(counts[CategoricalBlock]) > 0
have_sparse = len(counts[SparseBlock]) > 0
have_numeric = have_float or have_complex or have_int
has_non_numeric = have_dt64 or have_td64 or have_cat
if (have_object or
(have_bool and (have_numeric or have_dt64 or have_td64)) or
(have_numeric and has_non_numeric) or
have_cat or
have_dt64 or
have_td64):
return np.dtype(object)
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
# if we are mixing unsigned and signed, then return
# the next biggest int type (if we can)
lcd = _lcd_dtype(counts[IntBlock])
kinds = set([i.dtype.kind for i in counts[IntBlock]])
if len(kinds) == 1:
return lcd
if lcd == 'uint64' or lcd == 'int64':
return np.dtype('int64')
        # return one size bigger on the itemsize if unsigned
if lcd.kind == 'u':
return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
return lcd
elif have_complex:
return np.dtype('c16')
else:
return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock])
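# --- Illustrative sketch (added for exposition; not part of the original
# pandas source): the dtype picked when interleaving mixed blocks.  Block
# contents are arbitrary assumptions.
def _example_interleaved_dtype():  # pragma: no cover
    import numpy as np
    int_blk = make_block(np.array([[1, 2]]), placement=[0])
    float_blk = make_block(np.array([[1.5, 2.5]]), placement=[1])
    bool_blk = make_block(np.array([[True, False]]), placement=[2])
    # int + float interleave as float64; adding a bool block forces object
    return (_interleaved_dtype([int_blk, float_blk]),
            _interleaved_dtype([int_blk, float_blk, bool_blk]))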
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
if isinstance(merged_blocks, list):
new_blocks.extend(merged_blocks)
else:
new_blocks.append(merged_blocks)
return new_blocks
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len(set([b.dtype for b in blocks])) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values,
fastpath=True, placement=new_mgr_locs)
# no merge
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1,) + shape))
return values
def _vstack(to_stack, dtype):
# work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
def _possibly_compare(a, b, op):
res = op(a, b)
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
if np.isscalar(res) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return res
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
from pandas.core.internals import make_block
panel_shape = (len(placement),) + shape
# TODO: lexsort depth needs to be 2!!
# Create observation selection vector using major and minor
# labels, for converting to panel format.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
for i in range(len(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return com._ensure_platform_int(
np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
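# --- Illustrative sketch (added for exposition; not part of the original
# pandas source): _factor_indexer turns per-axis labels into flat positions,
# here over a grid with 3 minor categories, so position = major * 3 + minor.
# The label arrays are arbitrary assumptions.
def _example_factor_indexer():  # pragma: no cover
    import numpy as np
    labels = [np.array([0, 1, 1]), np.array([2, 0, 1])]
    return _factor_indexer((3,), labels)  # -> array([2, 3, 4])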
def _get_blkno_placements(blknos, blk_count, group=True):
"""
Parameters
----------
blknos : array of int64
blk_count : int
group : bool
Returns
-------
iterator
yield (BlockPlacement, blkno)
"""
blknos = com._ensure_int64(blknos)
# FIXME: blk_count is unused, but it may avoid the use of dicts in cython
for blkno, indexer in lib.get_blkno_indexers(blknos, group):
yield blkno, BlockPlacement(indexer)
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
"""
If two indices overlap, add suffixes to overlapping entries.
If corresponding suffix is empty, the entry is simply converted to string.
"""
to_rename = left.intersection(right)
if len(to_rename) == 0:
return left, right
else:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s' %
to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
return (_transform_index(left, lrenamer),
_transform_index(right, rrenamer))
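# --- Illustrative sketch (added for exposition; not part of the original
# pandas source): only the overlapping label gets the suffixes.  The labels
# and suffixes are arbitrary assumptions.
def _example_items_overlap_with_suffix():  # pragma: no cover
    left, right = Index(['a', 'b']), Index(['b', 'c'])
    return items_overlap_with_suffix(left, '_x', right, '_y')
    # -> (Index(['a', 'b_x']), Index(['b_y', 'c']))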
def _transform_index(index, func):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
"""
if isinstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
def _putmask_smart(v, m, n):
"""
Return a new block, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
m : `mask`, applies to both sides (array like)
n : `new values` either scalar or an array like aligned with `values`
"""
# n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.array([n] * len(m))
elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar
n = np.repeat(np.array(n, ndmin=1), len(m))
    # see if we are only masking values that, if put into the array,
    # will work in the current dtype
try:
nn = n[m]
nn_at = nn.astype(v.dtype)
comp = (nn == nn_at)
if is_list_like(comp) and comp.all():
nv = v.copy()
nv[m] = nn_at
return nv
except (ValueError, IndexError, TypeError):
pass
# change the dtype
dtype, _ = com._maybe_promote(n.dtype)
nv = v.astype(dtype)
try:
nv[m] = n[m]
except ValueError:
idx, = np.where(np.squeeze(m))
for mask_index, new_val in zip(idx, n[m]):
nv[mask_index] = new_val
return nv
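# --- Illustrative sketch (added for exposition; not part of the original
# pandas source): _putmask_smart keeps the original dtype when the new
# values fit it, and upcasts otherwise.  The arrays are arbitrary
# assumptions; `n` is aligned with the full mask as the function expects.
def _example_putmask_smart():  # pragma: no cover
    import numpy as np
    v = np.array([1, 2, 3, 4])
    m = np.array([False, True, False, True])
    kept = _putmask_smart(v, m, np.array([0, 20, 0, 40]))      # stays integer
    upcast = _putmask_smart(v, m, np.array([0, 2.5, 0, 4.5]))  # becomes float
    return kept.dtype, upcast.dtype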
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
"""
concat_plan = combine_concat_plans([get_mgr_concatenation_plan(mgr, indexers)
for mgr, indexers in mgrs_indexers],
concat_axis)
blocks = [make_block(concatenate_join_units(join_units, concat_axis,
copy=copy),
placement=placement)
for placement, join_units in concat_plan]
return BlockManager(blocks, axes)
def get_empty_dtype_and_na(join_units):
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
na
"""
if len(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.float64, np.nan
has_none_blocks = False
dtypes = [None] * len(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
# dtypes = set()
upcast_classes = set()
null_upcast_classes = set()
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
if com.is_categorical_dtype(dtype):
upcast_cls = 'category'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
upcast_cls = 'object'
elif is_datetime64_dtype(dtype):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
else:
upcast_cls = 'float'
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_null:
null_upcast_classes.add(upcast_cls)
else:
upcast_classes.add(upcast_cls)
if not upcast_classes:
upcast_classes = null_upcast_classes
# create the result
if 'object' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'bool' in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return com.CategoricalDtype(), np.nan
elif 'float' in upcast_classes:
return np.dtype(np.float64), np.nan
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslib.iNaT
else: # pragma
raise AssertionError("invalid dtype determination in get_concat_dtype")
def concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy and concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = com._concat_compat(to_concat, axis=concat_axis)
return concat_values
def get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
    # Calculate post-reindex shape, except for the item axis which will be
    # handled separately for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = len(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = com.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = com.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks),
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape = list(mgr_shape)
shape[0] = len(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs) and
# Fastpath detection of join unit not needing to reindex its
# block: no ax0 reindexing took place and block placement was
# sequential before.
((ax0_indexer is None
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: all indexer locs are sequential (and
# length match is checked above).
(np.diff(ax0_blk_indexer) == 1).all()))
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
def combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:],
trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
def trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers,
shape=extra_shape)
class JoinUnit(object):
    def __init__(self, block, shape, indexers=None):
        # Passing shape explicitly is required for cases when block is None.
        # Use None instead of a mutable {} default so one dict is not shared
        # across instances.
        if indexers is None:
            indexers = {}
        self.block = block
        self.indexers = indexers
        self.shape = shape
def __repr__(self):
return '%s(%r, %s)' % (self.__class__.__name__,
self.block, self.indexers)
@cache_readonly
def needs_filling(self):
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
if self.block is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return self.block.dtype
else:
return com._get_dtype(com._maybe_promote(self.block.dtype,
self.block.fill_value)[0])
@cache_readonly
def is_null(self):
if self.block is None:
return True
if not self.block._can_hold_na:
return False
        # Usually it's enough to check only a small fraction of values to see
        # if a block is NOT null; chunking should help in such cases. The
        # chunk size of 1000 was chosen rather arbitrarily.
values_flat = self.block.values.ravel()
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
if not isnull(values_flat[i: i + chunk_len]).all():
return False
return True
@cache_readonly
def needs_block_conversion(self):
""" we might need to convert the joined values to a suitable block repr """
block = self.block
return block is not None and (block.is_sparse or block.is_categorical)
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
            if self.is_null and not getattr(self.block, 'is_categorical', None):
missing_arr = np.empty(self.shape, dtype=empty_dtype)
if np.prod(self.shape):
# NumPy 1.6 workaround: this statement gets strange if all
# blocks are of same dtype and some of them are empty:
# empty one are considered "null" so they must be filled,
# but no dtype upcasting happens and the dtype may not
# allow NaNs.
#
# In general, no one should get hurt when one tries to put
# incorrect values into empty array, but numpy 1.6 is
# strict about that.
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if self.block.is_categorical:
# preserve the categoricals for validation in _concat_compat
return self.block.values
elif self.block.is_sparse:
# preserve the sparse array for validation in _concat_compat
return self.block.values
if self.block.is_bool:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = com.take_nd(values, indexer, axis=ax,
fill_value=fill_value)
return values
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
if len(arr) == 0:
# Handle empty arr case separately: numpy 1.6 chokes on that.
return np.empty((0, 2), dtype=arr.dtype)
else:
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
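# Illustrative example of the return value (a sketch, not part of the original
# source): for arr = np.array([0, 2, 2, 5]) the function would return
# np.array([[0, 1], [2, 2], [5, 1]]) -- one (value, count) row per distinct value.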
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
if isinstance(slice_or_indexer, slice):
return 'slice', slice_or_indexer, lib.slice_len(slice_or_indexer,
length)
elif (isinstance(slice_or_indexer, np.ndarray) and
slice_or_indexer.dtype == np.bool_):
return 'mask', slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return 'fancy', indexer, len(indexer)
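# Illustrative sketch of the three return shapes (assumed inputs, not from the
# original source):
#   _preprocess_slice_or_indexer(slice(0, 4), 10, allow_fill=False)
#       -> ('slice', slice(0, 4), 4)
#   _preprocess_slice_or_indexer(np.array([True, False, True]), 3, allow_fill=False)
#       -> ('mask', <the boolean array>, 2)
#   _preprocess_slice_or_indexer([2, 0, 1], 3, allow_fill=False)
#       -> ('fancy', np.array([2, 0, 1], dtype=np.int64), 3)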
| mit | 1,726,036,148,876,224,800 | 32.963327 | 134 | 0.537957 | false | 4.163943 | false | false | false |
ludbb/secp256k1-py | tests/test_schnorr.py | 1 | 1732 | import pytest
import secp256k1
def test_schnorr_simple():
if not secp256k1.HAS_SCHNORR:
pytest.skip('secp256k1_schnorr not enabled, skipping')
return
inst = secp256k1.PrivateKey()
raw_sig = inst.schnorr_sign(b'hello')
assert inst.pubkey.schnorr_verify(b'hello', raw_sig)
key2 = secp256k1.PrivateKey()
assert not key2.pubkey.schnorr_verify(b'hello', raw_sig)
blank = secp256k1.PublicKey()
pubkey = blank.schnorr_recover(b'hello', raw_sig)
pub = secp256k1.PublicKey(pubkey)
assert pub.serialize() == inst.pubkey.serialize()
def test_schnorr_partial():
if not secp256k1.HAS_SCHNORR:
pytest.skip('secp256k1_schnorr not enabled, skipping')
return
signer1 = secp256k1.PrivateKey()
pubnonce1, privnonce1 = signer1.schnorr_generate_nonce_pair(b'hello')
signer2 = secp256k1.PrivateKey()
pubnonce2, privnonce2 = signer2.schnorr_generate_nonce_pair(b'hello')
# First test partial signatures with only two signers.
partial1 = signer1.schnorr_partial_sign(b'hello', privnonce1, pubnonce2)
partial2 = signer2.schnorr_partial_sign(b'hello', privnonce2, pubnonce1)
blank = secp256k1.PublicKey(flags=secp256k1.NO_FLAGS)
sig = blank.schnorr_partial_combine([partial1, partial2])
# Recover the public key from the combined signature.
pubkey = secp256k1.PublicKey().schnorr_recover(b'hello', sig)
assert blank.public_key is None
# Check that the combined public keys from signer1 and signer2
# match the recovered public key.
blank.combine(
[signer1.pubkey.public_key, signer2.pubkey.public_key])
assert blank.public_key
assert secp256k1.PublicKey(pubkey).serialize() == blank.serialize()
| mit | 3,662,207,017,736,819,700 | 35.083333 | 76 | 0.711894 | false | 3.166362 | true | false | false |
Youwotma/portia | slybot/slybot/pageactions.py | 1 | 1528 | import json
import re
LUA_SOURCE = """
function main(splash)
assert(splash:go(splash.args.url))
assert(splash:runjs(splash.args.js_source))
assert(splash:wait_for_resume(splash.args.slybot_actions_source))
splash:set_result_content_type("text/html")
return splash.html()
end
"""
JS_SOURCE = """
function main(splash) {
var events = (%s);
try{
__slybot__performEvents(events, function(){
splash.resume();
});
}catch(e){
splash.error(e);
}
}
"""
def filter_for_url(url):
def _filter(page_action):
accept = page_action.get('accept')
reject = page_action.get('reject')
if reject and re.search(reject, url):
return False
if accept and not re.search(accept, url):
return False
return True
return _filter
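# Illustrative shape of the page action entries this filter expects (an
# assumption for documentation only -- just 'accept' and 'reject' are read
# here; any other keys are interpreted downstream by __slybot__performEvents):
# page_actions = [
#     {'type': 'click', 'selector': '#load-more', 'accept': r'example\.com/products'},
#     {'type': 'scroll', 'reject': r'/login'},
# ]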
class PageActionsMiddleware(object):
def process_request(self, request, spider):
splash_options = request.meta.get('splash', None)
if not splash_options: # Already processed or JS disabled
return
splash_args = splash_options.get('args', {})
events = spider.page_actions
url = splash_args['url']
events = filter(filter_for_url(url), events)
if len(events):
splash_options['endpoint'] = 'execute'
splash_args.update({
"lua_source": LUA_SOURCE,
"slybot_actions_source": (JS_SOURCE % json.dumps(events)),
})
__all__ = ['PageActionsMiddleware']
| bsd-3-clause | -9,078,013,978,702,002,000 | 26.781818 | 74 | 0.590314 | false | 3.570093 | false | false | false |
jimmycallin/master-thesis | architectures/nn_discourse_parser/nets/data_reader.py | 1 | 6857 | import json
import codecs
class DRelation(object):
"""Implicit discourse relation object
The object is created from the CoNLL-json formatted data.
    The format can be a bit clunky for getting certain information, so
    convenience methods are implemented here, mostly for use by the
    feature functions.
"""
def __init__(self, relation_dict, parse):
self.relation_dict = relation_dict
self.parse = parse
self._arg_tokens = {}
self._arg_tokens[1] = None
self._arg_tokens[2] = None
self._arg_words = {}
self._arg_words[1] = None
self._arg_words[2] = None
self._arg_tree = {}
self._arg_tree[1] = None
self._arg_tree[2] = None
self._arg1_tree = None
self._arg1_tree_token_indices = None
self._arg2_tree = None
self._arg2_tree_token_indices = None
@property
def senses(self):
return self.relation_dict['Sense']
def arg_words(self, arg_pos):
"""Returns a list of Word objects"""
assert(arg_pos == 1 or arg_pos == 2)
if self._arg_words[arg_pos] is None:
key = 'Arg%s' % arg_pos
word_list = self.relation_dict[key]['TokenList']
self._arg_words[arg_pos] = [Word(x, self.parse[self.doc_id]) for x in word_list]
return self._arg_words[arg_pos]
def arg_tree(self, arg_pos):
"""Extract the tree for the argument
One tree only. Truncated as needed
Returns:
1) tree string
2) token indices (not address tuples) of that tree.
"""
assert(arg_pos == 1 or arg_pos == 2)
if self._arg_tree[arg_pos] is None:
trees, sentence_indices = self.arg_trees(arg_pos)
if arg_pos == 1:
tree = trees[-1]
sentence_index = sentence_indices[-1]
elif arg_pos == 2:
tree = trees[0]
sentence_index = sentence_indices[0]
key = 'Arg%s' % arg_pos
token_indices = [x[4] for x in self.relation_dict[key]['TokenList'] if x[3] == sentence_index]
self._arg_tree[arg_pos] = (tree, token_indices)
return self._arg_tree[arg_pos]
def arg_dtree_rule_list(self, arg_pos):
"""Returns a list of arcs in the dependency tree(s) for the arg """
assert(arg_pos == 1 or arg_pos == 2)
token_list = self.arg_token_addresses(arg_pos)
sentence_indices = set([x[3] for x in token_list])
sentence_index_to_dependency_tree = {}
for sentence_index in sentence_indices:
dependencies = \
self.parse[self.doc_id]['sentences'][sentence_index]['dependencies']
index_to_dependency = {}
# a dependency looks like this [u'prep', u'reported-8', u'In-1']
for dep in dependencies:
rel_type = dep[0]
head, _ = dep[1].rsplit('-', 1)
dependent, index = dep[2].rsplit('-', 1)
index_to_dependency[int(index)] = [rel_type, head, dependent]
sentence_index_to_dependency_tree[sentence_index] = index_to_dependency
rule_list = []
for token_address in token_list:
_, _, _, sentence_index, token_index = token_address
dtree = sentence_index_to_dependency_tree[sentence_index]
if token_index in dtree:
rule_list.append('_'.join(dtree[token_index]))
return rule_list
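    # Illustrative example (assumed data, not from the original source): for the
    # dependency [u'prep', u'reported-8', u'In-1'], the token addressed at index 1
    # would contribute the rule string 'prep_reported_In'.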
def arg_token_addresses(self, arg_pos):
assert(arg_pos == 1 or arg_pos == 2)
key = 'Arg%s' % arg_pos
return self.relation_dict[key]['TokenList']
@property
def doc_id(self):
return self.relation_dict['DocID']
@property
def relation_id(self):
return self.relation_dict['ID']
@property
def relation_type(self):
return self.relation_dict['Type']
@property
def doc_relation_id(self):
return '%s_%s' % (self.doc_id, self.relation_id)
def arg_tokens(self, arg_pos):
"""Returns a list of raw tokens"""
assert(arg_pos == 1 or arg_pos == 2)
if self._arg_tokens[arg_pos] is None:
key = 'Arg%s' % arg_pos
token_list = self.relation_dict[key]['TokenList']
self._arg_tokens[arg_pos] = [self.parse[self.doc_id]['sentences'][x[3]]['words'][x[4]][0] for x in token_list]
return self._arg_tokens[arg_pos]
def arg_trees(self, arg_pos):
key = 'Arg%s' % arg_pos
token_list = self.relation_dict[key]['TokenList']
sentence_indices = set([x[3] for x in token_list])
return [self.parse[self.doc_id]['sentences'][x]['parsetree'] for x in sentence_indices], list(sentence_indices)
def __repr__(self):
return self.relation_dict.__repr__()
def __str__(self):
return self.relation_dict.__str__()
class Word(object):
"""Word class wrapper
[u"'ve",
{u'CharacterOffsetBegin':2449,
u'CharacterOffsetEnd':2452,
u'Linkers':[u'arg2_15006',u'arg1_15008'],
u'PartOfSpeech':u'VBP'}]
"""
def __init__(self, word_address, parse):
self.word_address = word_address
self.word_token, self.word_info = parse['sentences'][word_address[3]]['words'][word_address[4]]
@property
def pos(self):
return self.word_info['PartOfSpeech']
@property
def lemma(self):
return self.word_info['Lemma']
@property
def sentence_index(self):
return self.word_address[3]
def extract_implicit_relations(data_folder, label_function=None):
#parse_file = '%s/pdtb-parses-plus.json' % data_folder
#parse_file = '%s/pdtb-parses.json' % data_folder
parse_file = '%s/parses.json' % data_folder
parse = json.load(codecs.open(parse_file, encoding='utf8'))
#relation_file = '%s/pdtb-data-plus.json' % data_folder
#relation_file = '%s/pdtb-data.json' % data_folder
relation_file = '%s/relations.json' % data_folder
relation_dicts = [json.loads(x) for x in open(relation_file)]
relations = [DRelation(x, parse) for x in relation_dicts if x['Type'] == 'Implicit']
if label_function is not None:
relations = [x for x in relations if label_function.label(x) is not None]
return relations
def extract_non_explicit_relations(data_folder, label_function=None):
parse_file = '%s/pdtb-parses.json' % data_folder
parse = json.load(codecs.open(parse_file, encoding='utf8'))
relation_file = '%s/pdtb-data.json' % data_folder
relation_dicts = [json.loads(x) for x in open(relation_file)]
relations = [DRelation(x, parse) for x in relation_dicts if x['Type'] != 'Explicit']
if label_function is not None:
relations = [x for x in relations if label_function.label(x) is not None]
return relations
| mit | 4,789,138,021,986,704,000 | 35.473404 | 122 | 0.589616 | false | 3.503832 | false | false | false |
rwl/openpowersystem | cdpsm/iec61970/core/voltage_level.py | 1 | 2591 | #------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" A collection of equipment at one common system voltage forming a switchgear. The equipment typically consist of breakers, busbars, instrumentation, control, regulation and protection devices as well as assemblies of all these.
"""
# <<< imports
# @generated
from cdpsm.iec61970.core.equipment_container import EquipmentContainer
from cdpsm.iec61970.core.base_voltage import BaseVoltage
from cdpsm.iec61970.core.substation import Substation
from cdpsm.iec61970.domain import Voltage
from google.appengine.ext import db
# >>> imports
class VoltageLevel(EquipmentContainer):
""" A collection of equipment at one common system voltage forming a switchgear. The equipment typically consist of breakers, busbars, instrumentation, control, regulation and protection devices as well as assemblies of all these.
"""
# <<< voltage_level.attributes
# @generated
# The bus bar's low voltage limit
low_voltage_limit = Voltage
# The bus bar's high voltage limit
high_voltage_limit = Voltage
# >>> voltage_level.attributes
# <<< voltage_level.references
# @generated
# The base voltage used for all equipment within the VoltageLevel.
base_voltage = db.ReferenceProperty(BaseVoltage,
collection_name="voltage_level")
# Virtual property. The association is used in the naming hierarchy.
pass # bays
# The association is used in the naming hierarchy.
substation = db.ReferenceProperty(Substation,
collection_name="voltage_levels")
# >>> voltage_level.references
# <<< voltage_level.operations
# @generated
# >>> voltage_level.operations
# EOF -------------------------------------------------------------------------
| agpl-3.0 | -1,730,218,190,851,964,200 | 38.861538 | 235 | 0.677345 | false | 4.421502 | false | false | false |
ganga-devs/ganga | ganga/GangaDirac/Lib/Server/DiracCommands.py | 1 | 18300 | # Dirac commands
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
@diracCommand
def getJobGroupJobs(jg):
''' Return jobs in a group'''
return dirac.selectJobs(jobGroup=jg)
@diracCommand
def kill(id):
''' Kill a given DIRAC Job ID within DIRAC '''
return dirac.deleteJob(id)
@diracCommand
def peek(id):
''' Peek at the DIRAC Job id and return what we saw '''
return dirac.peekJob(id)
@diracCommand
def getJobCPUTime(id):
''' Get the amount of CPU time taken by the DIRAC Job id'''
return dirac.getJobCPUTime(id)
@diracCommand
def reschedule(id):
''' Reschedule within DIRAC a given DIRAC Job id'''
return dirac.reschedule(id)
@diracCommand
def submit(djob, mode='wms'):
''' Submit a DIRAC job given by the jdl:djob with a given mode '''
return dirac.submitJob(djob, mode=mode)
@diracCommand
def ping(system, service):
''' Ping a given service on a given system running DIRAC '''
return dirac.ping(system, service)
@diracCommand
def removeFile(lfn):
''' Remove a given LFN from the DFC'''
ret = {}
if type(lfn) is list:
for l in lfn:
ret.update(dirac.removeFile(l))
else:
ret.update(dirac.removeFile(lfn))
return ret
@diracCommand
def getMetadata(lfn):
''' Return the metadata associated with a given :DN'''
return dirac.getLfnMetadata(lfn)
@diracCommand
def getReplicas(lfns):
''' Return the locations of the replicas of a given LFN in a dict format, SE: location '''
return dirac.getReplicas(lfns, active=True, preferDisk = True)
@diracCommand
def getReplicasForJobs(lfns):
''' Return the locations of the replicas of a given LFN in a dict format, SE: location.
    This is for use in the splitter to exclude replicas at SEs that are not to be used for user jobs '''
return dirac.getReplicasForJobs(lfns)
@diracCommand
def getAccessURL(lfn, SE, protocol=False):
''' Return the access URL for the given LFN, storage element and protocol. The protocol should be in the form of a list '''
return dirac.getAccessURL(lfn, SE, False, protocol)
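# Illustrative call (the LFN, SE name and protocol list are assumptions, not
# values from the original source):
# getAccessURL('/some/vo/user/file.dst', 'CERN-USER', protocol=['xroot', 'root'])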
@diracCommand
def getFile(lfns, destDir=''):
''' Put the physical file behind the LFN in the destDir path'''
return dirac.getFile(lfns, destDir=destDir)
@diracCommand
def replicateFile(lfn, destSE, srcSE='', locCache=''):
''' Replicate a given LFN from a srcSE to a destSE'''
res = dirac.replicateFile(lfn, destSE, srcSE, locCache)
return res
@diracCommand
def removeReplica(lfn, sE):
''' Remove the physical files and LFN from the DFC'''
return dirac.removeReplica(lfn, sE)
@diracCommand
def getOutputData(id, outputFiles='', destinationDir=''):
    ''' Return output data of a requested DIRAC Job id, placing outputFiles in a given destinationDir '''
return dirac.getJobOutputData(id, outputFiles, destinationDir)
@diracCommand
def splitInputData(files, files_per_job):
    ''' Split a list of files into a list of smaller lists (each below files_per_job in length) and return the list of lists '''
return dirac.splitInputData(files, files_per_job)
@diracCommand
def getInputDataCatalog(lfns, site, xml_file):
''' Get the XML describing the given LFNs at a given site'''
return dirac.getInputDataCatalog(lfns, site, xml_file)
@diracCommand
def uploadFile(lfn, file, diracSEs, guid=None):
    ''' Upload a given file to an LFN with one replica placed at each element in diracSEs. Use the given guid if provided '''
outerr = {}
for se in diracSEs:
result = dirac.addFile(lfn, file, se, guid)
if result.get('OK', False) and lfn in result.get('Value', {'Successful': {}})['Successful']:
result['Value']['Successful'][lfn].update({'DiracSE': se})
md = dirac.getLfnMetadata(lfn)
if md.get('OK', False) and lfn in md.get('Value', {'Successful': {}})['Successful']:
guid = md['Value']['Successful'][lfn]['GUID']
result['Value']['Successful'][lfn].update({'GUID': guid})
return result
outerr.update({se: result})
return outerr
@diracCommand
def addFile(lfn, file, diracSE, guid):
    ''' Upload a given file to an LFN with a replica placed at the given diracSE. Use the given guid if provided '''
return dirac.addFile(lfn, file, diracSE, guid)
@diracCommand
def getOutputSandbox(id, outputDir=os.getcwd(), unpack=True, oversized=True, noJobDir=True, pipe_out=True):
'''
Get the outputsandbox and return the output from Dirac to the calling function
id: the DIRAC jobid of interest
    outputDir: local output directory on disk to use
    oversized: is this output sandbox oversized; this will be modified
    noJobDir: should we create a folder with the DIRAC job ID?
    pipe_out: should the DIRAC output be piped out or returned as a python object (False)
    unpack: should the sandbox be untarred when downloaded'''
result = dirac.getOutputSandbox(id, outputDir, oversized, noJobDir, unpack)
if result is not None and result.get('OK', False):
if not noJobDir:
tmpdir = os.path.join(outputDir, str(id))
os.system('mv -f %s/* %s/. ; rm -rf %s' % (tmpdir, outputDir, tmpdir))
os.system('for file in $(ls %s/*Ganga_*.log); do ln -s ${file} %s/stdout; break; done' % (outputDir, outputDir))
#So the download failed. Maybe the sandbox was oversized and stored on the grid. Check in the job parameters and download it
else:
parameters = dirac.getJobParameters(id)
if parameters is not None and parameters.get('OK', False):
parameters = parameters['Value']
if 'OutputSandboxLFN' in parameters:
result = dirac.getFile(parameters['OutputSandboxLFN'], destDir=outputDir)
dirac.removeFile(parameters['OutputSandboxLFN'])
return result
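# Illustrative usage (the job id and path are assumptions, not from the
# original source):
# getOutputSandbox(12345, outputDir='/tmp/job12345', unpack=True, oversized=True,
#                  noJobDir=True, pipe_out=False)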
@diracCommand
def getOutputDataInfo(id, pipe_out=True):
''' Get information on the output data generated by a job of ID and pipe it out or return it'''
ret = {}
result = getOutputDataLFNs(id, pipe_out=False)
if result.get('OK', False) and 'Value' in result:
for lfn in result.get('Value', []):
file_name = os.path.basename(lfn)
ret[file_name] = {}
ret[file_name]['LFN'] = lfn
md = dirac.getLfnMetadata(lfn)
if md.get('OK', False) and lfn in md.get('Value', {'Successful': {}})['Successful']:
ret[file_name]['GUID'] = md['Value']['Successful'][lfn]['GUID']
# this catches if fail upload, note lfn still exists in list as
# dirac tried it
elif md.get('OK', False) and lfn in md.get('Value', {'Failed': {}})['Failed']:
ret[file_name]['LFN'] = '###FAILED###'
ret[file_name]['LOCATIONS'] = md['Value']['Failed'][lfn]
ret[file_name]['GUID'] = 'NotAvailable'
continue
rp = dirac.getReplicas(lfn)
if rp.get('OK', False) and lfn in rp.get('Value', {'Successful': {}})['Successful']:
ret[file_name]['LOCATIONS'] = rp['Value']['Successful'][lfn].keys()
return ret
# could shrink this with dirac.getJobOutputLFNs from ##dirac
@diracCommand
def getOutputDataLFNs(id, pipe_out=True):
    ''' Get the output data LFNs which have been generated by a DIRAC job of ID and pipe them out or return them '''
parameters = dirac.getJobParameters(id)
lfns = []
ok = False
message = 'The outputdata LFNs could not be found.'
if parameters is not None and parameters.get('OK', False):
parameters = parameters['Value']
# remove the sandbox if it has been uploaded
sandbox = None
if 'OutputSandboxLFN' in parameters:
sandbox = parameters['OutputSandboxLFN']
# now find out about the outputdata
if 'UploadedOutputData' in parameters:
lfn_list = parameters['UploadedOutputData']
import re
lfns = re.split(',\s*', lfn_list)
if sandbox is not None and sandbox in lfns:
lfns.remove(sandbox)
ok = True
elif parameters is not None and 'Message' in parameters:
message = parameters['Message']
result = {'OK': ok}
if ok:
result['Value'] = lfns
else:
result['Message'] = message
return result
@diracCommand
def normCPUTime(id, pipe_out=True):
    ''' Get the normalised CPU time that has been used by a DIRAC job of ID and pipe it out or return it '''
parameters = dirac.getJobParameters(id)
ncput = None
if parameters is not None and parameters.get('OK', False):
parameters = parameters['Value']
if 'NormCPUTime(s)' in parameters:
ncput = parameters['NormCPUTime(s)']
return ncput
@diracCommand
def finished_job(id, outputDir=os.getcwd(), unpack=True, oversized=True, noJobDir=True, downloadSandbox = True):
    ''' Nested function to reduce the number of calls made against DIRAC when finalising a job; takes the same arguments as getOutputSandbox.
    Returns the normalised CPU time, the output sandbox result, the output data info and the completion state time as a tuple'''
out_cpuTime = normCPUTime(id, pipe_out=False)
if downloadSandbox:
out_sandbox = getOutputSandbox(id, outputDir, unpack, oversized, noJobDir, pipe_out=False)
else:
out_sandbox = None
out_dataInfo = getOutputDataInfo(id, pipe_out=False)
outStateTime = {'completed' : getStateTime(id, 'completed', pipe_out=False)}
return (out_cpuTime, out_sandbox, out_dataInfo, outStateTime)
@diracCommand
def finaliseJobs(inputDict, statusmapping, downloadSandbox=True, oversized=True, noJobDir=True):
    ''' A function to gather everything needed to finalise a whole batch of jobs. Returns a dict of job information and a dict of statuses. '''
returnDict = {}
statusList = dirac.getJobStatus(list(inputDict))
for diracID in inputDict:
returnDict[diracID] = {}
returnDict[diracID]['cpuTime'] = normCPUTime(diracID, pipe_out=False)
if downloadSandbox:
returnDict[diracID]['outSandbox'] = getOutputSandbox(diracID, inputDict[diracID], oversized, noJobDir, pipe_out=False)
else:
returnDict[diracID]['outSandbox'] = None
returnDict[diracID]['outDataInfo'] = getOutputDataInfo(diracID, pipe_out=False)
returnDict[diracID]['outStateTime'] = {'completed' : getStateTime(diracID, 'completed', pipe_out=False)}
return returnDict, statusList
@diracCommand
def status(job_ids, statusmapping, pipe_out=True):
    '''Function to check the statuses and return the Ganga status of a job after mapping its DIRAC status to a Ganga one'''
# Translate between the many statuses in DIRAC and the few in Ganga
#return {'OK':True, 'Value':[['WIP', 'WIP', 'WIP', 'WIP', 'WIP']]}
result = dirac.getJobStatus(job_ids)
if not result['OK']:
return result
status_list = []
bulk_status = result['Value']
for _id in job_ids:
job_status = bulk_status.get(_id, {})
minor_status = job_status.get('MinorStatus', None)
dirac_status = job_status.get('Status', None)
dirac_site = job_status.get('Site', None)
ganga_status = statusmapping.get(dirac_status, None)
if ganga_status is None:
ganga_status = 'failed'
dirac_status = 'Unknown: No status for Job'
#if dirac_status == 'Completed' and (minor_status not in ['Pending Requests']):
# ganga_status = 'running'
if minor_status in ['Uploading Output Data']:
ganga_status = 'running'
try:
from DIRAC.Core.DISET.RPCClient import RPCClient
monitoring = RPCClient('WorkloadManagement/JobMonitoring')
app_status = monitoring.getJobAttributes(_id)['Value']['ApplicationStatus']
except:
app_status = "unknown ApplicationStatus"
status_list.append([minor_status, dirac_status, dirac_site, ganga_status, app_status])
return status_list
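# Illustrative shape of the statusmapping argument (an assumption for
# documentation; the real mapping is supplied by the Ganga Dirac backend
# configuration):
# statusmapping = {
#     'Running': 'running',
#     'Done': 'completed',
#     'Failed': 'failed',
#     'Waiting': 'submitted',
# }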
@diracCommand
def getStateTime(id, status, pipe_out=True):
    ''' Return the state time from DIRAC corresponding to DIRAC job transitions '''
log = dirac.getJobLoggingInfo(id)
if 'Value' not in log:
return None
L = log['Value']
checkstr = ''
if status == 'running':
checkstr = 'Running'
elif status == 'completed':
checkstr = 'Done'
elif status == 'completing':
checkstr = 'Completed'
elif status == 'failed':
checkstr = 'Failed'
else:
checkstr = ''
if checkstr == '':
print("%s" % None)
return
for l in L:
if checkstr in l[0]:
T = datetime.datetime(*(time.strptime(l[3], "%Y-%m-%d %H:%M:%S")[0:6]))
return T
return None
@diracCommand
def getBulkStateTime(job_ids, status, pipe_out=True):
    ''' Function to repeatedly call getStateTime for multiple DIRAC job ids and return the result in a dictionary '''
result = {}
for this_id in job_ids:
result[this_id] = getStateTime(this_id, status, pipe_out=False)
return result
@diracCommand
def monitorJobs(job_ids, status_mapping, pipe_out=True):
''' This combines 'status' and 'getBulkStateTime' into 1 function call for monitoring
'''
status_info = status(job_ids, status_mapping, pipe_out=False)
state_job_status = {}
for job_id, this_stat_info in zip(job_ids, status_info):
if this_stat_info:
update_status = this_stat_info[3]
if update_status not in state_job_status:
state_job_status[update_status] = []
state_job_status[update_status].append(job_id)
state_info = {}
for this_status, these_jobs in state_job_status.items():
state_info[this_status] = getBulkStateTime(these_jobs, this_status, pipe_out=False)
return (status_info, state_info)
@diracCommand
def timedetails(id):
''' Function to return the getJobLoggingInfo for a DIRAC Job of id'''
log = dirac.getJobLoggingInfo(id)
d = {}
for i in range(0, len(log['Value'])):
d[i] = log['Value'][i]
return d
# DiracAdmin commands
#/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
@diracCommand
def getJobPilotOutput(id, dir):
''' Get the output of the DIRAC pilot that this job was running on and place it in dir'''
pwd = os.getcwd()
try:
os.chdir(dir)
os.system('rm -f pilot_%d/std.out && rmdir pilot_%d ' % (id, id))
result = DiracAdmin().getJobPilotOutput(id)
finally:
os.chdir(pwd)
return result
@diracCommand
def getServicePorts():
''' Get the service ports from the DiracAdmin based upon the Dirac config'''
return DiracAdmin().getServicePorts()
@diracCommand
def isSEArchive(se):
''' Ask if the specified SE is for archive '''
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
return DMSHelpers().isSEArchive(se)
@diracCommand
def getSitesForSE(se):
''' Get the Sites associated with this SE'''
from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE
result = getSitesForSE(storageElement=se)
return result
@diracCommand
def getSEsForSite(site):
''' Get the list of SE associated with this site'''
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
result = getSEsForSite(site)
return result
@diracCommand
def getSESiteMapping():
'''Get the mapping of SEs and sites'''
from DIRAC.Core.Utilities.SiteSEMapping import getSESiteMapping
result = getSESiteMapping()
return result
@diracCommand
def checkSEStatus(se, access = 'Write'):
''' returns the value of a certain SE status flag (access or other)
param se: Storage Element name
type se: string
param access: type of access
type access: string in ('Read', 'Write', 'Remove', 'Check')
returns: True or False
'''
result = dirac.checkSEAccess(se, access)
return result
@diracCommand
def listFiles(baseDir, minAge = None):
''' Return a list of LFNs for files stored on the grid in the argument
directory and its subdirectories
param baseDir: Top directory to begin search
type baseDir: string
param minAge: minimum age of files to be returned
type minAge: string format: "W:D:H"
'''
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
fc = FileCatalog()
from datetime import datetime, timedelta
withMetaData = False
cutoffTime = datetime.utcnow()
import re
    r = re.compile(r'\d+:\d+:\d+')
    if minAge and r.match(minAge):
withMetaData = True
timeList = minAge.split(':')
timeLimit = timedelta(weeks = int(timeList[0]), days = int(timeList[1]), hours = int(timeList[2]))
cutoffTime = datetime.utcnow() - timeLimit
baseDir = baseDir.rstrip('/')
activeDirs = [baseDir]
allFiles = []
emptyDirs = []
while len(activeDirs) > 0:
currentDir = activeDirs.pop()
res = fc.listDirectory(currentDir, withMetaData, timeout = 360)
if not res['OK']:
return "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] )
elif currentDir in res['Value']['Failed']:
return "Error retrieving directory contents", "%s %s" % ( currentDir, res['Value']['Failed'][currentDir] )
else:
dirContents = res['Value']['Successful'][currentDir]
subdirs = dirContents['SubDirs']
files = dirContents['Files']
if not subdirs and not files:
emptyDirs.append( currentDir )
else:
for subdir in sorted( subdirs, reverse=True):
if (not withMetaData) or subdirs[subdir]['CreationDate'] < cutoffTime:
activeDirs.append(subdir)
for filename in sorted(files):
fileOK = False
if (not withMetaData) or files[filename]['MetaData']['CreationDate'] < cutoffTime:
fileOK = True
if not fileOK:
files.pop(filename)
allFiles += sorted(files)
return allFiles
| gpl-2.0 | -7,655,071,968,776,760,000 | 35.094675 | 139 | 0.640109 | false | 3.670277 | false | false | false |
rafaelvieiras/script.pseudotv.live | resources/lib/ChannelListThread.py | 1 | 9795 | # Copyright (C) 2011 Jason Anderson
#
#
# This file is part of PseudoTV.
#
# PseudoTV is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV. If not, see <http://www.gnu.org/licenses/>.
import xbmc, xbmcgui, xbmcaddon
import subprocess, os
import time, threading
import datetime
import sys, re
import random, traceback
from ChannelList import ChannelList
from Channel import Channel
from Globals import *
from Artdownloader import *
class ChannelListThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.myOverlay = None
sys.setcheckinterval(25)
self.chanlist = ChannelList()
self.paused = False
self.fullUpdating = True
self.Artdownloader = Artdownloader()
def log(self, msg, level = xbmc.LOGDEBUG):
log('ChannelListThread: ' + msg, level)
def run(self):
self.log("Starting")
self.chanlist.exitThread = False
self.chanlist.readConfig()
self.chanlist.sleepTime = 0.1
if self.myOverlay == None:
self.log("Overlay not defined. Exiting.")
return
self.chanlist.myOverlay = self.myOverlay
self.fullUpdating = (self.myOverlay.backgroundUpdating == 0)
validchannels = 0
for i in range(self.myOverlay.maxChannels):
self.chanlist.channels.append(Channel())
if self.myOverlay.channels[i].isValid:
validchannels += 1
# Don't load invalid channels if minimum threading mode is on
if self.fullUpdating and self.myOverlay.isMaster:
if validchannels < self.chanlist.enteredChannelCount:
title = 'PseudoTV Live, Background Loading...'
xbmc.executebuiltin('XBMC.Notification(%s, %s, %s)' % (title, 4000 , THUMB))
for i in range(self.myOverlay.maxChannels):
if self.myOverlay.channels[i].isValid == False:
while True:
if self.myOverlay.isExiting:
self.log("Closing thread")
return
time.sleep(1)
if self.paused == False:
break
self.chanlist.channels[i].setAccessTime(self.myOverlay.channels[i].lastAccessTime)
try:
if self.chanlist.setupChannel(i + 1, True, True, False) == True:
while self.paused:
if self.myOverlay.isExiting:
self.log("IsExiting")
return
time.sleep(1)
self.myOverlay.channels[i] = self.chanlist.channels[i]
if self.myOverlay.channels[i].isValid == True:
title = "PseudoTV Live, Channel " + str(i + 1) + " Added"
xbmc.executebuiltin('XBMC.Notification(%s, %s, %s)' % (title, 4000, THUMB))
except Exception,e:
self.log("Unknown Channel Creation Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
return
REAL_SETTINGS.setSetting('ForceChannelReset', 'false')
self.chanlist.sleepTime = 0.3
if REAL_SETTINGS.getSetting("ArtService_Enabled") == "true":
InfoTimer = INFOBAR_TIMER[int(REAL_SETTINGS.getSetting('InfoTimer'))]
self.ArtServiceThread = threading.Timer(float(InfoTimer), self.Artdownloader.ArtService)
self.ArtServiceThread.name = "ArtServiceThread"
self.ArtServiceThread.start()
while True:
for i in range(self.myOverlay.maxChannels):
modified = True
while modified == True and self.myOverlay.channels[i].getTotalDuration() < PREP_CHANNEL_TIME and self.myOverlay.channels[i].Playlist.size() < 16288:
# If minimum updating is on, don't attempt to load invalid channels
if self.fullUpdating == False and self.myOverlay.channels[i].isValid == False and self.myOverlay.isMaster:
break
modified = False
if self.myOverlay.isExiting:
self.log("Closing thread")
return
time.sleep(2)
curtotal = self.myOverlay.channels[i].getTotalDuration()
if self.myOverlay.isMaster:
if curtotal > 0:
# When appending, many of the channel variables aren't set, so copy them over.
# This needs to be done before setup since a rule may use one of the values.
# It also needs to be done after since one of them may have changed while being setup.
self.chanlist.channels[i].playlistPosition = self.myOverlay.channels[i].playlistPosition
self.chanlist.channels[i].showTimeOffset = self.myOverlay.channels[i].showTimeOffset
self.chanlist.channels[i].lastAccessTime = self.myOverlay.channels[i].lastAccessTime
self.chanlist.channels[i].totalTimePlayed = self.myOverlay.channels[i].totalTimePlayed
self.chanlist.channels[i].isPaused = self.myOverlay.channels[i].isPaused
self.chanlist.channels[i].mode = self.myOverlay.channels[i].mode
# Only allow appending valid channels, don't allow erasing them
try:
self.chanlist.setupChannel(i + 1, True, False, True)
except Exception,e:
self.log("Unknown Channel Appending Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
return
self.chanlist.channels[i].playlistPosition = self.myOverlay.channels[i].playlistPosition
self.chanlist.channels[i].showTimeOffset = self.myOverlay.channels[i].showTimeOffset
self.chanlist.channels[i].lastAccessTime = self.myOverlay.channels[i].lastAccessTime
self.chanlist.channels[i].totalTimePlayed = self.myOverlay.channels[i].totalTimePlayed
self.chanlist.channels[i].isPaused = self.myOverlay.channels[i].isPaused
self.chanlist.channels[i].mode = self.myOverlay.channels[i].mode
else:
try:
self.chanlist.setupChannel(i + 1, True, True, False)
except Exception,e:
self.log("Unknown Channel Modification Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
return
else:
try:
# We're not master, so no modifications...just try and load the channel
self.chanlist.setupChannel(i + 1, True, False, False)
except Exception,e:
self.log("Unknown Channel Loading Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
return
self.myOverlay.channels[i] = self.chanlist.channels[i]
if self.myOverlay.isMaster:
ADDON_SETTINGS.setSetting('Channel_' + str(i + 1) + '_time', str(self.myOverlay.channels[i].totalTimePlayed))
if self.myOverlay.channels[i].getTotalDuration() > curtotal and self.myOverlay.isMaster:
modified = True
# A do-while loop for the paused state
while True:
if self.myOverlay.isExiting:
self.log("Closing thread")
return
time.sleep(2)
if self.paused == False:
break
timeslept = 0
if self.fullUpdating == False and self.myOverlay.isMaster:
return
# If we're master, wait 30 minutes in between checks. If not, wait 5 minutes.
while (timeslept < 1800 and self.myOverlay.isMaster == True) or (timeslept < 300 and self.myOverlay.isMaster == False):
if self.myOverlay.isExiting:
self.log("IsExiting")
return
time.sleep(2)
timeslept += 2
self.log("All channels up to date. Exiting thread.")
def pause(self):
self.paused = True
self.chanlist.threadPaused = True
def unpause(self):
self.paused = False
self.chanlist.threadPaused = False
| gpl-3.0 | -5,893,234,249,640,738,000 | 44.347222 | 164 | 0.54099 | false | 4.709135 | false | false | false |
AnoopAlias/nDeploy | scripts/update_cluster_ipmap.py | 1 | 1898 | #!/usr/bin/env python
import yaml
import argparse
import os
__author__ = "Anoop P Alias"
__copyright__ = "Copyright 2014, PiServe Technologies Pvt Ltd , India"
__license__ = "GPL"
__email__ = "[email protected]"
installation_path = "/opt/nDeploy" # Absolute Installation Path
cluster_config_file = installation_path+"/conf/ndeploy_cluster.yaml"
# Function defs
def update_ip_map(server, iphere, ipthere):
cluster_data_yaml = open(cluster_config_file, 'r')
cluster_data_yaml_parsed = yaml.safe_load(cluster_data_yaml)
cluster_data_yaml.close()
if cluster_data_yaml_parsed:
if server in cluster_data_yaml_parsed.keys():
connect_server_dict = cluster_data_yaml_parsed.get(server)
ipmap_dict = connect_server_dict.get("ipmap")
ipmap_dict[iphere] = ipthere
with open(cluster_config_file, 'w') as yaml_file:
yaml_file.write(yaml.dump(cluster_data_yaml_parsed, default_flow_style=False))
else:
mydict = {server: {'ipmap': {iphere: ipthere}}}
cluster_data_yaml_parsed.update(mydict)
with open(cluster_config_file, 'w') as yaml_file:
yaml_file.write(yaml.dump(cluster_data_yaml_parsed, default_flow_style=False))
else:
print("Invalid cluster data")
parser = argparse.ArgumentParser(description="create/update nDeploy-cluster ipmap")
parser.add_argument("slave_hostname")
parser.add_argument("ip_here")
parser.add_argument("remote_ip")
args = parser.parse_args()
server_key = args.slave_hostname
ip_here = args.ip_here
remote_ip = args.remote_ip
if os.path.isfile(cluster_config_file):
update_ip_map(server_key, ip_here, remote_ip)
else:
mydict = {server_key: {'ipmap': {ip_here: remote_ip}}}
with open(cluster_config_file, 'w') as cluster_conf:
cluster_conf.write(yaml.dump(mydict, default_flow_style=False))
| gpl-3.0 | 6,655,193,397,231,080,000 | 34.148148 | 94 | 0.674921 | false | 3.278066 | true | false | false |
letouriste001/SmartForest_2.0 | python3.4Smartforest/lib/python3.4/site-packages/django/db/migrations/recorder.py | 1 | 2868 | from __future__ import unicode_literals
from django.apps.registry import Apps
from django.db import models
from django.db.utils import DatabaseError
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from .exceptions import MigrationSchemaMissing
class MigrationRecorder(object):
"""
Deals with storing migration records in the database.
Because this table is actually itself used for dealing with model
creation, it's the one thing we can't do normally via migrations.
We manually handle table creation/schema updating (using schema backend)
and then have a floating model to do queries with.
If a migration is unapplied its row is removed from the table. Having
a row in the table always means a migration is applied.
"""
@python_2_unicode_compatible
class Migration(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField(default=now)
class Meta:
apps = Apps()
app_label = "migrations"
db_table = "django_migrations"
def __str__(self):
return "Migration %s for %s" % (self.name, self.app)
def __init__(self, connection):
self.connection = connection
@property
def migration_qs(self):
return self.Migration.objects.using(self.connection.alias)
def ensure_schema(self):
"""
Ensures the table exists and has the correct schema.
"""
# If the table's there, that's fine - we've never changed its schema
# in the codebase.
if self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()):
return
# Make the table
try:
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
except DatabaseError as exc:
raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
def applied_migrations(self):
"""
Returns a set of (app, name) of applied migrations.
"""
self.ensure_schema()
return set(tuple(x) for x in self.migration_qs.values_list("app", "name"))
def record_applied(self, app, name):
"""
Records that a migration was applied.
"""
self.ensure_schema()
self.migration_qs.create(app=app, name=name)
def record_unapplied(self, app, name):
"""
Records that a migration was unapplied.
"""
self.ensure_schema()
self.migration_qs.filter(app=app, name=name).delete()
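    # Illustrative usage from calling code (a sketch; the migration executor
    # normally drives these calls):
    # recorder = MigrationRecorder(connection)
    # if ('myapp', '0001_initial') not in recorder.applied_migrations():
    #     recorder.record_applied('myapp', '0001_initial')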
def flush(self):
"""
Deletes all migration records. Useful if you're testing migrations.
"""
self.migration_qs.all().delete()
| mit | 213,370,591,806,448,500 | 32.348837 | 112 | 0.642957 | false | 3.907357 | false | false | false |
staffanm/layeredconfig | layeredconfig/dictsource.py | 1 | 1625 | # this should possibly be a abstract class as well
from . import ConfigSource
class DictSource(ConfigSource):
def __init__(self, **kwargs):
"""If your backend data is exposable as a python dict, you can
subclass from this class to avoid implementing :py:meth:`has`,
:py:meth:`get`, :py:meth:`keys`, :py:meth:`subsection` and
:py:meth:`subsections`. You only need to write
:py:meth:`__init__` (which should set ``self.source`` to that
exposed dict), and possibly :py:meth:`typed` and
:py:meth:`save`.
"""
super(DictSource, self).__init__(**kwargs)
self.source = {}
def subsections(self):
for (k, v) in self.source.items():
if isinstance(v, dict):
yield k
def keys(self):
for (k, v) in self.source.items():
if not isinstance(v, dict) and not isinstance(v, type):
yield k
def subsection(self, key):
# Make an object of the correct type
return self.__class__(defaults=self.source[key],
parent=self,
identifier=self.identifier)
def typed(self, key):
# if we have it, we can type it
return key in self.source and self.source[key] is not None
def has(self, key):
# should return true for real values only, not type placeholders or sub-dicts
return key in self.source and not isinstance(self.source[key], (type, dict))
def get(self, key):
return self.source[key]
def set(self, key, value):
self.source[key] = value
| bsd-3-clause | 8,381,840,833,951,817,000 | 33.574468 | 85 | 0.580923 | false | 4.0625 | false | false | false |
samervin/arctic-scavengers-randomizer | arctic_cards/leaders.py | 1 | 3619 | # Fields
NAME = 'name'
SET = 'set'
USES_REFUGEES = 'uses-refugees'
TEXT = 'text'
# Set values
HQ_EXP = 'hq'
RECON_EXP = 'recon'
# Information not strictly contained on the card
COMMENT = 'comment'
class Leaders:
ALL_LEADERS = [
{
NAME: 'The Peacemaker',
SET: HQ_EXP,
USES_REFUGEES: True,
            TEXT: 'Each round you may play 1 Refugee to increase the power of another tribe member\'s hunt or dig actions by +2.'
},
{
NAME: 'The Gangster',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Your Refugees have a fight of 0 and they count as 2 people for the purpose of breaking tied skirmishes.'
},
{
NAME: 'The Butcher',
SET: HQ_EXP,
TEXT: 'Each round you may kill 1 of your tribe members (remove the card permanently from play) and sell his/her internal organs for 1 food and 1 med.'
},
{
NAME: 'The Fanatic',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Each round you may use 1 Refugee from your hand as a suicide bomber against an opponent. '
'Discard 1 of your opponent\'s revealed cards (your choice), the Refugee dies in the process (remove card from play).'
},
{
NAME: 'The Organizer',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Each round you may play 1 Refugee to perform a draw of 2, but only keep 1. '
'No other cards may be played to modify this draw and you may not perform another draw this round.'
},
{
NAME: 'The Cannibal',
SET: HQ_EXP,
TEXT: 'Each round you may cannibalize 1 tribe member for 3 food (and subsequently remove that card from play). '
'You may not combine food from hunting or a garden when hiring with cannibalized food.'
},
{
NAME: 'The Sergent at Arms',
SET: HQ_EXP,
TEXT: 'You are immune to the disarm action, preventing saboteurs from discarding your tools. '
'When hiring saboteurs, you pay no food (cost for you is 1 med).',
COMMENT: 'This card is misspelled as printed: the correct spelling is Sergeant.'
},
{
NAME: 'The Mentor',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'Each round you may play 1 Refugee card to grant another tribe member a +1 to any action.'
},
{
NAME: 'The Excavator',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'All of your Refugees have a dig of 1. '
'If a Refugee uses a digging tool (i.e. shovel or a pick axe), ignore the tool\'s normal bonus and add +1 to the score.'
},
{
NAME: 'The Ranger',
SET: HQ_EXP,
USES_REFUGEES: True,
TEXT: 'All of your Refugees and Tribe Families have a hunt of 1.'
},
{
NAME: 'The Swindler',
SET: RECON_EXP,
USES_REFUGEES: True,
TEXT: 'Once per turn, you may discard 1 Refugee to persuade a mercenary into joining your tribe for 1 less food '
'or discard two Refugees to reduce the price by 1 med.'
},
{
NAME: 'The Yardmaster',
SET: RECON_EXP,
TEXT: 'Once per turn, you may peek at the top 2 cards of the Junkyard. '
'Return both of them to the top or bottom of the Junkyard.'
}
]
| mit | 6,301,782,325,497,952,000 | 37.913978 | 162 | 0.546284 | false | 3.604582 | false | false | false |
Samuel789/MediPi | MedManagementWeb/env/lib/python3.5/site-packages/Crypto/Cipher/DES.py | 1 | 7100 | # -*- coding: utf-8 -*-
#
# Cipher/DES.py : DES
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""DES symmetric cipher
DES `(Data Encryption Standard)`__ is a symmetric block cipher standardized
by NIST_ . It has a fixed data block size of 8 bytes.
Its keys are 64 bits long, even though 8 bits were used for integrity (now they
are ignored) and do not contribute to securty. The effective key length is
therefore 56 bits only.
DES is cryptographically secure, but its key length is too short by nowadays
standards and it could be brute forced with some effort.
**Use AES, not DES. This module is provided only for legacy purposes.**
As an example, encryption can be done as follows:
>>> from Crypto.Cipher import DES
>>>
>>> key = b'-8B key-'
>>> cipher = DES.new(key, DES.MODE_OFB)
>>> plaintext = b'sona si latine loqueris '
>>> msg = cipher.iv + cipher.encrypt(plaintext)
.. __: http://en.wikipedia.org/wiki/Data_Encryption_Standard
.. _NIST: http://csrc.nist.gov/publications/fips/fips46-3/fips46-3.pdf
:undocumented: __package__
"""
import sys
from Crypto.Cipher import _create_cipher
from Crypto.Util.py3compat import byte_string
from Crypto.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
c_size_t, expect_byte_string)
_raw_des_lib = load_pycryptodome_raw_lib(
"Crypto.Cipher._raw_des",
"""
int DES_start_operation(const uint8_t key[],
size_t key_len,
void **pResult);
int DES_encrypt(const void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int DES_decrypt(const void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int DES_stop_operation(void *state);
""")
def _create_base_cipher(dict_parameters):
"""This method instantiates and returns a handle to a low-level
base cipher. It will absorb named parameters in the process."""
try:
key = dict_parameters.pop("key")
except KeyError:
raise TypeError("Missing 'key' parameter")
expect_byte_string(key)
if len(key) != key_size:
raise ValueError("Incorrect DES key length (%d bytes)" % len(key))
start_operation = _raw_des_lib.DES_start_operation
stop_operation = _raw_des_lib.DES_stop_operation
cipher = VoidPointer()
result = start_operation(key,
c_size_t(len(key)),
cipher.address_of())
if result:
raise ValueError("Error %X while instantiating the DES cipher"
% result)
return SmartPointer(cipher.get(), stop_operation)
def new(key, mode, *args, **kwargs):
"""Create a new DES cipher
:Parameters:
key : byte string
The secret key to use in the symmetric cipher.
It must be 8 byte long. The parity bits will be ignored.
:Keywords:
mode : a *MODE_** constant
The chaining mode to use for encryption or decryption.
iv : byte string
(*Only* `MODE_CBC`, `MODE_CFB`, `MODE_OFB`, `MODE_OPENPGP`).
The initialization vector to use for encryption or decryption.
For `MODE_OPENPGP`, IV must be 8 bytes long for encryption
and 10 bytes for decryption (in the latter case, it is
actually the *encrypted* IV which was prefixed to the ciphertext).
For all other modes, it must be 8 bytes long.
If not provided, a random byte string is generated (you can read it
back via the ``iv`` attribute).
nonce : byte string
(*Only* `MODE_EAX` and `MODE_CTR`).
A mandatory value that must never be reused for any other encryption.
For `MODE_CTR`, its length must be in the range ``[0..7]``.
For `MODE_EAX`, there are no restrictions, but it is recommended to
use at least 16 bytes.
If not provided for `MODE_EAX`, a random byte string is generated (you
can read it back via the ``nonce`` attribute).
mac_len : integer
(*Only* `MODE_EAX`). Length of the authentication tag, in bytes.
It must be no larger than 8 (which is the default).
segment_size : integer
(*Only* `MODE_CFB`).The number of **bits** the plaintext and ciphertext
are segmented in. It must be a multiple of 8.
If not specified, it will be assumed to be 8.
initial_value : integer
(*Only* `MODE_CTR`). The initial value for the counter within
the counter block. By default it is 0.
:Return: a DES cipher, of the applicable mode:
- CBC_ mode
- CFB_ mode
- CTR_ mode
- EAX_ mode
- ECB_ mode
- OFB_ mode
- OpenPgp_ mode
.. _CBC: Crypto.Cipher._mode_cbc.CbcMode-class.html
.. _CFB: Crypto.Cipher._mode_cfb.CfbMode-class.html
.. _CTR: Crypto.Cipher._mode_ctr.CtrMode-class.html
.. _EAX: Crypto.Cipher._mode_eax.EaxMode-class.html
.. _ECB: Crypto.Cipher._mode_ecb.EcbMode-class.html
.. _OFB: Crypto.Cipher._mode_ofb.OfbMode-class.html
.. _OpenPgp: Crypto.Cipher._mode_openpgp.OpenPgpMode-class.html
"""
return _create_cipher(sys.modules[__name__], key, mode, *args, **kwargs)
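# Illustrative decryption counterpart to the module-level encryption example
# above (a sketch, not part of the original docstring):
#     cipher = DES.new(key, DES.MODE_OFB, iv=msg[:8])
#     plaintext = cipher.decrypt(msg[8:])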
#: Electronic Code Book (ECB). See `Crypto.Cipher._mode_ecb.EcbMode`.
MODE_ECB = 1
#: Cipher-Block Chaining (CBC). See `Crypto.Cipher._mode_cbc.CbcMode`.
MODE_CBC = 2
#: Cipher FeedBack (CFB). See `Crypto.Cipher._mode_cfb.CfbMode`.
MODE_CFB = 3
#: Output FeedBack (OFB). See `Crypto.Cipher._mode_ofb.OfbMode`.
MODE_OFB = 5
#: CounTer Mode (CTR). See `Crypto.Cipher._mode_ctr.CtrMode`.
MODE_CTR = 6
#: OpenPGP Mode. See `Crypto.Cipher._mode_openpgp.OpenPgpMode`.
MODE_OPENPGP = 7
#: EAX Mode. See `Crypto.Cipher._mode_eax.EaxMode`.
MODE_EAX = 9
#: Size of a data block (in bytes)
block_size = 8
#: Size of a key (in bytes)
key_size = 8
| apache-2.0 | -5,694,647,605,484,890,000 | 35.787565 | 79 | 0.613521 | false | 3.815153 | false | false | false |
harikishen/addons-server | src/olympia/amo/tasks.py | 1 | 2584 | import datetime
from django.core.mail import EmailMessage, EmailMultiAlternatives
import olympia.core.logger
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.amo.celery import task
from olympia.amo.utils import get_email_backend
from olympia.bandwagon.models import Collection
from olympia.stats.models import Contribution
log = olympia.core.logger.getLogger('z.task')
@task
def send_email(recipient, subject, message, from_email=None,
html_message=None, attachments=None, real_email=False,
cc=None, headers=None, fail_silently=False, async=False,
max_retries=None, reply_to=None, **kwargs):
backend = EmailMultiAlternatives if html_message else EmailMessage
connection = get_email_backend(real_email)
result = backend(subject, message, from_email, to=recipient, cc=cc,
connection=connection, headers=headers,
attachments=attachments, reply_to=reply_to)
if html_message:
result.attach_alternative(html_message, 'text/html')
try:
result.send(fail_silently=False)
return True
except Exception as e:
log.error('send_mail failed with error: %s' % e)
if async:
return send_email.retry(exc=e, max_retries=max_retries)
elif not fail_silently:
raise
else:
return False
@task
def set_modified_on_object(obj, **kw):
"""Sets modified on one object at a time."""
try:
log.info('Setting modified on object: %s, %s' %
(obj.__class__.__name__, obj.pk))
obj.update(modified=datetime.datetime.now())
except Exception, e:
log.error('Failed to set modified on: %s, %s - %s' %
(obj.__class__.__name__, obj.pk, e))
@task
def delete_logs(items, **kw):
log.info('[%s@%s] Deleting logs' % (len(items), delete_logs.rate_limit))
ActivityLog.objects.filter(pk__in=items).exclude(
action__in=amo.LOG_KEEP).delete()
@task
def delete_stale_contributions(items, **kw):
log.info('[%s@%s] Deleting stale contributions' %
(len(items), delete_stale_contributions.rate_limit))
Contribution.objects.filter(
transaction_id__isnull=True, pk__in=items).delete()
@task
def delete_anonymous_collections(items, **kw):
log.info('[%s@%s] Deleting anonymous collections' %
(len(items), delete_anonymous_collections.rate_limit))
Collection.objects.filter(type=amo.COLLECTION_ANONYMOUS,
pk__in=items).delete()
| bsd-3-clause | 7,679,414,389,111,565,000 | 33 | 76 | 0.64822 | false | 3.64457 | false | false | false |
sctjkc01/ofCourse | ofcourse/participants.py | 1 | 3800 | import os
from datetime import datetime, date, timedelta
from urlparse import urlparse
import yaml
from flask import Blueprint, redirect
from flask.ext.mako import render_template
import ofcourse
from ofcourse.util import app_path, get_hw_keys
participants_bp = Blueprint('participants_bp',
__name__,
template_folder=app_path('templates'))
currentYear = str(date.today().year)
currentTerm = "fall" if date.today().month > 7 else "spring"
@participants_bp.route('/')
def participants_blank():
"""
This is the default landing
for the participants listing page.
It will list all of the participants
in the current term for HFOSS
"""
return participants_year_term(currentYear, currentTerm)
@participants_bp.route('/<year_or_nick>')
def participants_year(year_or_nick):
"""
This will get all the participants
within a given year
"""
p_url = find_participant(year_or_nick)
if p_url is not None:
# render individual page
return redirect(p_url)
# otherwise render as a year
return participants(year_or_nick + '/')
@participants_bp.route('/<year>/<term>')
def participants_year_term(year, term):
"""
This will get all the participants
within a given year and term
"""
return participants(year + '/' + term + '/')
@participants_bp.route('/all')
def participants_all():
    """
    This will get all the participants
    who have taken HFOSS
    """
    return participants('')
def participants(root_dir):
"""
Render the participants page,
which shows a directory of all
the students with their forge
links, blog posts, assignment
links, and etc.
"""
yaml_dir = app_path('people', root_dir)
student_data = []
for dirpath, dirnames, files in os.walk(yaml_dir):
dirpath = dirpath.rstrip("/")
for fname in sorted(files):
if fname.endswith('.yaml'):
with open(dirpath + '/' + fname) as students:
contents = yaml.safe_load(students)
contents['yaml'] = dirpath + '/' + fname
year_term_data = dirpath.split('/')
contents['participant_page'] = "{y}/{t}/{u}".format(
y=year_term_data[-2],
t=year_term_data[-1],
u=os.path.splitext(fname)[0]
)
for forge in contents['forges']:
url = urlparse(forge)
if "github.com" in url.netloc:
contents['github'] = url.path[1:]
contents['isActive'] = (currentYear in year_term_data and
currentTerm in year_term_data)
student_data.append(contents)
assignments = get_hw_keys()
elapsed = (datetime.today() - ofcourse.site.COURSE_START).total_seconds()
target_number = int(elapsed / timedelta(weeks=1).total_seconds() + 1 +
len(assignments))
return render_template(
'blogs.mak', name='mako',
student_data=student_data,
gravatar=ofcourse.site.gravatar,
target_number=target_number,
hw_keys=assignments
)
def find_participant(nick):
yaml_dir = app_path('people')
for dirpath, dirnames, files in os.walk(yaml_dir):
for fname in files:
if (fname.lower().startswith(nick.lower()) and
fname.endswith('.yaml')):
participant = os.path.join(
dirpath,
fname
).replace(yaml_dir, '')
participant = participant.replace('.yaml', '')
return 'participants' + participant
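# Example-only sketch: resolving a nickname with the helper above. The nick is
# a placeholder; find_participant() returns a relative URL fragment such as
# 'participants/2015/fall/<nick>' or None when no matching YAML file exists.
def _example_find_participant(nick='somenick'):
    return find_participant(nick)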
| apache-2.0 | 3,540,628,806,068,801,000 | 28.6875 | 77 | 0.569737 | false | 4.231626 | false | false | false |
smurfix/DaBroker | dabroker/base/transport/__init__.py | 1 | 4226 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of DaBroker, a distributed data access manager.
##
## DaBroker is Copyright © 2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
from gevent import GreenletExit
from dabroker.util.thread import prep_spawned
import logging
logger = logging.getLogger("dabroker.base.transport")
class ConnectionError(RuntimeError):
pass
class BaseCallbacks(object):
def recv(self,msg):
"""Incoming message from the other side. NOT used for receiving replies!"""
raise NotImplementedError("You need to override {}.recv()".format(self.__class__.__name__))
def send(self,msg):
"""Outgoing message to the other side. NOT used for sending replies!"""
raise NotImplementedError("You need to override {}.send()".format(self.__class__.__name__))
def ended(self,err=None):
"""Called on receiver error. Do not reconnect here!"""
pass
def reconnect(self,err=None):
"""Called after a closed connection has been cleaned up"""
pass
def register_codec(self,codec):
raise NotImplementedError("You need to override {}.register_codec()".format(self.__class__.__name__))
class RelayedError(Exception):
"""An encapsulation for a server error (with traceback)"""
def __init__(self,err,tb):
self.err = str(err)
self.tb = tb
def __repr__(self):
return "{}({})".format(self.__class__.__name__,self.err)
def __str__(self):
r = repr(self)
if self.tb is None: return r
return r+"\n"+self.tb
class BaseTransport(object):
_job = None
defaults = {}
connection = None
last_msgid = 0
def __init__(self,callbacks, cfg={}):
self.cfg = self.defaults.copy()
self.cfg.update(cfg)
self.callbacks = callbacks
self.trace = cfg.get('trace',0)
def connect(self, purge=False):
"""Connect. (Synchronously.)
Do not override!
Override .connect1() (setup) and .connect2() (initial tasks)"""
assert self.callbacks is not None
assert self.connection is None
self.connect1()
if purge:
self.purge_all()
self.connect2()
def connect1(self):
"""Set up a connection.
Call super() before your code."""
if self._job is not None:
raise RuntimeError("Already connected")
logger.debug("connecting: %r",self)
def connect2(self):
"""Add initial tasks after a connection has been established.
Call super() after your code."""
assert self._job is None
self._job = self._run_job()
self._job.start()
def disconnect(self):
"""Sever the connection; do not auto-reconnect."""
logger.debug("disconnecting: %r",self)
j,self._job = self._job,None
if j:
j.stop()
def disconnected(self, err=None):
"""Clear connection objects.
This will be called by the reader task as it exits.
Do not reconnect from here; do that in your .reconnect"""
logger.debug("disconnected: %r",self)
def purge_all(self):
"""
Clear this transport's message queue.
This should only be called when client and server are known to
be idle AND when you suspect an unprocessable message might
clog the queue.
"""
pass
def send(self,msg):
raise NotImplementedError("You need to override {}.send()".format(self.__class__.__name__))
def run(self):
raise NotImplementedError("You need to override {}.run()".format(self.__class__.__name__))
@prep_spawned
def _run_job(self):
try:
logger.debug("Running receiver loop: %r",self)
self.run()
except GreenletExit:
err=None
logger.debug("Receiver loop ends: %r",self)
self.callbacks.ended(None)
except BaseException as e:
err = e
logger.exception("Receiver loop error: %r",self)
self.callbacks.ended(e)
else:
err=None
logger.debug("Receiver loop ends: %r",self)
self.callbacks.ended(None)
finally:
self.disconnected()
if self._job is not None:
self._job = None
self.callbacks.reconnect(err)
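# Example-only sketch of a concrete transport, assuming an in-memory "wire" so
# it stays self-contained; a real implementation would talk to a socket or a
# message broker and feed incoming messages to self.callbacks.recv().
class _LoopbackTransport(BaseTransport):
    def connect1(self):
        super(_LoopbackTransport, self).connect1()
        self.connection = []  # stands in for a real connection object

    def send(self, msg):
        # "Deliver" by queueing locally; run() hands it back to the callbacks.
        self.connection.append(msg)

    def run(self):
        # The receiver loop started by connect2(); drains whatever send() queued.
        while self.connection:
            self.callbacks.recv(self.connection.pop(0))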
| gpl-3.0 | -528,446,127,231,001,700 | 26.769737 | 103 | 0.689647 | false | 3.352661 | false | false | false |
MattFaus/CrowdTube-Connector | youtube.py | 1 | 6824 | import os
import urlparse
from lib import gdata
import lib.gdata.youtube.client
import secrets
GDATA_API_CLIENT_ID = 'CrowdTube-Connector'
class YouTubeCaptionEditor(object):
def __init__(self, google_email, google_password, youtube_username):
self.youtube_username = youtube_username
self.youtube_client = lib.gdata.youtube.client.YouTubeClient()
# We shouldn't need this auth_token, but we'll keep it around
self.auth_token = self.youtube_client.client_login(
google_email, google_password, GDATA_API_CLIENT_ID)
# A dictionary of youtube_id and YouTubeVideo objects
self.videos = {}
def get_videos(self):
# Format copied from lib.gdata.youtube.client.py
feed_uri = '%s%s/%s' % (lib.gdata.youtube.client.YOUTUBE_USER_FEED_URI,
self.youtube_username, 'uploads')
all_videos = self.youtube_client.get_videos(uri=feed_uri)
for video in all_videos.entry:
new_video = YouTubeVideo(video, self.youtube_client)
self.videos[new_video.video_id] = new_video
def get_video(self, video_id):
video_entry = self.youtube_client.get_video_entry(video_id=video_id)
return YouTubeVideo(video_entry, self.youtube_client)
def delete_track(self, video_id, track_id):
"""Deletes an existing track."""
# TODO(mattfaus): Take google_developer_key as a constructor arg?
response = self.youtube_client.delete_track(video_id, track_id,
client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key)
# http://docs.python.org/release/2.2.3/lib/httpresponse-objects.html
if response.status != 200:
print response.status, response.msg
return False
return True
def add_track(self, video_id, title, language, track_content):
"""Adds a caption track.
If a track with the same title already exists, this will silently fail.
"""
# TODO(mattfaus): Take google_developer_key as a constructor arg?
track_content = track_content.encode('utf-8')
response = self.youtube_client.create_track(video_id, title, language,
track_content, client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key, fmt='sub')
# Returns a TrackEntry object
return response
def update_track(self, video_id, track_id, track_content):
"""Adds a caption track."""
# TODO(mattfaus): Take google_developer_key as a constructor arg?
track_content = track_content.encode('utf-8')
response = self.youtube_client.update_track(video_id, track_id,
track_content, client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key, fmt='sub')
# Returns a TrackEntry object
return response
# TODO(mattfaus): Suck these two classes into the YouTubeCaptionEditor, above
# make the YouTubeCaptionEditor behave more like a full-fledged youtube client
# Shouldn't have to pass the youtube_client object around to the sub-classes
# No need to have dictionaries where an array would do just fine (YouTubeVideo.caption_tracks)
class YouTubeVideo(object):
def __init__(self, video_entry, youtube_client=None):
self.youtube_client = youtube_client
# tag:youtube.com,2008:video:SNrEiiJwD4Y
id_parts = video_entry.GetId().split(':')
self.video_id = id_parts[id_parts.index('video') + 1]
self.title = video_entry.title.text
caption_link = video_entry.get_link(
'http://gdata.youtube.com/schemas/2007#video.captionTracks')
self.caption_feed = caption_link.href
# TODO(mattfaus): Make this less ugly
has_entries = [
a.value for a in caption_link.GetAttributes()
if '{http://gdata.youtube.com/schemas/2007}hasEntries' == a._qname]
has_entries = has_entries[0] == 'true'
self.has_entries = has_entries
self.caption_tracks = {}
def get_caption_tracks(self, download=False):
# Don't check self.has_entries. It may be False when only a
# machine-generated caption track exists.
if not self.youtube_client:
raise ValueError('No youtube client available!')
# STOPSHIP(mattfaus): get_caption_feed() only returns the first 24 caption tracks
# so we must iterate to read more
# TODO(mattfaus): Filter this by language with the 'lr' attribute
all_captions = self.youtube_client.get_caption_feed(self.caption_feed)
for caption_entry in all_captions.entry:
new_track = YouTubeCaptionTrack(caption_entry, self.youtube_client)
self.caption_tracks[new_track.track_source] = new_track
if download:
new_track.download_track()
def get_machine_generated_track(self):
self.get_caption_tracks()
for src, caption_track in self.caption_tracks.iteritems():
print src, caption_track
if caption_track.machine_generated:
caption_track.download_track()
return caption_track
class YouTubeCaptionTrack(object):
def __init__(self, caption_entry, youtube_client):
self.youtube_client = youtube_client
self.language = caption_entry.content.lang
self.track_source = caption_entry.content.src
self.machine_generated = YouTubeCaptionTrack._is_machine_generated(
caption_entry)
# Parse the video_id and caption_id out of a url like this:
# https://gdata.youtube.com/feeds/api/videos/Jom6EtXzRMg/captiondata/Ch4LEO3ZhwUaFQjIic2vrcLuxCYSAmVuGgAiA2Fzcgw
o = urlparse.urlparse(self.track_source)
path_parts = o.path.split('/')
self.video_id = path_parts[path_parts.index('videos') + 1]
self.track_id = path_parts[path_parts.index('captiondata') + 1]
self.track_content = None
@staticmethod
def _is_machine_generated(caption_entry):
"""Looks for the derived element, and returns True if it is equal to
speechRecognition.
"""
# TODO(mattfaus): Move this to TrackEntry within youtube/data.py?
derived = caption_entry.GetElements(
tag='derived', namespace='http://gdata.youtube.com/schemas/2007')
if not derived:
return False
else:
derived = derived[0]
return derived.text == 'speechRecognition'
def download_track(self):
response = self.youtube_client.get_caption_track(
track_url=self.track_source, client_id=GDATA_API_CLIENT_ID,
developer_key=secrets.google_developer_key)
self.track_content = response.read(2 ** 31)
return self.track_content
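# Example-only sketch of the classes above; the credentials and video id are
# placeholders, not working values.
def _example_fetch_captions():
    editor = YouTubeCaptionEditor('[email protected]', 'password', 'channelname')
    video = editor.get_video('VIDEOID12345')  # any YouTube video id
    track = video.get_machine_generated_track()
    # None when YouTube has not produced an automatic caption track yet.
    return track.track_content if track else None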
| mit | -7,013,494,189,144,412,000 | 38.445087 | 120 | 0.651231 | false | 3.822969 | false | false | false |
rockfruit/bika.lims | bika/lims/browser/analysisrequest/results_not_requested.py | 1 | 2747 | # This file is part of Bika LIMS
#
# Copyright 2011-2016 by it's authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
from AccessControl import getSecurityManager
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.permissions import *
from bika.lims.browser.analysisrequest import AnalysisRequestManageResultsView
from bika.lims.content.analysisrequest import schema as AnalysisRequestSchema
from bika.lims.utils import to_utf8
from bika.lims.workflow import doActionFor
from plone.app.layout.globals.interfaces import IViewView
from DateTime import DateTime
from Products.Archetypes import PloneMessageFactory as PMF
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.interface import implements
import plone
class AnalysisRequestResultsNotRequestedView(AnalysisRequestManageResultsView):
implements(IViewView)
template = ViewPageTemplateFile("templates/analysisrequest_analyses_not_requested.pt")
def __call__(self):
ar = self.context
workflow = getToolByName(ar, 'portal_workflow')
# If is a retracted AR, show the link to child AR and show a warn msg
if workflow.getInfoFor(ar, 'review_state') == 'invalid':
childar = hasattr(ar, 'getChildAnalysisRequest') \
and ar.getChildAnalysisRequest() or None
childid = childar and childar.getRequestID() or None
message = _('This Analysis Request has been withdrawn and is shown '
'for trace-ability purposes only. Retest: ${retest_child_id}.',
mapping={"retest_child_id":childid if childid else ''})
self.context.plone_utils.addPortalMessage(message, 'warning')
# If is an AR automatically generated due to a Retraction, show it's
# parent AR information
if hasattr(ar, 'getParentAnalysisRequest') \
and ar.getParentAnalysisRequest():
par = ar.getParentAnalysisRequest()
message = _(
'This Analysis Request has been generated automatically due to '
'the retraction of the Analysis Request ${retracted_request_id}.',
mapping={"retracted_request_id": par.getRequestID()})
self.context.plone_utils.addPortalMessage(message, 'info')
can_do = getSecurityManager().checkPermission(ResultsNotRequested, ar)
if workflow.getInfoFor(ar, 'cancellation_state') == "cancelled":
self.request.response.redirect(ar.absolute_url())
elif not(can_do):
self.request.response.redirect(ar.absolute_url())
else:
return self.template()
| agpl-3.0 | 3,269,595,701,656,959,500 | 46.362069 | 90 | 0.699672 | false | 4.174772 | false | false | false |
LongSeanSilvr/DC_Metro_Tracker | development_version/src/general_intents.py | 1 | 1923 | import build_response as br
# ======================================================================================================================
# Skill Behavior: Welcome Response
# ======================================================================================================================
class Welcome(object):
def __init__(self):
self.card_title = "Welcome"
self.reprompt_text = "What station would you like train times for?"
self.flag = "welcome"
def build_response(self):
output = br.build_response(self.card_title, self.flag, reprompt_text=self.reprompt_text)
return output
# ======================================================================================================================
# Skill Intent: Help
# ======================================================================================================================
class Help(object):
def __init__(self, intent, session): # Parameters are here so handler can treat this like the other intent classes
self.card_title = "Help"
self.reprompt_text = "What station would you like train times for?"
self.flag = "help"
def build_response(self):
output = br.build_response(self.card_title, self.flag, reprompt_text=self.reprompt_text)
return output
# ======================================================================================================================
# Skill Intent: Quit
# ======================================================================================================================
class Exit(object):
def __init__(self, intent, session): # Parameters are here so handler can treat this like the other intent classes
self.card_title = "Exiting"
self.flag = "exit"
def build_response(self):
output = br.build_response(self.card_title, self.flag)
return output
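# Example-only dispatch sketch; the real handler module is not shown here, and
# the intent names are assumptions about how the skill is configured.
def _example_dispatch(intent, session):
    handlers = {'AMAZON.HelpIntent': Help, 'AMAZON.StopIntent': Exit}
    handler = handlers.get(intent.get('name'), Help)
    return handler(intent, session).build_response()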
| gpl-3.0 | 3,137,623,135,016,649,700 | 44.785714 | 120 | 0.411856 | false | 5.494286 | false | false | false |
dakrauth/picker | picker/forms.py | 1 | 6144 | from django import forms
from django.utils import timezone
from django.utils.module_loading import import_string
from . import models as picker
from . import utils
_picker_widget = None
encoded_game_key = 'game_{}'.format
TIE_KEY = '__TIE__'
def decoded_game_key(value):
return int(value.replace('game_', ''))
def encoded_game_item(game):
return (
encoded_game_key(game.id),
str(game.winner.id) if game.winner else (TIE_KEY if game.is_tie else '')
)
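# Round-trip of the key helpers above, e.g. for a game with pk=7:
# encoded_game_key(7) == 'game_7' and decoded_game_key('game_7') == 7. The forms
# below use this mapping to turn posted field names back into Game primary keys.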
def get_picker_widget(league):
global _picker_widget
if not _picker_widget:
widget_path = league.config('TEAM_PICKER_WIDGET')
if widget_path:
_picker_widget = import_string(widget_path)
_picker_widget = _picker_widget or forms.RadioSelect
return _picker_widget
class GameField(forms.ChoiceField):
def __init__(self, game, manage=False, widget=None):
choices = [(str(game.away.id), game.away), (str(game.home.id), game.home)]
if manage:
choices.insert(1, (TIE_KEY, ''))
self.game = game
self.manage = manage
self.game_id = game.id
self.is_game = True
super(GameField, self).__init__(
choices=choices,
label=game.start_time.strftime('%a, %b %d %I:%M %p'),
required=False,
help_text=game.tv,
disabled=not self.manage and (self.game.start_time <= timezone.now()),
widget=widget or get_picker_widget(game.gameset.league)
)
class FieldIter:
def __init__(self, form):
self.fields = []
self.form = form
def append(self, name):
self.fields.append(name)
def __iter__(self):
for name in self.fields:
yield self.form[name]
class BasePickForm(forms.Form):
management = False
def __init__(self, gameset, *args, **kws):
super(BasePickForm, self).__init__(*args, **kws)
self.gameset = gameset
self.game_fields = FieldIter(self)
games = list(gameset.games.select_related('home__league', 'away__league'))
if games:
for gm in games:
key = encoded_game_key(gm.id)
self.fields[key] = GameField(gm, self.management)
self.game_fields.append(key)
self.fields['points'] = forms.IntegerField(
label='{}:'.format(games[-1].vs_description),
required=False
)
class ManagementPickForm(BasePickForm):
management = True
def __init__(self, gameset, *args, **kws):
kws.setdefault('initial', {}).update(**self.get_initial_picks(gameset))
super(ManagementPickForm, self).__init__(gameset, *args, **kws)
def save(self):
gameset = self.gameset
data = self.cleaned_data.copy()
gameset.points = data.pop('points', 0) or 0
gameset.save()
for key, winner in data.items():
if winner:
pk = decoded_game_key(key)
game = gameset.games.get(pk=pk)
game.winner = None if winner == TIE_KEY else int(winner)
gameset.update_pick_status()
@staticmethod
def get_initial_picks(gameset):
return dict({
encoded_game_key(game.id): str(game.winner.id)
for game in gameset.games.played()
if game.winner
}, points=gameset.points)
class UserPickForm(BasePickForm):
def __init__(self, user, gameset, *args, **kws):
initial = self.get_initial_user_picks(gameset, user)
kws.setdefault('initial', {}).update(initial)
self.user = user
super(UserPickForm, self).__init__(gameset, *args, **kws)
def save(self):
data = self.cleaned_data.copy()
picks = picker.PickSet.objects.for_gameset_user(self.gameset, self.user)
points = data.pop('points', None)
games = {decoded_game_key(k): v for k, v in data.items() if v}
picks.update_picks(games=games, points=points)
return picks
@staticmethod
def get_initial_user_picks(gameset, user):
ps = gameset.pick_for_user(user)
initial = dict({
encoded_game_key(g_id): str(w_id) for g_id, w_id in ps.gamepicks.picked_winner_ids()
}, points=ps.points) if ps else {}
return initial
class GameForm(forms.ModelForm):
class Meta:
model = picker.Game
fields = ('start_time', 'location')
class PreferenceForm(forms.ModelForm):
class Meta:
model = picker.Preference
fields = ('autopick',)
def __init__(self, instance, *args, **kws):
kws['instance'] = instance
self.current_email = instance.user.email.lower()
kws.setdefault('initial', {})['email'] = self.current_email
super(PreferenceForm, self).__init__(*args, **kws)
for league in picker.League.objects.all():
field_name = '{}_favorite'.format(league.slug)
current = None
if instance:
try:
current = picker.PickerFavorite.objects.get(user=instance.user, league=league)
except picker.PickerFavorite.DoesNotExist:
pass
self.fields[field_name] = forms.ModelChoiceField(
picker.Team.objects.filter(league=league),
label='{} Favorite'.format(league.abbr.upper()),
empty_label='-- Select --',
required=False,
initial=current.team if current else None
)
def save(self, commit=True):
super(PreferenceForm, self).save(commit)
if commit:
picker.PickerFavorite.objects.filter(user=self.instance.user).delete()
for key in self.cleaned_data:
if not key.endswith('_favorite'):
continue
slug = key.rsplit('_')[0]
league = picker.League.objects.get(slug=slug)
picker.PickerFavorite.objects.create(
league=league,
user=self.instance.user,
team=self.cleaned_data[key]
)
| mit | -7,155,869,303,144,028,000 | 30.187817 | 98 | 0.57487 | false | 3.719128 | false | false | false |
amerlyq/piony | piony/config/argparser.py | 1 | 2747 | from argparse import ArgumentParser, RawDescriptionHelpFormatter
import piony
from piony.common.exceptions import InputError
class ArgParser(object):
def __init__(self):
self.ps = ArgumentParser(prog=piony.__appname__,
formatter_class=RawDescriptionHelpFormatter,
description=piony.__doc__, epilog="Enjoy!!!")
self._setup_options()
def parse(self, argv):
if not argv:
argv = []
elif isinstance(argv, str):
argv = argv.split()
elif not isinstance(argv, list):
raise InputError("Wrong argv type: {}".format(type(argv)))
return self.ps.parse_args(argv)
def apply(self, args):
from operator import xor
res = (False, False)
dbg = {'a': (True, True), 'v': (True, False), 'k': (False, True)}
if args.verbose:
for entry in args.verbose:
res = map(xor, res, dbg[entry])
piony.G_DEBUG_VISUALS, piony.G_DEBUG_ACTIONS = res
def _setup_options(self):
## Configuration
farg = self.ps.add_argument
farg('buds', metavar='bud', nargs='*', type=str, default=None,
help="Setup profile layout in json directly on cmdline. "
"Can be specified several times -- one for each slice. "
"Or use pathes to files with slices inside.")
farg('-v', '--version', action='version', default=None,
version="%(prog)s {0}".format(piony.__version__),
help="Version of program.")
gr_window = self.ps.add_argument_group('Window')
warg = gr_window.add_argument
warg('-c', '--config', default=None,
help="Config file with default settings.")
warg('-p', '--print', default=None,
help="Toggle action print/execute to use as frontend only.")
## Appearance
warg('-s', '--size', type=int, default=None,
help="Sets window size WxH=NxN to derive all rings sizes from it.")
warg('-F', '--fullscreen', action='store_true', default=None,
help="Overlay fullscreen/local")
warg('-T', '--no-tooltip', action='store_true', default=None,
help="Disable pop-up items, for those who is irritated.")
## Process
gr_general = self.ps.add_argument_group('General')
garg = gr_general.add_argument
garg('-k', '--kill', action='store_true', default=None,
help="Kill running daemonized program.")
garg('-V', '--verbose', nargs='?', type=str,
const='a', choices=['a', 'v', 'k'], default=None,
help="Verbose (debug): [a]ll (default), [v]isuals, [k]eys.")
| gpl-3.0 | 114,584,023,838,943,360 | 41.261538 | 80 | 0.560612 | false | 4.004373 | false | false | false |
strahlc/exaile | xlgui/main.py | 1 | 43837 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import datetime
import logging
import os
import re
import threading
import cairo
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Pango
from xl.nls import gettext as _
from xl import (
common,
covers,
event,
formatter,
player,
playlist,
providers,
settings,
trax,
xdg
)
from xlgui.accelerators import AcceleratorManager
from xlgui.playlist_container import PlaylistContainer
from xlgui.widgets import (
dialogs,
info,
menu,
playback
)
from xlgui.widgets.playlist import (
PlaylistPage,
PlaylistView
)
from xlgui import (
guiutil,
tray,
menu as mainmenu
)
logger = logging.getLogger(__name__)
# Length of playback step when user presses seek key (sec)
SEEK_STEP_DEFAULT = 10
# Length of volume steps when user presses up/down key
VOLUME_STEP_DEFAULT = 0.1
class MainWindow(GObject.GObject):
"""
Main Exaile Window
"""
__gproperties__ = {
'is-fullscreen': (bool, 'Fullscreen',
'Whether the window is fullscreen.',
False, # Default
GObject.PARAM_READWRITE),
}
__gsignals__ = {'main-visible-toggle': (GObject.SignalFlags.RUN_LAST, bool, ())}
_mainwindow = None
def __init__(self, controller, builder, collection):
"""
Initializes the main window
@param controller: the main gui controller
"""
GObject.GObject.__init__(self)
self.controller = controller
self.collection = collection
self.playlist_manager = controller.exaile.playlists
self.current_page = -1
self._fullscreen = False
self.resuming = False
self.window_state = 0
self.minimized = False
self.builder = builder
self.window = self.builder.get_object('ExaileWindow')
self.window.set_title('Exaile')
self.title_formatter = formatter.TrackFormatter(settings.get_option(
'gui/main_window_title_format', _('$title (by $artist)') +
' - Exaile'))
self.accelgroup = Gtk.AccelGroup()
self.window.add_accel_group(self.accelgroup)
self.accel_manager = AcceleratorManager('mainwindow-accelerators', self.accelgroup)
self.menubar = self.builder.get_object("mainmenu")
fileitem = self.builder.get_object("file_menu_item")
filemenu = menu.ProviderMenu('menubar-file-menu', self)
fileitem.set_submenu(filemenu)
edititem = self.builder.get_object("edit_menu_item")
editmenu = menu.ProviderMenu('menubar-edit-menu', self)
edititem.set_submenu(editmenu)
viewitem = self.builder.get_object("view_menu_item")
viewmenu = menu.ProviderMenu('menubar-view-menu', self)
viewitem.set_submenu(viewmenu)
toolsitem = self.builder.get_object("tools_menu_item")
toolsmenu = menu.ProviderMenu('menubar-tools-menu', self)
toolsitem.set_submenu(toolsmenu)
helpitem = self.builder.get_object("help_menu_item")
helpmenu = menu.ProviderMenu('menubar-help-menu', self)
helpitem.set_submenu(helpmenu)
self._setup_widgets()
self._setup_position()
self._setup_hotkeys()
logger.info("Connecting main window events...")
self._connect_events()
MainWindow._mainwindow = self
mainmenu._create_menus()
def _setup_hotkeys(self):
"""
        Sets up accelerators that haven't been set up in the UI designer
"""
hotkeys = (
('<Control>S', lambda *e: self.on_save_playlist()),
('<Shift><Control>S', lambda *e: self.on_save_playlist_as()),
('<Control>F', lambda *e: self.on_panel_filter_focus()),
('<Control>G', lambda *e: self.on_search_playlist_focus()), # FIXME
('<Control><Alt>l', lambda *e: player.QUEUE.clear()), # FIXME
('<Control>P', self._on_playpause_button),
('<Control>Right', lambda *e: self._on_seek_key(True)),
('<Control>Left', lambda *e: self._on_seek_key(False)),
('<Control>plus', lambda *e: self._on_volume_key(True)),
('<Control>minus', lambda *e: self._on_volume_key(False)),
('<Control>Page_Up', self._on_prev_tab_key),
('<Control>Page_Down', self._on_next_tab_key),
('<Alt>N', self._on_focus_playlist_container),
# These 4 are subject to change.. probably should do this
# via a different mechanism too...
('<Alt>I', lambda *e: self.controller.focus_panel('files')),
#('<Alt>C', lambda *e: self.controller.focus_panel('collection')),
('<Alt>R', lambda *e: self.controller.focus_panel('radio')),
('<Alt>L', lambda *e: self.controller.focus_panel('playlists')),
('<Alt>1', lambda *e: self._on_focus_playlist_tab(0)),
('<Alt>2', lambda *e: self._on_focus_playlist_tab(1)),
('<Alt>3', lambda *e: self._on_focus_playlist_tab(2)),
('<Alt>4', lambda *e: self._on_focus_playlist_tab(3)),
('<Alt>5', lambda *e: self._on_focus_playlist_tab(4)),
('<Alt>6', lambda *e: self._on_focus_playlist_tab(5)),
('<Alt>7', lambda *e: self._on_focus_playlist_tab(6)),
('<Alt>8', lambda *e: self._on_focus_playlist_tab(7)),
('<Alt>9', lambda *e: self._on_focus_playlist_tab(8)),
('<Alt>0', lambda *e: self._on_focus_playlist_tab(9)),
)
self.accel_group = Gtk.AccelGroup()
for key, function in hotkeys:
key, mod = Gtk.accelerator_parse(key)
self.accel_group.connect(key, mod, Gtk.AccelFlags.VISIBLE,
function)
self.window.add_accel_group(self.accel_group)
def _setup_widgets(self):
"""
Sets up the various widgets
"""
# TODO: Maybe make this stackable
self.message = dialogs.MessageBar(
parent=self.builder.get_object('player_box'),
buttons=Gtk.ButtonsType.CLOSE
)
self.message.connect('response', self.on_messagebar_response)
self.info_area = MainWindowTrackInfoPane(player.PLAYER)
self.info_area.set_auto_update(True)
self.info_area.set_padding(3, 3, 3, 3)
self.info_area.hide()
self.info_area.set_no_show_all(True)
guiutil.gtk_widget_replace(self.builder.get_object('info_area'), self.info_area)
self.volume_control = playback.VolumeControl(player.PLAYER)
self.info_area.get_action_area().pack_end(self.volume_control, False, False, 0)
self.alpha_style = None
if settings.get_option('gui/use_alpha', False):
screen = self.window.get_screen()
visual = screen.get_rgba_visual()
self.window.set_visual(visual)
self.window.connect('screen-changed', self.on_screen_changed)
self.alpha_style = Gtk.CssProvider.new()
self.window.get_style_context().add_provider(self.alpha_style,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
self._update_alpha()
playlist_area = self.builder.get_object('playlist_area')
self.playlist_container = PlaylistContainer('saved_tabs', player.PLAYER)
for notebook in self.playlist_container.notebooks:
notebook.connect_after('switch-page', self.on_playlist_container_switch_page)
page = notebook.get_current_tab()
if page is not None:
selection = page.view.get_selection()
selection.connect('changed', self.on_playlist_view_selection_changed)
playlist_area.pack_start(self.playlist_container, True, True, 3)
self.splitter = self.builder.get_object('splitter')
# In most (all?) RTL locales, the playback controls should still be LTR.
# Just in case that's not always the case, we provide a hidden option to
# force RTL layout instead. This can be removed once we're more certain
# that the default behavior (always LTR) is correct.
controls_direction = Gtk.TextDirection.RTL \
if settings.get_option('gui/rtl_playback_controls') \
else Gtk.TextDirection.LTR
self.play_image = Gtk.Image.new_from_icon_name('media-playback-start',
Gtk.IconSize.SMALL_TOOLBAR)
self.play_image.set_direction(controls_direction)
self.pause_image = Gtk.Image.new_from_icon_name('media-playback-pause',
Gtk.IconSize.SMALL_TOOLBAR)
self.pause_image.set_direction(controls_direction)
play_toolbar = self.builder.get_object('play_toolbar')
play_toolbar.set_direction(controls_direction)
for button in ('playpause', 'next', 'prev', 'stop'):
widget = self.builder.get_object('%s_button' % button)
setattr(self, '%s_button' % button, widget)
widget.get_child().set_direction(controls_direction)
self.progress_bar = playback.SeekProgressBar(player.PLAYER)
self.progress_bar.get_child().set_direction(controls_direction)
# Don't expand vertically; looks awful on Adwaita.
self.progress_bar.set_valign(Gtk.Align.CENTER)
guiutil.gtk_widget_replace(
self.builder.get_object('playback_progressbar_dummy'),
self.progress_bar
)
self.stop_button.toggle_spat = False
self.stop_button.add_events(Gdk.EventMask.POINTER_MOTION_MASK)
self.stop_button.connect('motion-notify-event',
self.on_stop_button_motion_notify_event)
self.stop_button.connect('leave-notify-event',
self.on_stop_button_leave_notify_event)
self.stop_button.connect('key-press-event',
self.on_stop_button_key_press_event)
self.stop_button.connect('key-release-event',
self.on_stop_button_key_release_event)
self.stop_button.connect('focus-out-event',
self.on_stop_button_focus_out_event)
self.stop_button.connect('button-press-event',
self.on_stop_button_press_event)
self.stop_button.connect('button-release-event',
self.on_stop_button_release_event)
self.stop_button.drag_dest_set(Gtk.DestDefaults.ALL,
[Gtk.TargetEntry.new("exaile-index-list", Gtk.TargetFlags.SAME_APP, 0)], Gdk.DragAction.COPY)
self.stop_button.connect('drag-motion',
self.on_stop_button_drag_motion)
self.stop_button.connect('drag-leave',
self.on_stop_button_drag_leave)
self.stop_button.connect('drag-data-received',
self.on_stop_button_drag_data_received)
self.statusbar = info.Statusbar(self.builder.get_object('status_bar'))
event.add_ui_callback(self.on_exaile_loaded, 'exaile_loaded')
def _connect_events(self):
"""
Connects the various events to their handlers
"""
self.builder.connect_signals({
'on_configure_event': self.configure_event,
'on_window_state_event': self.window_state_change_event,
'on_delete_event': self.on_delete_event,
'on_playpause_button_clicked': self._on_playpause_button,
'on_next_button_clicked':
lambda *e: player.QUEUE.next(),
'on_prev_button_clicked':
lambda *e: player.QUEUE.prev(),
'on_about_item_activate': self.on_about_item_activate,
# Controller
# 'on_scan_collection_item_activate': self.controller.on_rescan_collection,
# 'on_device_manager_item_activate': lambda *e: self.controller.show_devices(),
# 'on_track_properties_activate':self.controller.on_track_properties,
})
event.add_ui_callback(self.on_playback_resume, 'playback_player_resume',
player.PLAYER)
event.add_ui_callback(self.on_playback_end, 'playback_player_end',
player.PLAYER)
event.add_ui_callback(self.on_playback_end, 'playback_error',
player.PLAYER)
event.add_ui_callback(self.on_playback_start, 'playback_track_start',
player.PLAYER)
event.add_ui_callback(self.on_toggle_pause, 'playback_toggle_pause',
player.PLAYER)
event.add_ui_callback(self.on_track_tags_changed, 'track_tags_changed')
event.add_ui_callback(self.on_buffering, 'playback_buffering',
player.PLAYER)
event.add_ui_callback(self.on_playback_error, 'playback_error',
player.PLAYER)
event.add_ui_callback(self.on_playlist_tracks_added,
'playlist_tracks_added')
event.add_ui_callback(self.on_playlist_tracks_removed,
'playlist_tracks_removed')
# Settings
self._on_option_set('gui_option_set', settings, 'gui/show_info_area')
self._on_option_set('gui_option_set', settings, 'gui/show_info_area_covers')
event.add_ui_callback(self._on_option_set, 'option_set')
def _connect_panel_events(self):
"""
Sets up panel events
"""
# When there's nothing in the notebook, hide it
self.controller.panel_notebook.connect('page-added', self.on_panel_notebook_add_page)
self.controller.panel_notebook.connect('page-removed', self.on_panel_notebook_remove_page)
# panels
panels = self.controller.panel_notebook.panels
for panel_name in ('playlists', 'radio', 'files', 'collection'):
panel = panels[panel_name].panel
sort = False
if panel_name in ('files', 'collection'):
sort = True
panel.connect('append-items', lambda panel, items, force_play, sort=sort:
self.on_append_items(items, force_play, sort=sort))
panel.connect('queue-items', lambda panel, items, sort=sort:
self.on_append_items(items, queue=True, sort=sort))
panel.connect('replace-items', lambda panel, items, sort=sort:
self.on_append_items(items, replace=True, sort=sort))
## Collection Panel
panel = panels['collection'].panel
panel.connect('collection-tree-loaded', self.on_collection_tree_loaded)
## Playlist Panel
panel = panels['playlists'].panel
panel.connect('playlist-selected',
lambda panel, playlist:
self.playlist_container.create_tab_from_playlist(playlist))
## Radio Panel
panel = panels['radio'].panel
panel.connect('playlist-selected',
lambda panel, playlist:
self.playlist_container.create_tab_from_playlist(playlist))
## Files Panel
#panel = panels['files']
def _update_alpha(self):
if self.alpha_style is None:
return
opac = 1.0 - float(settings.get_option('gui/transparency'))
self.alpha_style.load_from_data(
'.background { ' +
('background-color: alpha(@theme_bg_color, %s);' % opac) +
'}'
)
def do_get_property(self, prop):
if prop.name == 'is-fullscreen':
return self._fullscreen
else:
return GObject.GObject.do_get_property(self, prop)
def do_set_property(self, prop, value):
if prop.name == 'is-fullscreen':
if value:
self.window.fullscreen()
else:
self.window.unfullscreen()
else:
GObject.GObject.do_set_property(self, prop, value)
def on_screen_changed(self, widget, event):
"""
Updates the colormap on screen change
"""
screen = widget.get_screen()
visual = screen.get_rgba_visual() or screen.get_rgb_visual()
self.window.set_visual(visual)
def on_messagebar_response(self, widget, response):
"""
Hides the messagebar if requested
"""
if response == Gtk.ResponseType.CLOSE:
widget.hide()
def on_panel_notebook_add_page(self, notebook, page, page_num):
if self.splitter.get_child1() is None:
self.splitter.pack1(self.controller.panel_notebook)
self.controller.panel_notebook.get_parent() \
.child_set_property(self.controller.panel_notebook, 'shrink', False)
def on_panel_notebook_remove_page(self, notebook, page, page_num):
if notebook.get_n_pages() == 0:
self.splitter.remove(self.controller.panel_notebook)
def on_stop_button_motion_notify_event(self, widget, event):
"""
Sets the hover state and shows SPAT icon
"""
        widget._hovered = True
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
widget.set_image(Gtk.Image.new_from_icon_name(
'process-stop', Gtk.IconSize.BUTTON))
else:
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_leave_notify_event(self, widget, event):
"""
Unsets the hover state and resets the button icon
"""
        widget._hovered = False
if not widget.is_focus() and \
~(event.get_state() & Gdk.ModifierType.SHIFT_MASK):
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_key_press_event(self, widget, event):
"""
Shows SPAT icon on Shift key press
"""
if event.keyval in (Gdk.KEY_Shift_L, Gdk.KEY_Shift_R):
widget.set_image(Gtk.Image.new_from_icon_name(
'process-stop', Gtk.IconSize.BUTTON))
widget.toggle_spat = True
if event.keyval in (Gdk.KEY_space, Gdk.KEY_Return):
if widget.toggle_spat:
self.on_spat_clicked()
else:
player.PLAYER.stop()
def on_stop_button_key_release_event(self, widget, event):
"""
Resets the button icon
"""
if event.keyval in (Gdk.KEY_Shift_L, Gdk.KEY_Shift_R):
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
widget.toggle_spat = False
def on_stop_button_focus_out_event(self, widget, event):
"""
Resets the button icon unless
the button is still hovered
"""
        if not getattr(widget, '_hovered', False):
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_press_event(self, widget, event):
"""
Called when the user clicks on the stop button
"""
if event.button == 1:
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
self.on_spat_clicked()
elif event.button == 3:
menu = guiutil.Menu()
menu.append(_("Toggle: Stop after Selected Track"),
self.on_spat_clicked,
'process-stop')
menu.popup(None, None, None, None, event.button, event.time)
def on_stop_button_release_event(self, widget, event):
"""
Called when the user releases the mouse from the stop button
"""
rect = widget.get_allocation()
if 0 <= event.x < rect.width and 0 <= event.y < rect.height:
player.PLAYER.stop()
def on_stop_button_drag_motion(self, widget, context, x, y, time):
"""
Indicates possible SPAT during drag motion of tracks
"""
target = widget.drag_dest_find_target(context, widget.drag_dest_get_target_list()).name()
if target == 'exaile-index-list':
widget.set_image(Gtk.Image.new_from_icon_name(
'process-stop', Gtk.IconSize.BUTTON))
def on_stop_button_drag_leave(self, widget, context, time):
"""
Resets the stop button
"""
widget.set_image(Gtk.Image.new_from_icon_name(
'media-playback-stop', Gtk.IconSize.BUTTON))
def on_stop_button_drag_data_received(self, widget, context, x, y, selection, info, time):
"""
Allows for triggering the SPAT feature
by dropping tracks on the stop button
"""
source_widget = Gtk.drag_get_source_widget(context)
if selection.target.name() == 'exaile-index-list' and isinstance(source_widget, PlaylistView):
position = int(selection.data.split(',')[0])
if position == source_widget.playlist.spat_position:
position = -1
source_widget.playlist.spat_position = position
source_widget.queue_draw()
def on_spat_clicked(self, *e):
"""
Called when the user clicks on the SPAT item
"""
trs = self.get_selected_page().view.get_selected_items()
if not trs: return
# TODO: this works, but implement this some other way in the future
if player.QUEUE.current_playlist.spat_position == -1:
player.QUEUE.current_playlist.spat_position = trs[0][0]
else:
player.QUEUE.current_playlist.spat_position = -1
self.get_selected_page().view.queue_draw()
def on_append_items(self, tracks, force_play=False, queue=False, sort=False, replace=False):
"""
Called when a panel (or other component)
has tracks to append and possibly queue
:param tracks: The tracks to append
:param force_play: Force playing the first track if there
is no track currently playing. Otherwise
check a setting to determine whether the
track should be played
:param queue: Additionally queue tracks
:param sort: Sort before adding
:param replace: Clear playlist before adding
"""
if len(tracks) == 0:
return
page = self.get_selected_page()
if sort:
tracks = trax.sort_tracks(
('artist', 'date', 'album', 'discnumber', 'tracknumber'),
tracks)
if replace:
page.playlist.clear()
offset = len(page.playlist)
page.playlist.extend(tracks)
# extending the queue automatically starts playback
if queue:
if player.QUEUE is not page.playlist:
player.QUEUE.extend(tracks)
elif (force_play or settings.get_option( 'playlist/append_menu_starts_playback', False )) and \
not player.PLAYER.current:
page.view.play_track_at(offset, tracks[0])
def on_playback_error(self, type, player, message):
"""
Called when there has been a playback error
"""
self.message.show_error(_('Playback error encountered!'), message)
def on_buffering(self, type, player, percent):
"""
Called when a stream is buffering
"""
percent = min(percent, 100)
self.statusbar.set_status(_("Buffering: %d%%...") % percent, 1)
def on_track_tags_changed(self, type, track, tag):
"""
Called when tags are changed
"""
if track is player.PLAYER.current:
self._update_track_information()
def on_collection_tree_loaded(self, tree):
"""
Updates information on collection tree load
"""
self.statusbar.update_info()
def on_exaile_loaded(self, event_type, exaile, nothing):
"""
Updates information on exaile load
"""
self.statusbar.update_info()
event.remove_callback(self.on_exaile_loaded, 'exaile_loaded')
def on_playlist_tracks_added(self, type, playlist, tracks):
"""
Updates information on track add
"""
self.statusbar.update_info()
def on_playlist_tracks_removed(self, type, playlist, tracks):
"""
Updates information on track removal
"""
self.statusbar.update_info()
def on_toggle_pause(self, type, player, object):
"""
Called when the user clicks the play button after playback has
already begun
"""
if player.is_paused():
image = self.play_image
tooltip = _('Continue Playback')
else:
image = self.pause_image
tooltip = _('Pause Playback')
self.playpause_button.set_image(image)
self.playpause_button.set_tooltip_text(tooltip)
self._update_track_information()
def on_playlist_container_switch_page(self, notebook, page, page_num):
"""
Updates info after notebook page switch
"""
page = notebook.get_nth_page(page_num)
selection = page.view.get_selection()
selection.connect('changed', self.on_playlist_view_selection_changed)
self.statusbar.update_info()
def on_playlist_view_selection_changed(self, selection):
"""
Updates info after playlist page selection change
"""
self.statusbar.update_info()
def on_panel_filter_focus(self, *e):
"""
Gives focus to the filter field of the current panel
"""
try:
self.controller.get_active_panel().filter.grab_focus()
except (AttributeError, KeyError):
pass
def on_search_playlist_focus(self, *e):
"""
Gives focus to the playlist search bar
"""
plpage = get_selected_playlist()
if plpage:
plpage.get_search_entry().grab_focus()
def on_save_playlist(self, *e):
"""
Called when the user presses Ctrl+S
Spawns the save dialog of the currently selected playlist tab if
not custom, saves changes directly if custom
"""
tab = self.get_selected_tab()
if not tab: return
if tab.page.playlist.get_is_custom():
tab.do_save_changes_to_custom()
else:
tab.do_save_custom()
def on_save_playlist_as(self, *e):
"""
        Called when the user presses Ctrl+Shift+S
Spawns the save as dialog of the current playlist tab
"""
tab = self.get_selected_tab()
if not tab: return
tab.do_save_custom()
def on_clear_playlist(self, *e):
"""
Clears the current playlist tab
"""
page = self.get_selected_page()
if page:
page.playlist.clear()
def on_open_item_activate(self, menuitem):
"""
Shows a dialog to open media
"""
def on_uris_selected(dialog, uris):
uris.reverse()
if len(uris) > 0:
self.controller.open_uri(uris.pop(), play=True)
for uri in uris:
self.controller.open_uri(uri, play=False)
dialog = dialogs.MediaOpenDialog(self.window)
dialog.connect('uris-selected', on_uris_selected)
dialog.show()
def on_open_url_item_activate(self, menuitem):
"""
Shows a dialog to open an URI
"""
def on_uri_selected(dialog, uri):
self.controller.open_uri(uri, play=False)
dialog = dialogs.URIOpenDialog(self.window)
dialog.connect('uri-selected', on_uri_selected)
dialog.show()
def on_open_directories_item_activate(self, menuitem):
"""
Shows a dialog to open directories
"""
def on_uris_selected(dialog, uris):
uris.reverse()
if len(uris) > 0:
self.controller.open_uri(uris.pop(), play=True)
for uri in uris:
self.controller.open_uri(uri, play=False)
dialog = dialogs.DirectoryOpenDialog(self.window)
# Selecting empty folders is useless
dialog.props.create_folders = False
dialog.connect('uris-selected', on_uris_selected)
dialog.show()
def on_export_current_playlist_activate(self, menuitem):
"""
Shows a dialog to export the current playlist
"""
page = self.get_selected_page()
if not page or not isinstance(page, PlaylistPage):
return
def on_message(dialog, message_type, message):
"""
Show messages in the main window message area
"""
if message_type == Gtk.MessageType.INFO:
self.message.show_info(markup=message)
elif message_type == Gtk.MessageType.ERROR:
self.message.show_error(_('Playlist export failed!'), message)
return True
dialog = dialogs.PlaylistExportDialog(page.playlist, self.window)
dialog.connect('message', on_message)
dialog.show()
def on_playlist_utilities_bar_visible_toggled(self, checkmenuitem):
"""
Shows or hides the playlist utilities bar
"""
settings.set_option('gui/playlist_utilities_bar_visible',
checkmenuitem.get_active())
def on_show_playing_track_item_activate(self, menuitem):
"""
Tries to show the currently playing track
"""
self.playlist_container.show_current_track()
def on_about_item_activate(self, menuitem):
"""
Shows the about dialog
"""
dialog = dialogs.AboutDialog(self.window)
dialog.show()
def on_playback_resume(self, type, player, data):
self.resuming = True
def on_playback_start(self, type, player, object):
"""
Called when playback starts
Sets the currently playing track visible in the currently selected
playlist if the user has chosen this setting
"""
if self.resuming:
self.resuming = False
return
self._update_track_information()
self.playpause_button.set_image(self.pause_image)
self.playpause_button.set_tooltip_text(_('Pause Playback'))
def on_playback_end(self, type, player, object):
"""
Called when playback ends
"""
self.window.set_title('Exaile')
self.playpause_button.set_image(self.play_image)
self.playpause_button.set_tooltip_text(_('Start Playback'))
def _on_option_set(self, name, object, option):
"""
Handles changes of settings
"""
if option == 'gui/main_window_title_format':
self.title_formatter.props.format = settings.get_option(
option, self.title_formatter.props.format)
elif option == 'gui/use_tray':
usetray = settings.get_option(option, False)
if self.controller.tray_icon and not usetray:
self.controller.tray_icon.destroy()
self.controller.tray_icon = None
elif not self.controller.tray_icon and usetray:
self.controller.tray_icon = tray.TrayIcon(self)
elif option == 'gui/show_info_area':
self.info_area.set_no_show_all(False)
if settings.get_option(option, True):
self.info_area.show_all()
else:
self.info_area.hide()
self.info_area.set_no_show_all(True)
elif option == 'gui/show_info_area_covers':
cover = self.info_area.cover
cover.set_no_show_all(False)
if settings.get_option(option, True):
cover.show_all()
else:
cover.hide()
cover.set_no_show_all(True)
elif option == 'gui/transparency':
self._update_alpha()
def _on_volume_key(self, is_up):
diff = int(100 * settings.get_option('gui/volue_key_step_size', VOLUME_STEP_DEFAULT))
if not is_up: diff = -diff
player.PLAYER.modify_volume(diff)
return True
def _on_seek_key(self, is_forward):
diff = settings.get_option('gui/seek_key_step_size', SEEK_STEP_DEFAULT)
if not is_forward: diff = -diff
if player.PLAYER.current:
player.PLAYER.modify_time(diff)
self.progress_bar.update_progress()
return True
def _on_prev_tab_key(self, *e):
self.playlist_container.get_current_notebook().select_prev_tab()
return True
def _on_next_tab_key(self, *e):
self.playlist_container.get_current_notebook().select_next_tab()
return True
def _on_playpause_button(self, *e):
self.playpause()
return True
def _on_focus_playlist_tab(self, tab_nr):
self.playlist_container.get_current_notebook().focus_tab(tab_nr)
return True
def _on_focus_playlist_container(self, *_e):
self.playlist_container.focus()
return True
def _update_track_information(self):
"""
Sets track information
"""
track = player.PLAYER.current
if not track:
return
self.window.set_title(self.title_formatter.format(track))
def playpause(self):
"""
Pauses the playlist if it is playing, starts playing if it is
paused. If stopped, try to start playing the next suitable track.
"""
if player.PLAYER.is_paused() or player.PLAYER.is_playing():
player.PLAYER.toggle_pause()
else:
pl = self.get_selected_page()
player.QUEUE.set_current_playlist(pl.playlist)
try:
trackpath = pl.view.get_selected_paths()[0]
pl.playlist.current_position = trackpath[0]
except IndexError:
pass
player.QUEUE.play(track=pl.playlist.current)
def _setup_position(self):
"""
        Sets up the position and size based on the size the window was
when it was last moved or resized
"""
if settings.get_option('gui/mainw_maximized', False):
self.window.maximize()
width = settings.get_option('gui/mainw_width', 500)
height = settings.get_option('gui/mainw_height', 475)
x = settings.get_option('gui/mainw_x', 10)
y = settings.get_option('gui/mainw_y', 10)
self.window.move(x, y)
self.window.resize(width, height)
pos = settings.get_option('gui/mainw_sash_pos', 200)
self.splitter.set_position(pos)
def on_delete_event(self, *e):
"""
Called when the user attempts to close the window
"""
sash_pos = self.splitter.get_position()
if sash_pos > 10:
settings.set_option('gui/mainw_sash_pos', sash_pos)
if settings.get_option('gui/use_tray', False) and \
settings.get_option('gui/close_to_tray', False):
self.window.hide()
else:
self.quit()
return True
def quit(self, *e):
"""
Quits Exaile
"""
self.window.hide()
GLib.idle_add(self.controller.exaile.quit)
return True
def on_restart_item_activate(self, menuitem):
"""
Restarts Exaile
"""
self.window.hide()
GLib.idle_add(self.controller.exaile.quit, True)
def toggle_visible(self, bringtofront=False):
"""
Toggles visibility of the main window
"""
toggle_handled = self.emit('main-visible-toggle')
if not toggle_handled:
if bringtofront and self.window.is_active() or \
not bringtofront and self.window.get_property('visible'):
self.window.hide()
else:
# the ordering for deiconify/show matters -- if this gets
# switched, then the minimization detection breaks
self.window.deiconify()
self.window.show()
def configure_event(self, *e):
"""
Called when the window is resized or moved
"""
# Don't save window size if it is maximized or fullscreen.
if settings.get_option('gui/mainw_maximized', False) or \
self._fullscreen:
return False
(width, height) = self.window.get_size()
if [width, height] != [ settings.get_option("gui/mainw_"+key, -1) for \
key in ["width", "height"] ]:
settings.set_option('gui/mainw_height', height, save=False)
settings.set_option('gui/mainw_width', width, save=False)
(x, y) = self.window.get_position()
if [x, y] != [ settings.get_option("gui/mainw_"+key, -1) for \
key in ["x", "y"] ]:
settings.set_option('gui/mainw_x', x, save=False)
settings.set_option('gui/mainw_y', y, save=False)
return False
def window_state_change_event(self, window, event):
"""
Saves the current maximized and fullscreen
states and minimizes to tray if requested
"""
if event.changed_mask & Gdk.WindowState.MAXIMIZED:
settings.set_option('gui/mainw_maximized',
bool(event.new_window_state & Gdk.WindowState.MAXIMIZED))
if event.changed_mask & Gdk.WindowState.FULLSCREEN:
self._fullscreen = bool(event.new_window_state & Gdk.WindowState.FULLSCREEN)
self.notify('is-fullscreen')
# detect minimization state changes
prev_minimized = self.minimized
if not self.minimized:
if event.changed_mask & Gdk.WindowState.ICONIFIED and \
not event.changed_mask & Gdk.WindowState.WITHDRAWN and \
event.new_window_state & Gdk.WindowState.ICONIFIED and \
not event.new_window_state & Gdk.WindowState.WITHDRAWN and \
not self.window_state & Gdk.WindowState.ICONIFIED:
self.minimized = True
else:
if event.changed_mask & Gdk.WindowState.WITHDRAWN and \
not event.new_window_state & (Gdk.WindowState.WITHDRAWN): #and \
self.minimized = False
# track this
self.window_state = event.new_window_state
if settings.get_option('gui/minimize_to_tray', False):
# old code to detect minimization
# -> it must have worked at some point, perhaps this is a GTK version
# specific set of behaviors? Current code works now on 2.24.17
#if wm_state is not None:
# if '_NET_WM_STATE_HIDDEN' in wm_state[2]:
# show tray
# window.hide
#else
# destroy tray
if self.minimized != prev_minimized and self.minimized == True:
if not settings.get_option('gui/use_tray', False) and \
self.controller.tray_icon is None:
self.controller.tray_icon = tray.TrayIcon(self)
window.hide()
elif not settings.get_option('gui/use_tray', False) and \
self.controller.tray_icon is not None:
self.controller.tray_icon.destroy()
self.controller.tray_icon = None
return False
def get_selected_page(self):
"""
        Returns the currently displayed playlist notebook page
"""
return self.playlist_container.get_current_tab()
def get_selected_playlist(self):
try:
page = self.get_selected_page()
except AttributeError:
return None
if not isinstance(page, PlaylistPage):
return None
return page
class MainWindowTrackInfoPane(info.TrackInfoPane, providers.ProviderHandler):
"""
Extends the regular track info pane by an area for custom widgets
The mainwindow-info-area-widget provider is used to show widgets
on the right of the info area. They should be small. The registered
provider should provide a method 'create_widget' that takes the info
area instance as a parameter, and that returns a Gtk.Widget to be
inserted into the widget_area of the info area, and an attribute
'name' that will be used when removing the provider.
"""
def __init__(self, player):
info.TrackInfoPane.__init__(self, player)
self.__player = player
self.widget_area = Gtk.Box()
self.get_child().pack_start(self.widget_area, False, False, 0)
self.__widget_area_widgets = {}
# call this last if we're using simple_init=True
providers.ProviderHandler.__init__(self, 'mainwindow-info-area-widget',
target=player, simple_init=True)
def get_player(self):
'''
Retrieves the player object that this info area
is associated with
'''
return self._TrackInfoPane__player
def on_provider_added(self, provider):
name = provider.name
widget = provider.create_widget(self)
old_widget = self.__widget_area_widgets.get(name)
if old_widget is not None:
self.widget_area.remove(old_widget)
old_widget.destroy()
self.__widget_area_widgets[name] = widget
self.widget_area.pack_start(widget, False, False, 0)
widget.show_all()
def on_provider_removed(self, provider):
widget = self.__widget_area_widgets.pop(provider.name, None)
if widget is not None:
self.widget_area.remove(widget)
widget.destroy()
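# Example provider for the info area (editor's illustrative sketch, not part of
# the original module; the registration call below is hypothetical and assumes
# exaile's providers.register(service_name, provider) API):
#
#   class MyInfoWidget:
#       name = 'my-info-widget'
#       def create_widget(self, info_area):
#           return Gtk.Label(label='hello')
#
#   providers.register('mainwindow-info-area-widget', MyInfoWidget())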
def get_playlist_container():
return MainWindow._mainwindow.playlist_container
def get_playlist_notebook():
'''Retrieves the primary playlist notebook'''
return MainWindow._mainwindow.playlist_container.notebooks[0]
def get_selected_page():
return MainWindow._mainwindow.get_selected_page()
def get_selected_playlist():
return MainWindow._mainwindow.get_selected_playlist()
def mainwindow():
return MainWindow._mainwindow
# vim: et sts=4 sw=4
| gpl-2.0 | -6,720,579,076,938,104,000 | 35.930918 | 105 | 0.589593 | false | 3.948212 | false | false | false |
amw2104/fireplace | fireplace/cards/classic/paladin.py | 1 | 2853 | from ..utils import *
##
# Hero Powers
# Reinforce (Uther Lightbringer)
class CS2_101:
activate = Summon(CONTROLLER, "CS2_101t")
# Reinforce (Uther Skin 1)
class CS2_101_H1:
activate = CS2_101.activate
##
# Minions
# Guardian of Kings
class CS2_088:
play = Heal(FRIENDLY_HERO, 6)
# Argent Protector
class EX1_362:
play = GiveDivineShield(TARGET)
# Aldor Peacekeeper
class EX1_382:
play = Buff(TARGET, "EX1_382e")
class EX1_382e:
atk = SET(1)
# Tirion Fordring
class EX1_383:
deathrattle = Summon(CONTROLLER, "EX1_383t")
##
# Spells
# Blessing of Might
class CS2_087:
play = Buff(TARGET, "CS2_087e")
CS2_087e = buff(atk=3)
# Holy Light
class CS2_089:
play = Heal(TARGET, 6)
# Blessing of Kings
class CS2_092:
play = Buff(TARGET, "CS2_092e")
CS2_092e = buff(+4, +4)
# Consecration
class CS2_093:
play = Hit(ENEMY_CHARACTERS, 2)
# Hammer of Wrath
class CS2_094:
play = Hit(TARGET, 3), Draw(CONTROLLER)
# Divine Favor
class EX1_349:
play = DrawUntil(CONTROLLER, Count(ENEMY_HAND))
# Lay on Hands
class EX1_354:
play = Heal(TARGET, 8), Draw(CONTROLLER) * 3
# Blessed Champion
class EX1_355:
play = Buff(TARGET, "EX1_355e")
class EX1_355e:
atk = lambda self, i: i * 2
# Humility
class EX1_360:
play = Buff(TARGET, "EX1_360e")
class EX1_360e:
atk = SET(1)
# Blessing of Wisdom
class EX1_363:
play = Buff(TARGET, "EX1_363e")
class EX1_363e:
events = Attack(OWNER).on(Draw(CONTROLLER))
# Blessing of Wisdom (Unused)
class EX1_363e2:
events = Attack(OWNER).on(Draw(OWNER_OPPONENT))
# Holy Wrath
class EX1_365:
play = Draw(CONTROLLER).then(Hit(TARGET, COST(Draw.CARD)))
# Hand of Protection
class EX1_371:
play = GiveDivineShield(TARGET)
# Avenging Wrath
class EX1_384:
def play(self):
count = self.controller.get_spell_damage(8)
yield Hit(RANDOM_ENEMY_CHARACTER, 1) * count
# Equality
class EX1_619:
play = Buff(ALL_MINIONS, "EX1_619e")
class EX1_619e:
max_health = SET(1)
##
# Secrets
# Noble Sacrifice
class EX1_130:
secret = Attack(ENEMY_MINIONS).on(FULL_BOARD | (
Reveal(SELF), Retarget(Attack.ATTACKER, Summon(CONTROLLER, "EX1_130a"))
))
# Eye for an Eye
class EX1_132:
secret = Damage(FRIENDLY_HERO).on(
Reveal(SELF), Hit(ENEMY_HERO, Damage.AMOUNT)
)
# Redemption
class EX1_136:
secret = Death(FRIENDLY + MINION).on(FULL_BOARD | (
Reveal(SELF),
Summon(CONTROLLER, Copy(Death.ENTITY)).then(SetCurrentHealth(Summon.CARD, 1))
))
# Repentance
class EX1_379:
secret = Play(OPPONENT, MINION | HERO).after(
Reveal(SELF), Buff(Play.CARD, "EX1_379e")
)
class EX1_379e:
max_health = SET(1)
##
# Weapons
# Truesilver Champion
class CS2_097:
events = Attack(FRIENDLY_HERO).on(Heal(FRIENDLY_HERO, 2))
# Sword of Justice
class EX1_366:
events = Summon(CONTROLLER, MINION).after(
Buff(Summon.CARD, "EX1_366e"),
Hit(SELF, 1)
)
EX1_366e = buff(+1, +1)
| agpl-3.0 | -3,566,954,898,071,706,600 | 14.256684 | 79 | 0.685594 | false | 2.196305 | false | false | false |
renyi533/tensorflow | tensorflow/python/keras/mixed_precision/experimental/policy.py | 1 | 25763 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Policy class for mixed precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.mixed_precision.experimental import device_compatibility_check
from tensorflow.python.keras.mixed_precision.experimental import loss_scale as keras_loss_scale_module
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util.tf_export import keras_export
# Default value of certain arguments, indicating the default behavior for
# that argument should be used.
USE_DEFAULT = 'USE_DEFAULT'
@keras_export('keras.mixed_precision.experimental.Policy')
class Policy(object):
"""A dtype policy for a Keras layer.
A dtype policy determines dtype-related aspects of a layer, such as its
computation and variable dtypes. Each layer has a policy. Policies can be
passed to the `dtype` argument of layer constructors, or a global policy can
be set with `tf.keras.mixed_precision.experimental.set_policy`. A layer will
  default to the global policy if no policy is passed to its constructor.
For many models, each layer's policy will have the same compute dtype and
variable dtype, which will typically be float32. In this case, we refer to the
singular dtype as the layer's dtype, which can be queried by the property
`tf.keras.layers.Layer.dtype`.
When mixed precision training is used, most layers will instead have a float16
or bfloat16 compute dtype and a float32 variable dtype, and so the layer does
not have a single dtype. When the variable dtype does not match the compute
dtype, variables will be automatically casted to the compute dtype to avoid
type errors. In this case, `tf.keras.layers.Layer.dtype` refers to the
variable dtype, not the compute dtype. See [the mixed precision
guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more
information on how to use mixed precision.
Certain policies also have a `tf.mixed_precision.experimental.LossScale`
  instance, which is used by `tf.keras.Model`s to perform loss scaling. Loss
scaling is a technique used with mixed precision to avoid numerical underflow
in float16 gradients. Loss scaling is only done by Models in `Model.fit`,
`Model.train_on_batch`, and similar methods. Layers which are not Models
ignore the loss scale.
Policies are constructed by passing a string to the constructor, e.g.
`tf.keras.mixed_precision.experimental.Policy('float32')`. The string
determines the compute and variable dtypes. It can be one of the following:
* Any dtype name, such as 'float32' or 'float64'. Both the variable and
compute dtypes will be that dtype. No loss scaling is done by default.
* 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
bfloat16, while the variable dtype is float32. These policies are used for
mixed precision training. With 'mixed_float16', a dynamic loss scale is
used by default. 'mixed_bfloat16' does no loss scaling by default, as loss
scaling is unnecessary with bfloat16.
### How to use mixed precision in a Keras model
To use mixed precision in a Keras model, the `'mixed_float16'` or
`'mixed_bfloat16'` policy can be used.
`tf.keras.mixed_precision.experimental.set_policy` can be used to set the
default policy for layers if no policy is passed to them. For example:
>>> tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
>>> model = tf.keras.models.Sequential([
... tf.keras.layers.Input((100,)),
... # Dense layers use global policy of 'mixed_float16', which does
... # computations in float16 while keeping variables in float32.
... tf.keras.layers.Dense(10),
... tf.keras.layers.Dense(10),
... # Softmax should be done in float32 for numeric stability. We pass
... # dtype='float32' to use float32 instead of the global policy.
... tf.keras.layers.Activation('softmax', dtype='float32')
... ])
Alternatively, the policy can be passed to individual layers instead of
setting the global policy with `set_policy`:
>>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
>>> model = tf.keras.models.Sequential([
... tf.keras.layers.Input((100,)),
... tf.keras.layers.Dense(10, dtype=policy),
... tf.keras.layers.Dense(10, dtype=policy),
... # Softmax should be done in float32 for numeric stability.
... tf.keras.layers.Activation('softmax', dtype='float32')
... ])
Note the `'mixed_float16'` policy will apply loss scaling by default in
`Model.fit`, `Model.train_on_batch`, and other training methods. If no such
method is used (e.g., a custom training loop is used) and `'mixed_float16'` is
used, the loss scale must be manually applied. See
`tf.keras.mixed_precision.experimental.LossScaleOptimizer` for details. For
`'mixed_bfloat16'`, no loss scaling is done and loss scaling never needs to be
manually applied.
See [the mixed precision
guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more
information on using mixed precision
### How to use float64 in a Keras model
Using float64 is similar to mixed precision. Either the global policy can be
set to float64, or `dtype='float64'` can be passed to individual layers. For
example, to set the global policy:
>>> tf.keras.mixed_precision.experimental.set_policy('float64')
>>> model = tf.keras.models.Sequential([
... tf.keras.layers.Input((100,)),
... # All layers use global policy of 'float64', which does computations
... # and creates variables in float64.
... tf.keras.layers.Dense(10),
... tf.keras.layers.Dense(10),
... tf.keras.layers.Activation('softmax')
... ])
  >>> # Optionally set policy back to float32 if any other models use float32
>>> tf.keras.mixed_precision.experimental.set_policy('float32')
### How a layer uses its policy's compute dtype
A layer will cast its inputs to its compute dtype in TensorFlow 2. For
example:
>>> x = tf.ones((4, 4, 4, 4), dtype='float64')
>>> # `layer`'s policy defaults to float32.
>>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
  >>> # `layer` casts its inputs to its compute dtype, which is float32, and
>>> # does computations in float32.
>>> y = layer(x)
>>> y.dtype
tf.float32
Note that the base `tf.keras.layers.Layer` class inserts the casts. If
subclassing your own layer, you do not have to insert any casts.
Currently, only tensors in the first argument to the layer's `call` method are
casted. For example:
>>> class MyLayer(tf.keras.layers.Layer):
... # Bug! `b` will not be casted.
... def call(self, a, b):
... return a + 1., b + 1.
>>> a = tf.constant(1., dtype="float32")
>>> b = tf.constant(1., dtype="float32")
>>> layer = MyLayer(dtype="float64")
>>> x, y = layer(a, b)
>>> x.dtype
tf.float64
>>> y.dtype
tf.float32
If writing your own layer, it is recommended to accept tensors only in the
first argument. This way, all tensors are casted to the layer's compute dtype.
`MyLayer` should therefore be written as:
>>> class MyLayer(tf.keras.layers.Layer):
... # Now, all tensor inputs will be casted.
... def call(self, inputs):
... a, b = inputs
... return a + 1., b + 1.
>>> a = tf.constant(1., dtype="float32")
>>> b = tf.constant(1., dtype="float32")
>>> layer = MyLayer(dtype="float64")
>>> x, y = layer((a, b))
>>> x.dtype
tf.float64
>>> y.dtype
tf.float64
Other arguments are not automatically casted for technical reasons, but this
may change in a future minor release.
A layer subclass can prevent its inputs from being autocasted by passing
`autocast=False` to the layer constructor. For example:
>>> class NonAutoCastingLayer(tf.keras.layers.Layer):
... def __init__(self, **kwargs):
... kwargs['autocast'] = False
... super(NonAutoCastingLayer, self).__init__(**kwargs)
... def call(self, inp):
... return inp
>>> x = tf.ones((4, 4, 4, 4), dtype='float32')
>>> layer = NonAutoCastingLayer(dtype='float64')
  >>> y = layer(x)  # Will not cast inputs to its compute dtype of float64
>>> y.dtype
tf.float32
### How a layer uses its policy's variable dtype
The default dtype of variables created by `tf.keras.layers.Layer.add_weight`
is the layer's policy's variable dtype.
If a layer's compute and variable dtypes differ, `add_weight` will wrap
floating-point variables with a special wrapper called an `AutoCastVariable`.
This wrapper is identical to the original variable except it casts itself to
the layer's compute dtype when used within `Layer.call`. Outside `Layer.call`,
the variable is not casted.
A layer author can prevent a variable from being wrapped with an
`AutoCastVariable` by passing `experimental_autocast=False` to `add_weight`:
>>> class MyLayer(tf.keras.layers.Layer):
... def build(self, input_shape):
... self.x = self.add_weight('x')
... self.y = self.add_weight('y', experimental_autocast=False)
>>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
>>> layer = MyLayer(dtype=policy)
>>> layer.build((2, 2))
>>> layer.x
<AutoCastVariable 'x:0' shape=() dtype=float32 true_dtype=float32, numpy=...>
>>> layer.y
<tf.Variable 'y:0' shape=() dtype=float32, numpy=...>
Passing `experimental_autocast=False` is useful for layers which may
internally do some math in the variable dtype instead of the compute dtype.
For example, you may wish to compute variable statistics, such as mean and
variance, in the variable dtype.
### How to write a layer that supports mixed precision and float64.
For the most part, layers will automatically support mixed precision and
float64 without any additional work, due to the fact the base layer
automatically casts inputs, creates variables of the correct type, and in the
case of mixed precision, wraps variables with `AutoCastVariables`.
For example, this simple dense layer does not require any additional work to
support mixed precision or float64. Keras automatically casts the inputs and
variable to the appropriate dtype.
>>> class MyDense(tf.keras.layers.Layer):
... def build(self, input_shape):
... self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
... def call(self, inputs):
... return tf.matmul(inputs, self.kernel)
>>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
>>> layer = MyDense(dtype=policy)
>>> x = np.random.rand(10, 10)
>>> y = layer(x)
>>> y.dtype
tf.float16
The primary case where you need extra work to support mixed precision or
float64 is when you create a new tensor, such as with `tf.ones` or
`tf.constant`. In such cases, you must create the tensor of the correct dtype.
For example, suppose you modify the `MyDense` layer to add a random number to
the output using `tf.random.normal`. You must pass the input dtype to
`tf.random.normal` to ensure the dtypes match.
>>> class MyDense(tf.keras.layers.Layer):
... def build(self, input_shape):
... self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
... def call(self, inputs):
... rand = tf.random.normal(shape=inputs.shape, dtype=inputs.dtype)
... return tf.matmul(inputs, self.kernel) + rand
>>>
>>> layer = MyDense(dtype=policy)
>>> y = layer(x)
>>> y.dtype
tf.float16
If you did not pass `dtype=inputs.dtype` to `tf.random.normal`, a `TypeError`
would have occurred. This is because the dtype defaults to `"float32"`, so the
layer would only work if the inputs were float32.
### The deprecated "infer" policy
In addition to the above mentioned policies, a policy can also be "infer".
This Policy is deprecated, and it is not recommended. When a layer has an
infer policy, it will infer the computation and variable dtype from the first
input the first time the layer is called. Once the layer is called for the
first time, the layer's policy will change to the dtype of the first input.
In TensorFlow 1, only the "infer" policy is available.
"""
def __init__(self, name, loss_scale=USE_DEFAULT):
"""Constructs the policy.
The `name` argument determines the compute and variable dtype, the default
loss scale, and has no additional effect on the Policy. The compute and
variable dtypes can only be specified through `name`, and cannot be
specified directly.
Args:
name: A string. Can be one of the following values:
* Any dtype name, such as 'float32' or 'float64'. Both the variable and
compute dtypes will be that dtype.
* 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
bfloat16, while the variable dtype is float32. With 'mixed_float16',
a dynamic loss scale is used. These policies are used for mixed
precision training.
* 'infer' (deprecated): Infer the compute and variable dtype from the
input dtype.
loss_scale: A `tf.mixed_precision.experimental.LossScale`, an int (which
uses a `FixedLossScale`), or the string "dynamic" (which uses a
`DynamicLossScale`). Defaults to using no loss scaling unless `name` is
"mixed_float16", in which case this defaults to "dynamic". Only
`tf.keras.Model`s, not layers, use the loss scale, and it is only used
during `Model.fit`, `Model.train_on_batch`, and other similar methods.
"""
if isinstance(name, dtypes.DType):
raise TypeError("'name' must be a string, not a DType. "
"Instead, pass DType.name. Got: %s" % (name.name,))
elif not isinstance(name, six.string_types):
raise TypeError("'name' must be a string, but got: %s" % (name,))
self._name = name
self._compute_dtype, self._variable_dtype = self._parse_name(name)
if loss_scale == USE_DEFAULT:
loss_scale = 'dynamic' if name == 'mixed_float16' else None
self._using_default_loss_scale = True
else:
self._using_default_loss_scale = False
if loss_scale and self._compute_dtype not in (None, 'float16'):
tf_logging.warn('Creating a Policy with a loss scale is only useful for '
'float16 policies. You passed loss_scale=%r for policy '
'%s. Consider not passing any loss_scale instead.' %
(loss_scale, name))
self._loss_scale = keras_loss_scale_module.get(loss_scale)
    if name in ('mixed_float16', 'mixed_bfloat16'):
device_compatibility_check.log_device_compatibility_check(name)
def _parse_name(self, name):
"""Parses a Policy name into a compute and variable dtype.
Args:
name: The name of the policy:
Returns:
The (compute_dtype, variable_dtype) pair.
"""
if name.endswith('_float32_vars'):
error_msg = ('Policies ending in \'_float32_vars\' have been removed '
'from TensorFlow.')
if name in ('infer_float32_vars', 'infer_with_float32_vars'):
error_msg += (' Please use the \'mixed_float16\' or \'mixed_bfloat16\' '
'policy instead.')
elif name == 'float16_with_float32_vars':
error_msg += (' Please use the \'mixed_float16\' policy instead.')
elif name == 'bfloat16_with_float32_vars':
error_msg += (' Please use the \'mixed_bfloat16\' policy instead.')
error_msg += ' Got policy name: \'%s\'' % name
raise ValueError(error_msg)
if name == 'mixed_float16':
return 'float16', 'float32'
elif name == 'mixed_bfloat16':
return 'bfloat16', 'float32'
elif name == 'infer':
return None, None
try:
dtype = dtypes.as_dtype(name).name
except TypeError:
      error = ("Cannot convert value %s to a mixed precision Policy. "
               "Valid policies include 'mixed_float16', "
"'mixed_bfloat16', and the name of any dtype such as "
"'float32'." % (name,))
# six.raise_from suppresses the original TypeError from being raised
six.raise_from(ValueError(error), None)
return dtype, dtype
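  # Illustrative results of _parse_name (editor's note, not original code):
  #   'mixed_float16'  -> ('float16',  'float32')
  #   'mixed_bfloat16' -> ('bfloat16', 'float32')
  #   'float64'        -> ('float64',  'float64')
  #   'infer'          -> (None, None)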
@property
def variable_dtype(self):
"""The variable dtype of this policy.
This is the dtype layers will create their variables in, unless a layer
explicitly chooses a different dtype. If this is different than
`Policy.compute_dtype`, Layers will cast variables to the compute dtype to
avoid type errors.
Returns:
The variable dtype of this policy, or None if the variable dtype should be
inferred from the inputs.
"""
return self._variable_dtype
@property
def compute_dtype(self):
"""The compute dtype of this policy.
This is the dtype layers will do their computations in.
Note that even if the compute dtype is float16 or bfloat16, hardware devices
may not do individual adds, multiplies, and other fundamental operations in
[b]float16, but instead may do some of them in float32 for numeric
stability. The compute dtype is the dtype of the inputs and outputs of the
TensorFlow ops that the layer executes. Internally, many TensorFlow ops will
do certain internal calculations in float32, or some other device-internal
intermediate format with higher precision than [b]float16, to increase
numeric stability.
For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a
float16 compute dtype, will pass float16 inputs to tf.matmul. But, tf.matmul
    will use float32 intermediate math. The performance benefit of float16 is
still apparent, due to increased memory bandwidth and the fact modern GPUs
have specialized hardware for computing matmuls on float16 while still
keeping intermediate computations in float32.
Returns:
The compute dtype of this policy, or None if the compute dtype should be
inferred from the inputs.
"""
return self._compute_dtype
@property
def should_cast_variables(self):
"""Returns True if variables should be casted.
This is true if the variable dtype is not the same as the compute dtype.
Returns:
True, if variables should be casted.
"""
return self.variable_dtype != self.compute_dtype
@property
def loss_scale(self):
"""Returns the loss scale of this Policy.
Returns:
A `tf.mixed_precision.experimental.LossScale`, or None.
"""
return self._loss_scale
@property
def name(self):
"""Returns the name of this policy."""
return self._name
def __repr__(self):
return '<Policy "%s", loss_scale=%s>' % (self._name, self.loss_scale)
def get_config(self):
config = {
'name': self.name
}
if not self._using_default_loss_scale:
# We only include the loss scale if the default loss scale is not used.
# This allows us to change the loss scale config format without breaking
# users who use the default loss scale.
config['loss_scale'] = keras_loss_scale_module.serialize(self.loss_scale)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
if 'loss_scale' in config and isinstance(config['loss_scale'], dict):
config = config.copy()
config['loss_scale'] = keras_loss_scale_module.deserialize(
config['loss_scale'], custom_objects=custom_objects)
return cls(**config)
# The current global policy in effect. If None, it means the current value of
# floatx should be used as the policy if the V2 dtype behavior is enabled,
# or "infer" otherwise.
# TODO(reedwm): Make this thread local?
_global_policy = None
@keras_export('keras.mixed_precision.experimental.global_policy')
def global_policy():
"""Returns the global Policy.
The global policy is the default policy used for layers, if no policy is
passed to the layer constructor. If no policy has been set with
`keras.mixed_precision.experimental.set_policy`, this will return a policy
constructed from `tf.keras.backend.floatx()` in TensorFlow 2 (floatx defaults
to float32), or an "infer" policy in TensorFlow 1.
See `keras.mixed_precision.experimental.Policy` for more information.
Returns:
The global Policy.
"""
if _global_policy is None:
if base_layer_utils.v2_dtype_behavior_enabled():
return Policy(backend.floatx())
else:
return Policy('infer')
return _global_policy
def policy_defaults_to_floatx():
"""Returns True if `global_policy()` will use the current value of floatx."""
return _global_policy is None and base_layer_utils.v2_dtype_behavior_enabled()
def _check_if_mixed_precision_graph_rewrite_is_enabled():
# TODO(reedwm): Update this comment once the Keras API is complete.
if mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled:
raise ValueError(
'The mixed precision policy cannot be set, because the mixed '
'precision graph rewrite has already been enabled.\n'
'At most, one of the following functions can be called:\n\n'
' 1. tf.train.experimental.enable_mixed_precision_graph_rewrite() '
'(You called this first)\n'
' 2. tf.keras.mixed_precision.experimental.set_policy() (You called '
'this second)\n\n'
'You called both functions, which is an error, because both functions '
'enable you to use mixed precision. If in doubt which function to use, '
'use the second, as it supports Eager execution and is more '
'customizable.')
@keras_export('keras.mixed_precision.experimental.set_policy')
def set_policy(policy):
"""Sets the global Policy.
The global policy is the default policy used for layers, if no policy is
passed to the layer constructor. If no global policy is set, layers will
instead default to a Policy constructed from `tf.keras.backend.floatx()` in
TensorFlow 2. In TensorFlow 1, layers default to an "infer" policy.
See `keras.mixed_precision.experimental.Policy` for more information.
Args:
    policy: A Policy, or a string that will be converted to a Policy.
"""
global _global_policy
_check_if_mixed_precision_graph_rewrite_is_enabled()
if policy is not None and not isinstance(policy, Policy):
policy = Policy(policy)
if (policy and not base_layer_utils.v2_dtype_behavior_enabled() and
policy.compute_dtype):
raise ValueError(
'The global policy can only be set to a non-infer policy in TensorFlow '
'2')
_global_policy = policy
mixed_precision_global_state.using_default_mixed_precision_policy = (
_global_policy is None)
# TODO(reedwm): Make this thread local
@contextlib.contextmanager
def policy_scope(policy):
"""A context manager that sets the global Policy under it.
Args:
    policy: A Policy, or a string that will be converted to a Policy.
Yields:
Nothing.
"""
old_policy = _global_policy
try:
set_policy(policy)
yield
finally:
set_policy(old_policy)
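# Example usage of policy_scope (editor's sketch, not part of the original module):
#
#   with policy_scope('mixed_float16'):
#     layer = tf.keras.layers.Dense(10)  # created under the scoped global policy
#   # the previous global policy is restored once the block exits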
def _is_convertible_to_dtype(dtype):
try:
dtypes.as_dtype(dtype)
return True
except TypeError:
return False
def _policy_equivalent_to_dtype(policy):
"""Returns True if the Policy is equivalent to a single dtype.
A policy is equivalent to a single dtype if the policy's compute and variable
dtypes are the same and the policy does not cause the layer/model to have
additional behavior, such as loss scaling.
The "infer" policy is considered equivalent to a single dtype.
Args:
policy: A Policy.
Returns:
True, if the policy is equivalent to a single dtype.
"""
  # We use type() instead of isinstance because a subclass of Policy is never
# equivalent to a dtype.
return (type(policy) == Policy and # pylint: disable=unidiomatic-typecheck
list(policy.get_config().keys()) == ['name'] and
(policy.name == 'infer' or _is_convertible_to_dtype(policy.name)))
def serialize(policy):
if _policy_equivalent_to_dtype(policy):
# We return either None or the policy name for compatibility with older
# versions of Keras. If the policy name is returned, it is a dtype string
# such as 'float32'.
return None if policy.name == 'infer' else policy.name
return generic_utils.serialize_keras_object(policy)
def deserialize(config, custom_objects=None):
if isinstance(config, str) and _is_convertible_to_dtype(config):
return Policy(config)
if config is None:
return Policy('infer')
module_objects = {'Policy': Policy}
return generic_utils.deserialize_keras_object(
config,
module_objects=module_objects,
custom_objects=custom_objects,
printable_module_name='dtype policy')
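# Illustrative behaviour of serialize/deserialize (editor's note, not original code):
#   serialize(Policy('float32'))        -> 'float32' (dtype-equivalent policies
#                                          collapse to a plain dtype string)
#   serialize(Policy('mixed_float16'))  -> a config dict produced by
#                                          generic_utils.serialize_keras_object
#   deserialize('float16')              -> Policy('float16')
#   deserialize(None)                   -> Policy('infer')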
| apache-2.0 | 4,548,425,901,872,756,700 | 39.958665 | 102 | 0.695843 | false | 3.929083 | true | false | false |
googleapis/googleapis-gen | google/cloud/networkmanagement/v1/networkmanagement-v1-py/google/cloud/network_management_v1/services/reachability_service/transports/grpc.py | 1 | 21150 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.network_management_v1.types import connectivity_test
from google.cloud.network_management_v1.types import reachability
from google.longrunning import operations_pb2 # type: ignore
from .base import ReachabilityServiceTransport, DEFAULT_CLIENT_INFO
class ReachabilityServiceGrpcTransport(ReachabilityServiceTransport):
"""gRPC backend transport for ReachabilityService.
The Reachability service in the Google Cloud Network
Management API provides services that analyze the reachability
within a single Google Virtual Private Cloud (VPC) network,
between peered VPC networks, between VPC and on-premises
networks, or between VPC networks and internet hosts. A
reachability analysis is based on Google Cloud network
configurations.
You can use the analysis results to verify these configurations
and to troubleshoot connectivity issues.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'networkmanagement.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'networkmanagement.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_connectivity_tests(self) -> Callable[
[reachability.ListConnectivityTestsRequest],
reachability.ListConnectivityTestsResponse]:
r"""Return a callable for the list connectivity tests method over gRPC.
Lists all Connectivity Tests owned by a project.
Returns:
Callable[[~.ListConnectivityTestsRequest],
~.ListConnectivityTestsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_connectivity_tests' not in self._stubs:
self._stubs['list_connectivity_tests'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/ListConnectivityTests',
request_serializer=reachability.ListConnectivityTestsRequest.serialize,
response_deserializer=reachability.ListConnectivityTestsResponse.deserialize,
)
return self._stubs['list_connectivity_tests']
@property
def get_connectivity_test(self) -> Callable[
[reachability.GetConnectivityTestRequest],
connectivity_test.ConnectivityTest]:
r"""Return a callable for the get connectivity test method over gRPC.
Gets the details of a specific Connectivity Test.
Returns:
Callable[[~.GetConnectivityTestRequest],
~.ConnectivityTest]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_connectivity_test' not in self._stubs:
self._stubs['get_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/GetConnectivityTest',
request_serializer=reachability.GetConnectivityTestRequest.serialize,
response_deserializer=connectivity_test.ConnectivityTest.deserialize,
)
return self._stubs['get_connectivity_test']
@property
def create_connectivity_test(self) -> Callable[
[reachability.CreateConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the create connectivity test method over gRPC.
Creates a new Connectivity Test. After you create a test, the
reachability analysis is performed as part of the long running
operation, which completes when the analysis completes.
If the endpoint specifications in ``ConnectivityTest`` are
invalid (for example, containing non-existent resources in the
network, or you don't have read permissions to the network
configurations of listed projects), then the reachability result
returns a value of ``UNKNOWN``.
If the endpoint specifications in ``ConnectivityTest`` are
incomplete, the reachability result returns a value of
AMBIGUOUS. For more information, see the Connectivity Test
documentation.
Returns:
Callable[[~.CreateConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_connectivity_test' not in self._stubs:
self._stubs['create_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/CreateConnectivityTest',
request_serializer=reachability.CreateConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['create_connectivity_test']
@property
def update_connectivity_test(self) -> Callable[
[reachability.UpdateConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the update connectivity test method over gRPC.
Updates the configuration of an existing ``ConnectivityTest``.
After you update a test, the reachability analysis is performed
as part of the long running operation, which completes when the
analysis completes. The Reachability state in the test resource
is updated with the new result.
If the endpoint specifications in ``ConnectivityTest`` are
invalid (for example, they contain non-existent resources in the
network, or the user does not have read permissions to the
network configurations of listed projects), then the
reachability result returns a value of UNKNOWN.
If the endpoint specifications in ``ConnectivityTest`` are
incomplete, the reachability result returns a value of
``AMBIGUOUS``. See the documentation in ``ConnectivityTest`` for
        more details.
Returns:
Callable[[~.UpdateConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_connectivity_test' not in self._stubs:
self._stubs['update_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/UpdateConnectivityTest',
request_serializer=reachability.UpdateConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['update_connectivity_test']
@property
def rerun_connectivity_test(self) -> Callable[
[reachability.RerunConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the rerun connectivity test method over gRPC.
Rerun an existing ``ConnectivityTest``. After the user triggers
the rerun, the reachability analysis is performed as part of the
long running operation, which completes when the analysis
completes.
Even though the test configuration remains the same, the
reachability result may change due to underlying network
configuration changes.
If the endpoint specifications in ``ConnectivityTest`` become
invalid (for example, specified resources are deleted in the
network, or you lost read permissions to the network
configurations of listed projects), then the reachability result
returns a value of ``UNKNOWN``.
Returns:
Callable[[~.RerunConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'rerun_connectivity_test' not in self._stubs:
self._stubs['rerun_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/RerunConnectivityTest',
request_serializer=reachability.RerunConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['rerun_connectivity_test']
@property
def delete_connectivity_test(self) -> Callable[
[reachability.DeleteConnectivityTestRequest],
operations_pb2.Operation]:
r"""Return a callable for the delete connectivity test method over gRPC.
Deletes a specific ``ConnectivityTest``.
Returns:
Callable[[~.DeleteConnectivityTestRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_connectivity_test' not in self._stubs:
self._stubs['delete_connectivity_test'] = self.grpc_channel.unary_unary(
'/google.cloud.networkmanagement.v1.ReachabilityService/DeleteConnectivityTest',
request_serializer=reachability.DeleteConnectivityTestRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_connectivity_test']
__all__ = (
'ReachabilityServiceGrpcTransport',
)
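# Example usage (editor's sketch, not part of the original module; assumes
# application-default credentials and a hypothetical project path):
#
#   transport = ReachabilityServiceGrpcTransport()
#   request = reachability.ListConnectivityTestsRequest(
#       parent="projects/my-project/locations/global")
#   response = transport.list_connectivity_tests(request)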
| apache-2.0 | 3,437,077,281,151,357,400 | 45.792035 | 96 | 0.636359 | false | 4.8947 | true | false | false |
VlachosGroup/VlachosGroupAdditivity | pgradd/DrawMol.py | 1 | 2230 | """
===========================================================
Definition to draw RDKit mol object (:mod:`pgradd.DrawMol`)
===========================================================
Converts an RDKit mol object to an SVG image and displays it.
"""
from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
from IPython.display import SVG, display
# http://rdkit.blogspot.com/2015/02/new-drawing-code.html
def moltosvg(mol, highlight=[], molSize=(400, 400), kekulize=True):
mc = Chem.Mol(mol.ToBinary())
if kekulize:
try:
Chem.Kekulize(mc)
except Exception:
mc = Chem.Mol(mol.ToBinary())
if not mc.GetNumConformers():
rdDepictor.Compute2DCoords(mc)
drawer = rdMolDraw2D.MolDraw2DSVG(molSize[0], molSize[1])
# Atom Label
opts = drawer.drawOptions()
# Atom name and index
for i in range(mol.GetNumAtoms()):
opts.atomLabels[i] = mol.GetAtomWithIdx(i).GetSymbol()+str(i)
# radicals and charges
for atom in mol.GetAtoms():
nr = atom.GetNumRadicalElectrons()
nc = atom.GetFormalCharge()
if nr > 0:
string = atom.GetSymbol() + ':'*divmod(nr, 2)[0] +\
'.'*divmod(nr, 2)[1]
opts.atomLabels[atom.GetIdx()] += string
elif nc == 1:
string = atom.GetSymbol() + '+'
opts.atomLabels[atom.GetIdx()] += string
elif nc > 1:
string = atom.GetSymbol() + '+' + str(nc)
opts.atomLabels[atom.GetIdx()] += string
elif nc == -1:
string = atom.GetSymbol() + '-'
opts.atomLabels[atom.GetIdx()] += string
elif nc < -1:
string = atom.GetSymbol() + '-' + str(nc)
opts.atomLabels[atom.GetIdx()] += string
# highlight
if highlight:
drawer.DrawMolecule(mc, highlightAtoms=highlight)
else:
drawer.DrawMolecule(mc)
drawer.FinishDrawing()
svg = drawer.GetDrawingText()
# It seems that the svg renderer used doesn't quite hit the spec.
# Here are some fixes to make it work in the notebook, although I think
# the underlying issue needs to be resolved at the generation step
svg.replace('svg:', '')
display(SVG(svg))
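# Example usage (editor's sketch, not part of the original module):
#   mol = Chem.MolFromSmiles('c1ccccc1O')  # phenol
#   moltosvg(mol, highlight=[0, 1, 2])     # displays the annotated SVG inline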
| mit | 5,404,241,152,769,177,000 | 32.283582 | 75 | 0.58296 | false | 3.506289 | false | false | false |
aldebaran/qibuild | python/qitest/parsers.py | 1 | 7334 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Collection of parser functions for qitest actions """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import qisys.parsers
import qitest.project
import qibuild.parsers
class EmptyTestListException(Exception):
""" No test to run exception """
pass
def test_parser(parser, with_num_jobs=True):
""" Test Parser """
qisys.parsers.worktree_parser(parser)
group = parser.add_argument_group("test options")
group.add_argument("--perf", dest="perf", action="store_true",
help="run perfs tests instead of pure tests.")
group.add_argument("-k", "--pattern", dest="patterns", action="append",
help="Filter tests matching these patterns")
group.add_argument("-x", "--exclude", dest="excludes", action="append",
help="Exclude test matching these patterns")
group.add_argument("-V", dest="verbose_tests", action="store_true",
help="display tests output")
group.add_argument("--valgrind", dest="valgrind", action="store_true",
help="run tests under valgrind")
group.add_argument("--nightmare", dest="nightmare", action="store_true",
help="run tests in shuffle and 20 times (apply only to gtest)")
group.add_argument("--coverage", dest="coverage", action="store_true",
help="run coverage")
group.add_argument("--ncpu", dest="num_cpus", default=-1, type=int,
help="set number of CPU each test is allowed to use (linux)")
group.add_argument("--nightly", action="store_true", dest="nightly")
group.add_argument("--break-on-failure", action="store_true", dest="break_on_failure",
help="Break on failure (for gtest only)")
group.add_argument("--repeat-until-fail", default=0, type=int, metavar="N",
help="Repeat tests until they fail (at most N times)")
group.add_argument("--qitest-json", dest="qitest_jsons", action="append")
group.add_argument("--test-output-dir", type=os.path.abspath,
dest="test_output_dir",
help="Generate XML test reports in the given directory "
"(instead of build-<platform>/sdk/test-results)")
group.add_argument("--coverage-output-dir", dest="coverage_output_dir",
help="Generate XML and HTML coverage reports in the given "
"directory (instead of build-<platform>/sdk/coverage-results)")
group.add_argument("--root-output-dir", dest="test_output_dir", metavar="ROOT_OUTPUT_DIR",
help="same as --test-output-dir (deprecated)")
group.add_argument("--no-capture", dest="capture", action="store_false")
group.add_argument("--ignore-timeouts", dest="ignore_timeouts", action="store_true",
help="Ignore timeouts when running tests")
group.add_argument("--lf", "--last-failed", dest="last_failed", action="store_true",
help="Run the failing test from previous run")
group.add_argument("--allow-no-test", dest="allow_no_test", action="store_true",
help="Don't fail if no tests to run")
parser.set_defaults(nightly=False, capture=True, last_failed=False,
ignore_timeouts=False)
if with_num_jobs:
qisys.parsers.parallel_parser(group, default=1)
return group
def get_test_runner(args, build_project=None, qitest_json=None):
""" Get Test Runner """
test_project = None
if not qitest_json:
qitest_json = vars(args).get("qitest_json")
if not qitest_json:
candidate = os.path.join(os.getcwd(), "qitest.json")
if os.path.exists(candidate):
qitest_json = candidate
if qitest_json:
test_project = qitest.project.TestProject(qitest_json)
if not test_project:
if build_project:
test_project = build_project.to_test_project()
else:
return None
test_runner = qibuild.test_runner.ProjectTestRunner(test_project)
if build_project:
test_runner.cwd = build_project.sdk_directory
test_runner.env = build_project.build_worktree.get_env()
else:
test_runner.cwd = qisys.sh.to_native_path(os.path.dirname(qitest_json))
test_runner.patterns = args.patterns
test_runner.excludes = args.excludes
test_runner.perf = args.perf
test_runner.coverage = args.coverage
test_runner.break_on_failure = args.break_on_failure
test_runner.valgrind = args.valgrind
test_runner.verbose = args.verbose_tests
test_runner.num_cpus = args.num_cpus
test_runner.num_jobs = args.num_jobs
test_runner.repeat_until_fail = args.repeat_until_fail
test_runner.nightly = args.nightly
test_runner.nightmare = args.nightmare
test_runner.test_output_dir = args.test_output_dir
test_runner.capture = args.capture
test_runner.last_failed = args.last_failed
test_runner.ignore_timeouts = args.ignore_timeouts
return test_runner
def parse_build_projects(args):
""" Parse Build Projects """
res = list()
try:
build_worktree = qibuild.parsers.get_build_worktree(args)
solve_deps = False
if args.use_deps:
solve_deps = True
build_projects = qibuild.parsers.get_build_projects(
build_worktree,
args, solve_deps=solve_deps)
for build_project in build_projects:
test_runner = None
try:
test_runner = get_test_runner(args, build_project=build_project)
except qibuild.project.NoQiTestJson:
pass
if test_runner:
res.append(test_runner)
except (qisys.worktree.NotInWorkTree, qibuild.parsers.CouldNotGuessProjectName):
pass
return res
def get_test_runners(args):
""" Get Test Runners """
res = list()
qitest_jsons = args.qitest_jsons or list()
# first case: qitest.json in current working directory
test_runner = get_test_runner(args)
if test_runner:
res.append(test_runner)
# second case: qitest.json specified with --qitest-json
for qitest_json in qitest_jsons:
test_runner = get_test_runner(args, qitest_json=qitest_json)
res.append(test_runner)
# third case: parsing build projects
build_projects_runners = parse_build_projects(args)
# avoid appending a test_runner guessed from a build project
# when res already contains a test runner computed from a
# --qitest-json argument
known_cwds = [x.cwd for x in res]
for test_runner in build_projects_runners:
if test_runner.cwd not in known_cwds:
res.append(test_runner)
if args.coverage and not build_projects_runners:
raise Exception("""--coverage can only be used from a qibuild CMake project\n""")
elif args.coverage:
return build_projects_runners
if not res:
raise EmptyTestListException("Nothing found to test")
return res
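# Example wiring (editor's sketch, not part of the original module; the command
# line shown is hypothetical):
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   test_parser(parser)                         # adds the "test options" group
#   args = parser.parse_args(["-k", "mytest"])
#   runners = get_test_runners(args)            # raises EmptyTestListException
#                                               # if nothing is found to test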
| bsd-3-clause | 6,434,639,803,409,143,000 | 43.993865 | 94 | 0.637715 | false | 3.890716 | true | false | false |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractCurrentlyTLingBuniMi.py | 1 | 1148 | def extractCurrentlyTLingBuniMi(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if item['title'].startswith('[BNM]'):
return buildReleaseMessageWithType(item, 'Bu ni Mi wo Sasagete Hyaku to Yonen. Elf de Yarinaosu Musha Shugyou', vol, chp, frag=frag, postfix=postfix)
if item['title'].startswith('[DD]'):
return buildReleaseMessageWithType(item, 'Doll Dungeon', vol, chp, frag=frag, postfix=postfix)
if item['title'].startswith('[HCLS]'):
return buildReleaseMessageWithType(item, 'High Comprehension Low Strength', vol, chp, frag=frag, postfix=postfix)
tagmap = [
('Abyss Domination', 'Abyss Domination', 'translated'),
('Nine Yang Sword Saint', 'Nine Yang Sword Saint', 'translated'),
('Mysterious World Beast God', 'Mysterious World Beast God', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | bsd-3-clause | -392,690,096,227,605,250 | 44.96 | 151 | 0.690767 | false | 3.045093 | false | false | false |
Froff/TFY4115-Simulering | python/Simulation.py | 1 | 1185 | from math import sqrt
import Slope
class Simulation:
SIM_STEP_SIZE = 0.0001
const_g = -981
def __init__ (self, slope, **kwargs):
self.slope = slope
self.t = [0]
self.x = [Simulation.SIM_STEP_SIZE]
self.mom_inertia_coefficient = 0
for name, value in kwargs.items():
if name == "startingposition":
self.x = [value]
if name == "momentofintertiacoefficient":
self.mom_inertia_coefficient = value
def runSimulation(self):
while not self.isFinished():
self.step()
def step (self):
x = self.x[-1]
dydx = self.slope.dydx(x)
y = self.slope.f(x) - self.slope.f(0)
I = self.mom_inertia_coefficient
g = Simulation.const_g
step_size = Simulation.SIM_STEP_SIZE
try:
self.x.append(x + step_size * sqrt( (2*g*y) / ( (1 + I) * (1 + dydx**2) ) ))
self.t.append(self.t[-1] + Simulation.SIM_STEP_SIZE)
except ValueError:
print("Math domain error. x={}, y={}".format(x, y))
exit(2)
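    # Editor's note (added explanation, not original code): the update in step()
    # follows from energy conservation for a rolling body,
    #   (1/2) * (1 + I) * v**2 = g * y      (g and y are both negative here),
    # combined with dx/dt = v / sqrt(1 + dydx**2), which gives
    #   dx/dt = sqrt(2*g*y / ((1 + I) * (1 + dydx**2)))
    # and is integrated with a forward-Euler step of size SIM_STEP_SIZE.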
def isFinished (self):
return self.x[-1] >= self.slope.end
| mit | 6,737,321,104,293,273,000 | 30.184211 | 88 | 0.533333 | false | 3.395415 | false | false | false |
erccarls/vectorsearch | vectorsearch/word2vec.py | 1 | 4242 | from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools
import gensim
from gensim.utils import keep_vocab_item
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType
logger = logging.getLogger(__name__)
try:
from gensim.models.word2vec_inner import train_batch_sg, train_batch_cbow
from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
from gensim.models.word2vec_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
MAX_WORDS_IN_BATCH = 10000
class Word2Vec(gensim.models.Word2Vec):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self._stem_memory = defaultdict(set)
def most_similar(self, words={}, topn=10, restrict_vocab=None):
"""
Find the top-N most similar words.
words : a dict where the words are the keys and the weights are the values.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words and the vectors for each word in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
If topn is False, most_similar returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
>>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
"""
self.init_sims()
# if isinstance(positive, string_types) and not negative:
# # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
# positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
# positive = [
# (word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
# for word in positive
# ]
# negative = [
# (word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
# for word in negative
# ]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in words.items():
if isinstance(word, ndarray):
mean.append(weight * word)
elif word in self.vocab:
mean.append(weight * self.syn0norm[self.vocab[word].index])
all_words.add(self.vocab[word].index)
else:
Warning("word '%s' not in vocabulary" % word)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
limited = self.syn0norm if restrict_vocab is None else self.syn0norm[:restrict_vocab]
dists = dot(limited, mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
| apache-2.0 | -2,757,497,388,881,234,400 | 38.654206 | 116 | 0.656294 | false | 4.055449 | false | false | false |
CloudBreadPaPa/azure-ml-python-seminar | code/python/ml-Iris.py | 1 | 1412 | import urllib2
# If you are using Python 3+, import urllib instead of urllib2
import json
data = {
"Inputs": {
"input1":
{
"ColumnNames": ["Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species"],
"Values": [ [ "1", "1", "1", "1", "" ], ]
}, },
"GlobalParameters": {
}
}
body = str.encode(json.dumps(data))
url = 'https://asiasoutheast.services.azureml.net/workspaces/46d0e60b05b34558827abd41f11d204f/services/acac88a083ce443789028306375ddf56/execute?api-version=2.0&details=true'
api_key = '<change here>' # Replace this with the API key for the web service
headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
req = urllib2.Request(url, body, headers)
try:
response = urllib2.urlopen(req)
# If you are using Python 3+, replace urllib2 with urllib.request in the above code:
# req = urllib.request.Request(url, body, headers)
# response = urllib.request.urlopen(req)
result = response.read()
print(result)
except urllib2.HTTPError, error:
print("The request failed with status code: " + str(error.code))
# Print the headers - they include the requert ID and the timestamp, which are useful for debugging the failure
print(error.info())
print(json.loads(error.read()))
| mit | -7,397,852,236,911,984,000 | 30.377778 | 173 | 0.626771 | false | 3.49505 | false | false | false |
wisechengyi/pants | src/python/pants/util/collections.py | 1 | 3201 | # Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import collections
import collections.abc
from typing import Any, Callable, DefaultDict, Iterable, List, MutableMapping, Type, TypeVar, Union
_K = TypeVar("_K")
_V = TypeVar("_V")
def factory_dict(value_factory: Callable[[_K], _V], *args, **kwargs) -> DefaultDict:
"""A dict whose values are computed by `value_factory` when a `__getitem__` key is missing.
Note that values retrieved by any other method will not be lazily computed; eg: via `get`.
:param value_factory:
:param *args: Any positional args to pass through to `dict`.
:param **kwrags: Any kwargs to pass through to `dict`.
"""
class FactoryDict(collections.defaultdict):
@staticmethod
def __never_called():
raise AssertionError(
"The default factory should never be called since we override " "__missing__."
)
def __init__(self):
super().__init__(self.__never_called, *args, **kwargs)
def __missing__(self, key):
value = value_factory(key)
self[key] = value
return value
return FactoryDict()
def recursively_update(d: MutableMapping, d2: MutableMapping) -> None:
"""dict.update but which merges child dicts (dict2 takes precedence where there's conflict)."""
for k, v in d2.items():
if k in d:
if isinstance(v, dict):
recursively_update(d[k], v)
continue
d[k] = v
_T = TypeVar("_T")
def assert_single_element(iterable: Iterable[_T]) -> _T:
"""Get the single element of `iterable`, or raise an error.
:raise: :class:`StopIteration` if there is no element.
:raise: :class:`ValueError` if there is more than one element.
"""
it = iter(iterable)
first_item = next(it)
try:
next(it)
except StopIteration:
return first_item
raise ValueError(f"iterable {iterable!r} has more than one element.")
def ensure_list(val: Union[Any, Iterable[Any]], *, expected_type: Type[_T]) -> List[_T]:
"""Given either a single value or an iterable of values, always return a list.
This performs runtime type checking to ensure that every element of the list is the expected
type.
"""
if isinstance(val, expected_type):
return [val]
if not isinstance(val, collections.abc.Iterable):
raise ValueError(
f"The value {val} (type {type(val)}) did not have the expected type {expected_type} "
"nor was it an iterable."
)
result: List[_T] = []
for i, x in enumerate(val):
if not isinstance(x, expected_type):
raise ValueError(
f"Not all elements of the iterable have type {expected_type}. Encountered the "
f"element {x} of type {type(x)} at index {i}."
)
result.append(x)
return result
def ensure_str_list(val: Union[str, Iterable[str]]) -> List[str]:
"""Given either a single string or an iterable of strings, always return a list."""
return ensure_list(val, expected_type=str)
| apache-2.0 | 1,141,446,506,871,677,600 | 32.34375 | 99 | 0.621993 | false | 4.046776 | false | false | false |
devdelay/home-assistant | homeassistant/util/__init__.py | 1 | 13534 | """Helper methods for various modules."""
from collections.abc import MutableSet
from itertools import chain
import threading
import queue
from datetime import datetime
import re
import enum
import socket
import random
import string
from functools import wraps
from types import MappingProxyType
from typing import Any, Sequence
from .dt import as_local, utcnow
RE_SANITIZE_FILENAME = re.compile(r'(~|\.\.|/|\\)')
RE_SANITIZE_PATH = re.compile(r'(~|\.(\.)+)')
RE_SLUGIFY = re.compile(r'[^a-z0-9_]+')
def sanitize_filename(filename):
r"""Sanitize a filename by removing .. / and \\."""
return RE_SANITIZE_FILENAME.sub("", filename)
def sanitize_path(path):
"""Sanitize a path by removing ~ and .."""
return RE_SANITIZE_PATH.sub("", path)
def slugify(text: str) -> str:
"""Slugify a given text."""
text = text.lower().replace(" ", "_")
return RE_SLUGIFY.sub("", text)
def repr_helper(inp: Any) -> str:
"""Help creating a more readable string representation of objects."""
if isinstance(inp, (dict, MappingProxyType)):
return ", ".join(
repr_helper(key)+"="+repr_helper(item) for key, item
in inp.items())
elif isinstance(inp, datetime):
return as_local(inp).isoformat()
else:
return str(inp)
def convert(value, to_type, default=None):
"""Convert value to to_type, returns default if fails."""
try:
return default if value is None else to_type(value)
except (ValueError, TypeError):
# If value could not be converted
return default
def ensure_unique_string(preferred_string: str,
current_strings: Sequence[str]) -> str:
"""Return a string that is not present in current_strings.
If preferred string exists will append _2, _3, ..
"""
test_string = preferred_string
current_strings_set = set(current_strings)
tries = 1
while test_string in current_strings_set:
tries += 1
test_string = "{}_{}".format(preferred_string, tries)
return test_string
# Taken from: http://stackoverflow.com/a/11735897
def get_local_ip():
"""Try to determine the local IP address of the machine."""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Use Google Public DNS server to determine own IP
sock.connect(('8.8.8.8', 80))
return sock.getsockname()[0]
except socket.error:
return socket.gethostbyname(socket.gethostname())
finally:
sock.close()
# Taken from http://stackoverflow.com/a/23728630
def get_random_string(length=10):
"""Return a random string with letters and digits."""
generator = random.SystemRandom()
source_chars = string.ascii_letters + string.digits
return ''.join(generator.choice(source_chars) for _ in range(length))
class OrderedEnum(enum.Enum):
"""Taken from Python 3.4.0 docs."""
# pylint: disable=no-init, too-few-public-methods
def __ge__(self, other):
"""Return the greater than element."""
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
"""Return the greater element."""
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
"""Return the lower than element."""
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
"""Return the lower element."""
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
class OrderedSet(MutableSet):
"""Ordered set taken from http://code.activestate.com/recipes/576694/."""
def __init__(self, iterable=None):
"""Initialize the set."""
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
"""Return the length of the set."""
return len(self.map)
def __contains__(self, key):
"""Check if key is in set."""
return key in self.map
def add(self, key):
"""Add an element to the end of the set."""
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def promote(self, key):
"""Promote element to beginning of the set, add if not there."""
if key in self.map:
self.discard(key)
begin = self.end[2]
curr = begin[1]
curr[2] = begin[1] = self.map[key] = [key, curr, begin]
def discard(self, key):
"""Discard an element from the set."""
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
"""Iteration of the set."""
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
"""Reverse the ordering."""
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True): # pylint: disable=arguments-differ
"""Pop element of the end of the set.
Set last=False to pop from the beginning.
"""
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def update(self, *args):
"""Add elements from args to the set."""
for item in chain(*args):
self.add(item)
def __repr__(self):
"""Return the representation."""
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
"""Return the comparision."""
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
class Throttle(object):
"""A class for throttling the execution of tasks.
This method decorator adds a cooldown to a method to prevent it from being
called more then 1 time within the timedelta interval `min_time` after it
returned its result.
Calling a method a second time during the interval will return None.
Pass keyword argument `no_throttle=True` to the wrapped method to make
the call not throttled.
Decorator takes in an optional second timedelta interval to throttle the
'no_throttle' calls.
Adds a datetime attribute `last_call` to the method.
"""
# pylint: disable=too-few-public-methods
def __init__(self, min_time, limit_no_throttle=None):
"""Initialize the throttle."""
self.min_time = min_time
self.limit_no_throttle = limit_no_throttle
def __call__(self, method):
"""Caller for the throttle."""
if self.limit_no_throttle is not None:
method = Throttle(self.limit_no_throttle)(method)
# Different methods that can be passed in:
# - a function
# - an unbound function on a class
# - a method (bound function on a class)
# We want to be able to differentiate between function and unbound
# methods (which are considered functions).
# All methods have the classname in their qualname seperated by a '.'
# Functions have a '.' in their qualname if defined inline, but will
# be prefixed by '.<locals>.' so we strip that out.
is_func = (not hasattr(method, '__self__') and
'.' not in method.__qualname__.split('.<locals>.')[-1])
@wraps(method)
def wrapper(*args, **kwargs):
"""Wrapper that allows wrapped to be called only once per min_time.
If we cannot acquire the lock, it is running so return None.
"""
# pylint: disable=protected-access
if hasattr(method, '__self__'):
host = method.__self__
elif is_func:
host = wrapper
else:
host = args[0] if args else wrapper
if not hasattr(host, '_throttle'):
host._throttle = {}
if id(self) not in host._throttle:
host._throttle[id(self)] = [threading.Lock(), None]
throttle = host._throttle[id(self)]
if not throttle[0].acquire(False):
return None
# Check if method is never called or no_throttle is given
force = not throttle[1] or kwargs.pop('no_throttle', False)
try:
if force or utcnow() - throttle[1] > self.min_time:
result = method(*args, **kwargs)
throttle[1] = utcnow()
return result
else:
return None
finally:
throttle[0].release()
return wrapper
class ThreadPool(object):
"""A priority queue-based thread pool."""
# pylint: disable=too-many-instance-attributes
def __init__(self, job_handler, worker_count=0, busy_callback=None):
"""Initialize the pool.
job_handler: method to be called from worker thread to handle job
worker_count: number of threads to run that handle jobs
busy_callback: method to be called when queue gets too big.
Parameters: worker_count, list of current_jobs,
pending_jobs_count
"""
self._job_handler = job_handler
self._busy_callback = busy_callback
self.worker_count = 0
self.busy_warning_limit = 0
self._work_queue = queue.PriorityQueue()
self.current_jobs = []
self._lock = threading.RLock()
self._quit_task = object()
self.running = True
for _ in range(worker_count):
self.add_worker()
def add_worker(self):
"""Add worker to the thread pool and reset warning limit."""
with self._lock:
if not self.running:
raise RuntimeError("ThreadPool not running")
worker = threading.Thread(
target=self._worker,
name='ThreadPool Worker {}'.format(self.worker_count))
worker.daemon = True
worker.start()
self.worker_count += 1
self.busy_warning_limit = self.worker_count * 3
def remove_worker(self):
"""Remove worker from the thread pool and reset warning limit."""
with self._lock:
if not self.running:
raise RuntimeError("ThreadPool not running")
self._work_queue.put(PriorityQueueItem(0, self._quit_task))
self.worker_count -= 1
self.busy_warning_limit = self.worker_count * 3
def add_job(self, priority, job):
"""Add a job to the queue."""
with self._lock:
if not self.running:
raise RuntimeError("ThreadPool not running")
self._work_queue.put(PriorityQueueItem(priority, job))
# Check if our queue is getting too big.
if self._work_queue.qsize() > self.busy_warning_limit \
and self._busy_callback is not None:
# Increase limit we will issue next warning.
self.busy_warning_limit *= 2
self._busy_callback(
self.worker_count, self.current_jobs,
self._work_queue.qsize())
def block_till_done(self):
"""Block till current work is done."""
self._work_queue.join()
def stop(self):
"""Finish all the jobs and stops all the threads."""
self.block_till_done()
with self._lock:
if not self.running:
return
# Tell the workers to quit
for _ in range(self.worker_count):
self.remove_worker()
self.running = False
# Wait till all workers have quit
self.block_till_done()
def _worker(self):
"""Handle jobs for the thread pool."""
while True:
# Get new item from work_queue
job = self._work_queue.get().item
if job is self._quit_task:
self._work_queue.task_done()
return
# Add to current running jobs
job_log = (utcnow(), job)
self.current_jobs.append(job_log)
# Do the job
self._job_handler(job)
# Remove from current running job
self.current_jobs.remove(job_log)
# Tell work_queue the task is done
self._work_queue.task_done()
class PriorityQueueItem(object):
"""Holds a priority and a value. Used within PriorityQueue."""
# pylint: disable=too-few-public-methods
def __init__(self, priority, item):
"""Initialize the queue."""
self.priority = priority
self.item = item
def __lt__(self, other):
"""Return the ordering."""
return self.priority < other.priority
| mit | -2,104,050,902,340,730,000 | 30.328704 | 79 | 0.570637 | false | 4.208333 | false | false | false |
bxlab/bx-python | lib/bx/align/epo.py | 1 | 11523 | """Classes and utilities for mutliple alignments from the EPO pipeline"""
import logging
import os
import pickle as cPickle
import re
from collections import namedtuple
from ._epo import ( # noqa: F401
bed_union,
cummulative_intervals,
fastLoadChain,
rem_dash
)
log = logging.getLogger(__name__)
class Chain(namedtuple('Chain', 'score tName tSize tStrand tStart tEnd qName qSize qStrand qStart qEnd id')):
"""A Chain header as in http://genome.ucsc.edu/goldenPath/help/chain.html
chain coordinates are with respect to the strand, so for example tStart on the + strand is the
distance from the leftmost position; tStart on the - strand is the distance from the rightmost position."""
__slots__ = ()
def __str__(self):
return "chain {score} {tName} {tSize} {tStrand} {tStart} {tEnd} {qName} {qSize} {qStrand} {qStart} {qEnd} {id}".format(**self._asdict())
@classmethod
def _strfactory(cls, line):
"""factory class method for Chain
:param line: header of a chain (in .chain format)
"""
assert isinstance(line, str), "this is a factory from string"
line = line.rstrip().split()[1:] # the first component is the keyword "chain"
tup = [t[0](t[1]) for t in zip([int, str, int, str, int, int, str, int, str, int, int, str], line)]
return tuple.__new__(cls, tup)
@classmethod
def _make_from_epo(cls, trg_comp, qr_comp, trg_chrom_sizes, qr_chrom_sizes):
"""crate a chain of collinear rings from the given components.
The target of the chain will always be on the forward strand.
This is done to avoid confusion when mapping psl files. So,
if trg_comp.strand=-, qr_comp.strand=- (resp. +) the
chain header will have tStrand=+, qStrand=+ (resp. -). No strand
changes on the other cases.
:param trg_comp: target (i.e, the first) component
:type trg_comp: L{EPOitem}
:param qr_comp: query (i.e, the second) component
:type qr_comp: L{EPOitem}
:param trg_chrom_sizes: chromosome sizes of the target
:type trg_chrom_sizes: dictionary of the type (chrom) --> size
:param qr_chrom_sizes: chromosome sizes of the query
:type qr_chrom_sizes: dictionary of the type (chrom) --> size
:return: A L{Chain} instance"""
# size, target, query arrays
S, T, Q = [], [], []
# the target strand of the chain must be on the forward strand
trg_intervals = trg_comp.intervals(reverse=trg_comp.strand == '-')
qr_intervals = qr_comp.intervals(reverse=trg_comp.strand == '-')
if len(trg_intervals) == 0 or len(qr_intervals) == 0:
log.warning("deletion/insertion only intervals")
return None
A, B = rem_dash(trg_intervals, qr_intervals)
# correct for when cigar starts/ends with dashes (in number of bases)
tr_start_correction = max(B[0][0] - A[0][0], 0)
tr_end_correction = max(A[-1][1] - B[-1][1], 0)
qr_start_correction = max(A[0][0] - B[0][0], 0)
qr_end_correction = max(B[-1][1] - A[-1][1], 0)
a, b = A.pop(0), B.pop(0)
# intervals are 0-base, halfo-open => lengths = coordinate difference
while A or B:
if a[1] < b[1]:
T.append(0)
Q.append(A[0][0] - a[1])
S.append(min(a[1], b[1]) - max(a[0], b[0]))
a = A.pop(0)
elif b[1] < a[1]:
Q.append(0)
T.append(B[0][0] - b[1])
S.append(min(a[1], b[1]) - max(a[0], b[0]))
b = B.pop(0)
elif A and B:
assert 1 > 2, "there are dash columns"
else:
break
S.append(min(a[1], b[1]) - max(a[0], b[0]))
assert len(T) == len(Q) == len(S) - 1, "(S, T, Q) = (%d, %d, %d)" % tuple(map(len, (S, T, Q)))
tSize = trg_chrom_sizes[trg_comp.chrom]
qSize = qr_chrom_sizes[qr_comp.chrom]
# UCSC coordinates are 0-based, half-open and e! coordinates are 1-base, closed
# chain_start = epo_start - 1 and chain_end = epo_end
if qr_comp.strand == '+':
chain = Chain(
0, trg_comp.chrom, tSize, "+",
(trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction,
qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'),
(qr_comp.start - 1) + qr_start_correction, qr_comp.end - qr_end_correction,
qr_comp.gabid)
else:
chain = Chain(
0, trg_comp.chrom, tSize, "+",
(trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction,
qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'),
(qr_comp.start - 1) + qr_end_correction, qr_comp.end - qr_start_correction,
qr_comp.gabid)
# strand correction. in UCSC coordinates this is: size - coord
if chain.qStrand == '-':
chain = chain._replace(
qEnd=chain.qSize - chain.qStart,
qStart=chain.qSize - chain.qEnd)
assert chain.tEnd - chain.tStart == sum(S) + sum(T), "[%s] %d != %d" % (
str(chain), chain.tEnd - chain.tStart, sum(S) + sum(T))
assert chain.qEnd - chain.qStart == sum(S) + sum(Q), "[%s] %d != %d" % (
str(chain), chain.qEnd - chain.qStart, sum(S) + sum(Q))
return chain, S, T, Q
def slice(self, who):
"return the slice entry (in a bed6 format), AS IS in the chain header"
assert who in ('t', 'q'), "who should be 't' or 'q'"
if who == 't':
return (self.tName, self.tStart, self.tEnd, self.id, self.score, self.tStrand)
else:
return (self.qName, self.qStart, self.qEnd, self.id, self.score, self.qStrand)
def bedInterval(self, who):
"return a BED6 entry, thus DOES coordinate conversion for minus strands"
if who == 't':
st, en = self.tStart, self.tEnd
if self.tStrand == '-':
st, en = self.tSize-en, self.tSize-st
return (self.tName, st, en, self.id, self.score, self.tStrand)
else:
st, en = self.qStart, self.qEnd
if self.qStrand == '-':
st, en = self.qSize-en, self.qSize-st
assert en-st == self.qEnd - self.qStart
return (self.qName, st, en, self.id, self.score, self.qStrand)
@classmethod
def _parse_file(cls, path, pickle=False):
"""parse a .chain file into a list of the type [(L{Chain}, arr, arr, arr) ...]
:param fname: name of the file"""
fname = path
if fname.endswith(".gz"):
fname = path[:-3]
if fname.endswith('.pkl'):
# you asked for the pickled file. I'll give it to you
log.debug("loading pickled file %s ...", fname)
with open(fname, "rb") as f:
return cPickle.load(f)
elif os.path.isfile("%s.pkl" % fname):
# there is a cached version I can give to you
log.info("loading pickled file %s.pkl ...", fname)
if os.stat(path).st_mtime > os.stat("%s.pkl" % fname).st_mtime:
log.critical("*** pickled file %s.pkl is not up to date ***", fname)
try:
with open("%s.pkl" % fname, "rb") as f:
return cPickle.load(f)
except Exception:
log.warning("Loading pickled file %s.pkl failed", fname)
data = fastLoadChain(path, cls._strfactory)
if pickle and not os.path.isfile('%s.pkl' % fname):
log.info("pickling to %s.pkl", fname)
with open('%s.pkl' % fname, 'wb') as f:
cPickle.dump(data, f)
return data
class EPOitem(namedtuple('Epo_item', 'species gabid chrom start end strand cigar')):
"this format is how alignments are delivered from e!"
__slots__ = ()
cigar_pattern = re.compile(r"(\d*)([MD])")
def __repr__(self):
return str(self)
def __str__(self):
c = self.cigar[:5] + "..." + self.cigar[-5:]
return "(%s %s %s %d %d %s %s)" % tuple(self[:6] + (c,))
@classmethod
def _strfactory(cls, line):
"""factory method for an EPOitem
:param line: a line of input"""
cmp = line.rstrip().split()
chrom = cmp[2]
if not chrom.startswith("chr"):
chrom = "chr%s" % chrom
instance = tuple.__new__(
cls,
(cmp[0], cmp[1], chrom, int(cmp[3]), int(cmp[4]), {'1': '+', '-1': '-'}[cmp[5]], cmp[6]))
span = instance.end - instance.start + 1
m_num = sum((t[1] == "M" and [t[0]] or [0])[0] for t in instance.cigar_iter(False))
if span != m_num:
log.warning("[{gabid}] {species}.{chrom}:{start}-{end}.".format(**instance._asdict()) + "(span) %d != %d (matches)" % (span, m_num))
return None
return instance
@classmethod
def _parse_epo(cls, fname):
"""Load an entire file in the EPO format into a dictionary of the type {gab_id => [Epoitem, ...]}
:param fname: file name"""
data = {}
with open(fname) as fd:
for el in (cls._strfactory(_) for _ in fd):
if el:
data.setdefault(el.gabid, []).append(el)
log.info("parsed %d elements from %s", len(data), fname)
return data
def cigar_iter(self, reverse):
"""self.cigar => [(length, type) ... ] iterate the cigar
:param reverse: whether to iterate in the reverse direction (right-to-left)
:type reverse: boolean
:return a list of pairs of the type [(length, M/D) ..]
"""
l = 0
P = self.cigar_pattern
data = []
cigar = self.cigar
parsed_cigar = re.findall(P, cigar)
if reverse:
parsed_cigar = parsed_cigar[::-1]
for _l, t in parsed_cigar:
# 1M is encoded as M
l = (_l and int(_l) or 1) # int(_l) cannot be 0
data.append((l, t))
return data
def intervals(self, reverse, thr=0):
"""return a list of (0-based half-open) intervals representing the match regions of the cigar
for example 4MD4M2DM with reverse=False will produce [(0,4), (5,9), (11,12)]
4MD4M2DM with reverse=True will produce [(0,1), (3,7), (8,12)] (= 12 - previous interval)
:param reverse: whether to iterate in the reverse direction (right-to-left) (this is passed as is to self.cigar_iter)
:type reverse: boolean
:param thr: shift all intervals by this much
:type thr: integer
:return: list of pairs"""
d = [(thr, thr)]
dl = 0
for tup in self.cigar_iter(reverse):
if tup[1] == "D":
dl = tup[0]
else:
s = d[-1][1] + dl
d.append((s, s+tup[0]))
assert d[0] == (thr, thr)
# assert that nr. of Ms in the interval == sum of produced intervals
assert sum(t[0] for t in self.cigar_iter(False) if t[1] == "M") == sum(t[1]-t[0] for t in d)
d_sum = sum(t[1]-t[0] for t in d)
assert self.end - self.start + 1 == d_sum, "[ (%d, %d) = %d ] != %d" % (
self.start, self.end, self.end-self.start+1, d_sum)
return d[1:] # clip the (thr, thr) entry
| mit | 7,633,953,274,690,669,000 | 38.462329 | 144 | 0.540484 | false | 3.350683 | false | false | false |
Arcensoth/cogbot | cogbot/cogs/join_leave/join_leave_server_state.py | 1 | 2346 | from discord import Member, Role
from discord.ext.commands import Context
from cogbot.cogs.abc.base_cog import BaseCogServerState
from cogbot.cogs.join_leave.join_leave_options import JoinLeaveOptions
class JoinLeaveServerState(BaseCogServerState[JoinLeaveOptions]):
async def create_options(self) -> JoinLeaveOptions:
return await JoinLeaveOptions().init(self, self.raw_options)
async def join_role(self, ctx: Context, author: Member, role_alias: str):
try:
role_entry = self.options.role_entry_from_alias[role_alias.lower()]
role = self.bot.get_role(self.server, role_entry.role_id)
await self.bot.add_roles(author, role)
await self.bot.say(f"{author.mention} has joined {role}")
except:
self.log.info(f"{author} failed to join the role: {role_alias}")
await self.bot.react_question(ctx)
async def leave_role(self, ctx: Context, author: Member, role_alias: str):
try:
role_entry = self.options.role_entry_from_alias[role_alias]
role = self.bot.get_role(self.server, role_entry.role_id)
await self.bot.remove_roles(author, role)
await self.bot.say(f"{author.mention} has left {role}")
except:
self.log.info(f"{author} failed to leave the role: {role_alias}")
await self.bot.react_question(ctx)
async def list_roles(self, ctx: Context, author: Member):
role_lines = []
for role_entry in self.options.role_entries:
role: Role = self.bot.get_role(self.server, role_entry.role_id)
role_lines.append(f"{role}")
role_aliases = role_entry.aliases
first_role_alias = role_aliases[0]
other_role_aliases = role_aliases[1:]
role_aliases_line = f" >join {first_role_alias}"
if other_role_aliases:
other_role_aliases_str = " or ".join(
f'"{role_alias}"' for role_alias in other_role_aliases
)
role_aliases_line = f"{role_aliases_line} (or {other_role_aliases_str})"
role_lines.append(role_aliases_line)
roles_str = "\n".join(role_lines)
await self.bot.say(
f"{author.mention} Available self-assignable roles:\n```\n{roles_str}\n```"
)
| mit | 4,599,399,970,453,194,000 | 45.92 | 88 | 0.6185 | false | 3.581679 | false | false | false |
mypinballs/whirlwind | effects.py | 1 | 8263 | # Top Rollover Lanes
__author__="jim"
__date__ ="$Jan 18, 2011 1:36:37 PM$"
import procgame
import locale
from procgame import *
base_path = config.value_for_key_path('base_path')
game_path = base_path+"games/whirlwind/"
class Effects(game.Mode):
def __init__(self, game, priority):
super(Effects, self).__init__(game, priority)
def drive_lamp(self, lamp_name, style='on',time=2):
if style == 'slow':
self.game.lamps[lamp_name].schedule(schedule=0x00ff00ff, cycle_seconds=0, now=True)
elif style == 'medium':
self.game.lamps[lamp_name].schedule(schedule=0x0f0f0f0f, cycle_seconds=0, now=True)
elif style == 'fast':
self.game.lamps[lamp_name].schedule(schedule=0x99999999, cycle_seconds=0, now=True)
elif style == 'superfast':
self.game.lamps[lamp_name].schedule(schedule=0xaaaaaaaa, cycle_seconds=0, now=True)
elif style == 'on':
self.game.lamps[lamp_name].enable()
elif style == 'off':
self.off(lamp_name)
elif style == 'smarton':
self.game.lamps[lamp_name].schedule(schedule=0xaaaaaaaa, cycle_seconds=0, now=True)
self.cancel_delayed(lamp_name+'_on')
self.delay(name=lamp_name+'_on', event_type=None, delay=0.6, handler=self.game.lamps[lamp_name].enable)
elif style == 'timedon':
self.game.lamps[lamp_name].enable()
self.cancel_delayed(lamp_name+'_off')
self.delay(name=lamp_name+'_off', event_type=None, delay=time, handler=self.off,param=lamp_name)
elif style == 'timeout':
if time>10:
self.cancel_delayed(lamp_name+'_medium')
self.delay(name=lamp_name+'_medium', event_type=None, delay=time-10, handler=lambda:self.drive_lamp(lamp_name,'medium'))
if time>5:
self.cancel_delayed(lamp_name+'_fast')
self.delay(name=lamp_name+'_fast', event_type=None, delay=time-5, handler=lambda:self.drive_lamp(lamp_name,'fast'))
if time>1:
self.cancel_delayed(lamp_name+'_superfast')
self.delay(name=lamp_name+'_superfast', event_type=None, delay=time-1, handler=lambda:self.drive_lamp(lamp_name,'superfast'))
self.delay(name=lamp_name+'_off', event_type=None, delay=time, handler=self.off,param=lamp_name)
def clear_lamp_timers(self,lamp_name):
self.cancel_delayed(lamp_name+'_medium')
self.cancel_delayed(lamp_name+'_fast')
self.cancel_delayed(lamp_name+'_superfast')
self.cancel_delayed(lamp_name+'on')
self.cancel_delayed(lamp_name+'_off')
def off(self,lamp_name):
self.clear_lamp_timers(lamp_name)
self.game.lamps[lamp_name].disable()
# def drive_super_fast(self, lamp_name):
# self.game.lamps[lamp_name].schedule(schedule=0x99999999, cycle_seconds=0, now=True)
#
# def drive_fast(self, lamp_name):
# self.game.lamps[lamp_name].schedule(schedule=0x55555555, cycle_seconds=0, now=True)
#
# def drive_medium(self, lamp_name):
# self.game.lamps[lamp_name].schedule(schedule=0x0f0f0f0f, cycle_seconds=0, now=True)
def drive_flasher(self, data, style='medium',cycle=0,time=2):
if isinstance(data, basestring):
flasher_name=data
else:
flasher_name=data[0]
style = data[1]
time = data[2]
if style == 'slow':
self.game.coils[flasher_name].schedule(schedule=0x00003000, cycle_seconds=cycle, now=True)
elif style == 'medium':
self.game.coils[flasher_name].schedule(schedule=0x30003000, cycle_seconds=cycle, now=True)
elif style == 'fast':
self.game.coils[flasher_name].schedule(schedule=0x11111111, cycle_seconds=cycle, now=True)
elif style == 'super':
self.game.coils[flasher_name].schedule(schedule=0x55555555, cycle_seconds=cycle, now=True)
elif style == 'super2':
self.game.coils[flasher_name].schedule(schedule=0x55055055, cycle_seconds=cycle, now=True)
elif style == 'strobe':
self.game.coils[flasher_name].schedule(schedule=0xeeeeeeee, cycle_seconds=cycle, now=True)
elif style == 'chaos':
self.game.coils[flasher_name].schedule(schedule=0x019930AB, cycle_seconds=cycle, now=True)
elif style == 'fade':
self.game.coils[flasher_name].schedule(schedule=0xAAA99933, cycle_seconds=cycle, now=True)
if time>0:
self.delay(name=flasher_name+'_off', event_type=None, delay=time, handler=self.game.coils[flasher_name].disable)
# def strobe_flasher_set(self,flasher_list,time=0.5):
# timer = 0
# for fname in flasher_list:
# self.delay(name=fname+'strobe', event_type=None, delay=timer, handler=self.drive_flasher, param=[fname,'fast',time])
# timer+=time
def strobe_flasher_set(self,flasher_list,time=1,overlap=0.2,repeats=1,enable=True):
timer = 0
for i in range(repeats):
for fname in flasher_list:
if enable:
self.delay(name=fname+'strobe', event_type=None, delay=timer, handler=self.drive_flasher, param=[fname,'fast',time+overlap])
timer+=time
else:
self.cancel_delayed(fname+'strobe')
self.game.coils[fname].disable()
def strobe_controlled_flasher_set(self,flasher_list,time=0.1,overlap=0.2,repeats=1,enable=True):
timer = 0
#playfield flashers
sequence=[]
for j in range(repeats):
sequence += flasher_list
for i in range(len(sequence)):
def flash(i,time,delay):
self.delay(delay=delay,handler=lambda:self.game.switched_coils.drive(name=sequence[i],style='fast',time=time+0.1))
flash(i,time,timer)
timer+=time
def drive_led(self,lamp_name,colour):
if colour=='red':
self.led_colour_data(lamp_name,'on','off','off')
elif colour=='pink':
self.led_colour_data(lamp_name,'on','off','med')
elif colour=='magenta':
self.led_colour_data(lamp_name,'on','off','on')
elif colour=='purple':
self.led_colour_data(lamp_name,'med','off','on')
elif colour=='skyblue':
self.led_colour_data(lamp_name,'off','med','on')
elif colour=='blue':
self.led_colour_data(lamp_name,'off','off','on')
elif colour=='cyan':
self.led_colour_data(lamp_name,'off','on','on')
elif colour=='turquoise':
self.led_colour_data(lamp_name,'off','on','med')
elif colour=='green':
self.led_colour_data(lamp_name,'off','on','off')
elif colour=='limegreen':
self.led_colour_data(lamp_name,'med','on','off')
elif colour=='yellow':
self.led_colour_data(lamp_name,'on','on','off')
elif colour=='orange':
self.led_colour_data(lamp_name,'on','med','off')
elif colour=='white':
self.led_colour_data(lamp_name,'on','on','on')
elif colour=='black':
self.led_colour_data(lamp_name,'off','off','off')
def led_colour_data(self,lamp_name,red,blue,green):
data=[red,green,blue]
name=['Red','Green','Blue']
for i in range(len(data)):
if data[i]=='off':
self.game.lamps[lamp_name+name[i]].disable()
elif data[i]=='on':
self.game.lamps[lamp_name+name[i]].enable()
elif data[i]=='med':
self.game.lamps[lamp_name+name[i]].schedule(schedule=0x80808080, cycle_seconds=0, now=True)
# self.game.lamps[lamp_name+name[i]].patter()
| gpl-3.0 | 1,920,861,269,690,406,000 | 44.15847 | 148 | 0.563839 | false | 3.418701 | false | false | false |
Skyeouyang/Text-Analytics-Project | lexicon analysis.py | 1 | 2398 | #######################################
##Author Skye Ouyang
##Date 19th Apr.
#######################################
import glob
import os
def IsNotNull(value):
return value is not None and len(value) > 0
#create weapon list
dict_weapon = []
weapons = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/lexicon/weapon_words.txt','r')
for weapon in weapons:
t = weapon.strip().lower()
if (IsNotNull(t)):
dict_weapon.append(t)
weapons.close()
#create bloody words list
dict_bloody = []
bloodys = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/lexicon/bloody_words.txt','r')
for bloody in bloodys:
b = bloody.strip().lower()
if (IsNotNull(b)):
dict_bloody.append(b)
#create mysterious words list
dict_mysterious = []
mysteriouss = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/lexicon/mysterious_words.txt','r')
for mysterious in mysteriouss:
m = mysterious.strip().lower()
if (IsNotNull(m)):
dict_mysterious.append(m)
#input data
path ="D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/dataset/low_score_novel"
allFiles = glob.glob(path + "/*.txt")
#file = open('D:/1. msba/Trimester II Jan.2017-May.2017/text analytics/project/dataset/high_score_novel/01. The Girl with the Dragon Tattoo.txt','r')
weapon_cnt = []
bloody_cnt = []
mysterious_cnt = []
for file in allFiles:
with open(file) as fle:
fiction = fle.read()
# set for loop
wea_cnt = 0
blo_cnt = 0
mys_cnt = 0
# count of weapon words
for word in dict_weapon:
if (word in fiction):
wea_cnt = wea_cnt + 1
for word in dict_bloody:
if (word in fiction):
blo_cnt = blo_cnt + 1
for word in dict_mysterious:
if (word in fiction):
mys_cnt = mys_cnt + 1
print (wea_cnt, blo_cnt , mys_cnt)
# write into list
weapon_cnt.append(wea_cnt)
bloody_cnt.append(blo_cnt)
mysterious_cnt.append(mys_cnt)
weapon_cnt
'''
for file in allFiles:
with open (file) as fle:
blo_cnt = 0
fiction = fle.read()
'''
#file_name = os.path.splitext(path + '/*.txt')[0]
#print ('The size of %s is ' % (file_name) + str(len(fiction)))
| apache-2.0 | -6,619,393,933,516,462,000 | 27.604938 | 149 | 0.582569 | false | 2.960494 | false | false | false |
gandalfcode/gandalf | examples/example09.py | 1 | 1749 | #==============================================================================
# example09.py
# Create initial conditions for pure N-body simulation inside the python
# script, and then run the simulation to completion while plotting results.
#==============================================================================
from gandalf.analysis.facade import *
import numpy as np
import time
# Create empty numpy arrays for setting star initial conditions
Nstar = 3
x = np.zeros(Nstar)
y = np.zeros(Nstar)
vx = np.zeros(Nstar)
vy = np.zeros(Nstar)
m = np.zeros(Nstar)
h = 0.000001*np.ones(Nstar)
# Set values for each star individually (Note all velocities initially zero)
m[0] = 3.0; x[0] = 1.0; y[0] = 3.0
m[1] = 4.0; x[1] = -2.0; y[1] = -1.0
m[2] = 5.0; x[2] = 1.0; y[2] = -1.0
# Create new 1D simulation object and set parameters
sim = newsim(ndim=2,sim='nbody')
sim.SetParam('ic','python')
sim.SetParam('nbody','hermite4ts')
sim.SetParam('sub_systems',0)
sim.SetParam('Npec',3)
sim.SetParam('Nlevels',1)
sim.SetParam('Nstar',Nstar)
sim.SetParam('tend',80.0)
sim.SetParam('dt_snap',1.0)
sim.SetParam('noutputstep',128)
sim.SetParam('ndiagstep',2048)
sim.SetParam('dimensionless',1)
sim.SetParam('run_id','BURRAU1')
sim.SetParam('out_file_form','su')
# Call setup routines and import particle data
sim.PreSetupForPython()
sim.ImportArray(x,'x','star')
sim.ImportArray(y,'y','star')
sim.ImportArray(vx,'vx','star')
sim.ImportArray(vy,'vy','star')
sim.ImportArray(m,'m','star')
sim.ImportArray(h,'h','star')
sim.SetupSimulation()
# Plot the density of all particles near the shock
plot("x","y",type="star")
limit("x",-30.0,30.0,window="all")
limit("y",-20.0,40.0,window="all")
# Run simulation and save plot to file
run()
block()
| gpl-2.0 | -2,850,669,717,202,946,000 | 29.684211 | 79 | 0.63522 | false | 2.867213 | false | false | false |
Sjc1000/PyRC | UI/Disabled/FriendsList.py | 1 | 2227 | #!/usr/bin/env python3
from gi.repository import Gtk, Gdk
import json
class FriendsList():
servers = {}
active_server = None
def __init__(self, MainWindow):
self.MainWindow = MainWindow
self.position = [8, 5, 1, 4]
def prebuild(self):
self.MainWindow.ui_plugins['UserList'].position = (8, 0, 1, 5)
return None
def build(self):
self.scroll_window = Gtk.ScrolledWindow()
self.list = Gtk.ListStore(str, str)
self.view = Gtk.TreeView(self.list)
self.view.set_activate_on_single_click(True)
self.view.set_hexpand(True)
self.view.connect('row-activated', self.clicked)
text_render = Gtk.CellRendererText()
username = Gtk.TreeViewColumn('Friends', text_render, text=0, foreground=1)
self.view.append_column(username)
self.scroll_window.add(self.view)
self.MainWindow.grid.attach(self.scroll_window, *self.position)
return None
def clicked(self, TreeView, TreePath, TreeViewColumn):
print('User list clicked')
return None
def add_friend(self, connection, nickname):
connection.send('MONITOR + ' + nickname)
self.servers[connection.server]['friends'][nickname] = {'iter': None, 'online': False}
if connection.server == self.active_server:
iter = self.list.append([nickname, 'grey'])
self.servers[connection.server]['friends'][nickname]['iter'] = iter
return None
def activate_path(self, server, channel, clicked=False):
self.active_server = server
#redraw
return None
def on376(self, connection, *junk):
with open('UI/friends.json', 'r') as ffile:
friends = json.loads(ffile.read())
if connection.server not in friends:
return None
self.servers[connection.server] = {'friends': {}}
for nickname in sorted(friends[connection.server]):
self.add_friend(connection, nickname)
connection.send('MONITOR s')
return None
def on730(self, connection, host, nickname, uhost):
if nickname == connection.nickname:
return None
print( uhost )
return None | gpl-2.0 | -5,208,045,553,747,212,000 | 32.253731 | 94 | 0.619668 | false | 3.955595 | false | false | false |
wujuguang/sqlalchemy | lib/sqlalchemy/dialects/postgresql/pygresql.py | 1 | 8129 | # postgresql/pygresql.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+pygresql
:name: pygresql
:dbapi: pgdb
:connectstring: postgresql+pygresql://user:password@host:port/dbname[?key=value&key=value...]
:url: http://www.pygresql.org/
.. note::
The pygresql dialect is **not tested as part of SQLAlchemy's continuous
integration** and may have unresolved issues. The recommended PostgreSQL
dialect is psycopg2.
""" # noqa
import decimal
import re
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import PGCompiler
from .base import PGDialect
from .base import PGIdentifierPreparer
from .base import UUID
from .hstore import HSTORE
from .json import JSON
from .json import JSONB
from ... import exc
from ... import processors
from ... import util
from ...sql.elements import Null
from ...types import JSON as Json
from ...types import Numeric
class _PGNumeric(Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if not isinstance(coltype, int):
coltype = coltype.oid
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# PyGreSQL returns Decimal natively for 1700 (numeric)
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
else:
if coltype in _FLOAT_TYPES:
# PyGreSQL returns float natively for 701 (float8)
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if not dialect.has_native_hstore:
return super(_PGHStore, self).bind_processor(dialect)
hstore = dialect.dbapi.Hstore
def process(value):
if isinstance(value, dict):
return hstore(value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_hstore:
return super(_PGHStore, self).result_processor(dialect, coltype)
class _PGJSON(JSON):
def bind_processor(self, dialect):
if not dialect.has_native_json:
return super(_PGJSON, self).bind_processor(dialect)
json = dialect.dbapi.Json
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, Null) or (
value is None and self.none_as_null
):
return None
if value is None or isinstance(value, (dict, list)):
return json(value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_json:
return super(_PGJSON, self).result_processor(dialect, coltype)
class _PGJSONB(JSONB):
def bind_processor(self, dialect):
if not dialect.has_native_json:
return super(_PGJSONB, self).bind_processor(dialect)
json = dialect.dbapi.Json
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, Null) or (
value is None and self.none_as_null
):
return None
if value is None or isinstance(value, (dict, list)):
return json(value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_json:
return super(_PGJSONB, self).result_processor(dialect, coltype)
class _PGUUID(UUID):
def bind_processor(self, dialect):
if not dialect.has_native_uuid:
return super(_PGUUID, self).bind_processor(dialect)
uuid = dialect.dbapi.Uuid
def process(value):
if value is None:
return None
if isinstance(value, (str, bytes)):
if len(value) == 16:
return uuid(bytes=value)
return uuid(value)
if isinstance(value, int):
return uuid(int=value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_uuid:
return super(_PGUUID, self).result_processor(dialect, coltype)
if not self.as_uuid:
def process(value):
if value is not None:
return str(value)
return process
class _PGCompiler(PGCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return (
self.process(binary.left, **kw)
+ " %% "
+ self.process(binary.right, **kw)
)
def post_process_text(self, text):
return text.replace("%", "%%")
class _PGIdentifierPreparer(PGIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
return value.replace("%", "%%")
class PGDialect_pygresql(PGDialect):
driver = "pygresql"
statement_compiler = _PGCompiler
preparer = _PGIdentifierPreparer
@classmethod
def dbapi(cls):
import pgdb
return pgdb
colspecs = util.update_copy(
PGDialect.colspecs,
{
Numeric: _PGNumeric,
HSTORE: _PGHStore,
Json: _PGJSON,
JSON: _PGJSON,
JSONB: _PGJSONB,
UUID: _PGUUID,
},
)
def __init__(self, **kwargs):
super(PGDialect_pygresql, self).__init__(**kwargs)
try:
version = self.dbapi.version
m = re.match(r"(\d+)\.(\d+)", version)
version = (int(m.group(1)), int(m.group(2)))
except (AttributeError, ValueError, TypeError):
version = (0, 0)
self.dbapi_version = version
if version < (5, 0):
has_native_hstore = has_native_json = has_native_uuid = False
if version != (0, 0):
util.warn(
"PyGreSQL is only fully supported by SQLAlchemy"
" since version 5.0."
)
else:
self.supports_unicode_statements = True
self.supports_unicode_binds = True
has_native_hstore = has_native_json = has_native_uuid = True
self.has_native_hstore = has_native_hstore
self.has_native_json = has_native_json
self.has_native_uuid = has_native_uuid
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
if "port" in opts:
opts["host"] = "%s:%s" % (
opts.get("host", "").rsplit(":", 1)[0],
opts.pop("port"),
)
opts.update(url.query)
return [], opts
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
if not connection:
return False
try:
connection = connection.connection
except AttributeError:
pass
else:
if not connection:
return False
try:
return connection.closed
except AttributeError: # PyGreSQL < 5.0
return connection._cnx is None
return False
dialect = PGDialect_pygresql
| mit | -2,064,211,738,100,849,400 | 29.56015 | 97 | 0.570058 | false | 4.282929 | false | false | false |
LockScreen/Backend | venv/lib/python2.7/site-packages/botocore/docs/sharedexample.py | 1 | 9129 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import numbers
from botocore.utils import parse_timestamp
from datetime import datetime
class SharedExampleDocumenter(object):
def document_shared_example(self, example, prefix, section,
operation_model):
"""Documents a single shared example based on its definition.
:param example: The model of the example
:param prefix: The prefix to use in the method example.
:param section: The section to write to.
:param operation_model: The model of the operation used in the example
"""
section.style.new_paragraph()
section.write(example.get('description'))
section.style.new_line()
self.document_input(section, example, prefix,
operation_model.input_shape)
self.document_output(section, example, operation_model.output_shape)
def document_input(self, section, example, prefix, shape):
input_section = section.add_new_section('input')
input_section.style.start_codeblock()
if prefix is not None:
input_section.write(prefix)
params = example['input']
comments = example.get('comments')
if comments:
comments = comments.get('input')
param_section = input_section.add_new_section('parameters')
self._document_params(param_section, params, comments, [], shape)
closing_section = input_section.add_new_section('input-close')
closing_section.style.new_line()
closing_section.style.new_line()
closing_section.write('print(response)')
closing_section.style.end_codeblock()
def document_output(self, section, example, shape):
output_section = section.add_new_section('output')
output_section.writeln('Expected Output:')
output_section.style.start_codeblock()
params = example.get('output', {})
# There might not be an output, but we will return metadata anyway
params['ResponseMetadata'] = {"...": "..."}
comments = example.get('comments')
if comments:
comments = comments.get('output')
self._document_dict(output_section, params, comments, [], shape, True)
closing_section = output_section.add_new_section('output-close')
closing_section.style.end_codeblock()
def _document(self, section, value, comments, path, shape):
"""
:param section: The section to add the docs to.
:param value: The input / output values representing the parameters that
are included in the example.
:param comments: The dictionary containing all the comments to be
applied to the example.
:param path: A list describing where the documenter is in traversing the
parameters. This is used to find the equivalent location
in the comments dictionary.
"""
if isinstance(value, dict):
self._document_dict(section, value, comments, path, shape)
elif isinstance(value, list):
self._document_list(section, value, comments, path, shape)
elif isinstance(value, numbers.Number):
self._document_number(section, value, path)
elif shape and shape.type_name == 'timestamp':
self._document_datetime(section, value, path)
else:
self._document_str(section, value, path)
def _document_dict(self, section, value, comments, path, shape,
top_level=False):
dict_section = section.add_new_section('dict-value')
self._start_nested_value(dict_section, '{')
for key, val in value.items():
path.append('.%s' % key)
item_section = dict_section.add_new_section(key)
item_section.style.new_line()
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
item_section.write("'%s': " % key)
# Shape could be none if there is no output besides ResponseMetadata
item_shape = None
if shape:
if shape.type_name == 'structure':
item_shape = shape.members.get(key)
elif shape.type_name == 'map':
item_shape = shape.value
self._document(item_section, val, comments, path, item_shape)
path.pop()
dict_section_end = dict_section.add_new_section('ending-brace')
self._end_nested_value(dict_section_end, '}')
if not top_level:
dict_section_end.write(',')
def _document_params(self, section, value, comments, path, shape):
param_section = section.add_new_section('param-values')
self._start_nested_value(param_section, '(')
for key, val in value.items():
path.append('.%s' % key)
item_section = param_section.add_new_section(key)
item_section.style.new_line()
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
item_section.write(key + '=')
# Shape could be none if there are no input parameters
item_shape = None
if shape:
item_shape = shape.members.get(key)
self._document(item_section, val, comments, path, item_shape)
path.pop()
param_section_end = param_section.add_new_section('ending-parenthesis')
self._end_nested_value(param_section_end, ')')
def _document_list(self, section, value, comments, path, shape):
list_section = section.add_new_section('list-section')
self._start_nested_value(list_section, '[')
item_shape = shape.member
for index, val in enumerate(value):
item_section = list_section.add_new_section(index)
item_section.style.new_line()
path.append('[%s]' % index)
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
self._document(item_section, val, comments, path, item_shape)
path.pop()
list_section_end = list_section.add_new_section('ending-bracket')
self._end_nested_value(list_section_end, '],')
def _document_str(self, section, value, path):
# We do the string conversion because this might accept a type that
# we don't specifically address.
section.write("'%s'," % str(value))
def _document_number(self, section, value, path):
section.write("%s," % str(value))
def _document_datetime(self, section, value, path):
datetime_tuple = parse_timestamp(value).timetuple()
datetime_str = str(datetime_tuple[0])
for i in range(1, len(datetime_tuple)):
datetime_str += ", " + str(datetime_tuple[i])
section.write("datetime(%s)," % datetime_str)
def _get_comment(self, path, comments):
key = re.sub('^\.', '', ''.join(path))
if comments and key in comments:
return '# ' + comments[key]
else:
return ''
def _start_nested_value(self, section, start):
section.write(start)
section.style.indent()
section.style.indent()
def _end_nested_value(self, section, end):
section.style.dedent()
section.style.dedent()
section.style.new_line()
section.write(end)
def document_shared_examples(section, operation_model, example_prefix,
shared_examples):
"""Documents the shared examples
:param section: The section to write to.
:param operation_model: The model of the operation.
:param example_prefix: The prefix to use in the method example.
:param shared_examples: The shared JSON examples from the model.
"""
container_section = section.add_new_section('shared-examples')
container_section.style.new_paragraph()
container_section.style.bold('Examples')
documenter = SharedExampleDocumenter()
for example in shared_examples:
documenter.document_shared_example(
example=example,
section=container_section.add_new_section(example['id']),
prefix=example_prefix,
operation_model=operation_model
)
| mit | -1,774,796,653,096,055,800 | 40.684932 | 80 | 0.614197 | false | 4.212737 | false | false | false |
disqus/zumanji | src/zumanji/views.py | 1 | 6969 | from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import render, get_object_or_404
from django.utils import simplejson
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from functools import wraps
from zumanji.forms import UploadJsonForm
from zumanji.helpers import get_trace_data, get_changes, get_git_changes
from zumanji.models import Project, Build, BuildTag, Test
from zumanji.importer import import_build
NOTSET = object()
def api_auth(func):
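    """Decorator for API views.

    Requests that supply an ``api_key`` parameter are checked against
    ``settings.ZUMANJI_CONFIG['API_KEY']`` and bypass CSRF; requests without
    an ``api_key`` fall back to the normal CSRF protection.
    """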
@wraps(func)
def wrapped(request, *args, **kwargs):
if request.REQUEST.get('api_key'):
if request.REQUEST['api_key'] != settings.ZUMANJI_CONFIG.get('API_KEY', NOTSET):
return HttpResponseForbidden('Invalid api_key')
return func(request, *args, **kwargs)
return csrf_protect(func)(request, *args, **kwargs)
return csrf_exempt(wrapped)
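# Illustrative example (not part of the original module): wrapping a view with api_auth
# lets script clients authenticate with ?api_key=... (checked against
# settings.ZUMANJI_CONFIG['API_KEY']) while browser requests fall back to CSRF protection.
# The endpoint below is a made-up sketch and is not wired into any URLconf.
@api_auth
def example_ping(request):
    from django.http import HttpResponse
    return HttpResponse('pong')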
def index(request):
build_qs = Build.objects.order_by('-revision__datetime', '-datetime').select_related('revision')
project_list = []
# lol O(N)
for project in Project.objects.all():
try:
latest_build = build_qs.filter(project=project)[0]
except IndexError:
latest_build = None
project_list.append((project, latest_build))
return render(request, 'zumanji/index.html', {
'project_list': project_list,
})
def view_project(request, project_label):
project = get_object_or_404(Project, label=project_label)
build_list = list(Build.objects
.filter(project=project)
.order_by('-revision__datetime', '-datetime')
.select_related('revision', 'project'))
return render(request, 'zumanji/project.html', {
'project': project,
'build_list': build_list,
})
def view_tag(request, project_label, tag_id):
project = get_object_or_404(Project, label=project_label)
tag = get_object_or_404(BuildTag, pk=tag_id)
build_list = list(Build.objects
.filter(project=project, tags=tag)
.order_by('-datetime')
.select_related('revision', 'project'))
return render(request, 'zumanji/tag.html', {
'project': project,
'tag': tag,
'build_list': build_list,
})
def view_build(request, project_label, build_id, tag_id=None):
filter_args = dict(project__label=project_label, id=build_id)
tag = None
if tag_id:
tag = get_object_or_404(BuildTag, id=tag_id)
filter_args["tags"] = tag
build = get_object_or_404(Build, **filter_args)
project = build.project
previous_build = build.get_previous_build(tag=tag)
next_build = build.get_next_build(tag=tag)
test_list = list(build.test_set
.filter(parent__isnull=True)
.order_by('-upper90_duration'))
compare_with = request.GET.get('compare_with')
if compare_with:
try:
compare_build = Build.objects.get(project__label=project_label, id=compare_with)
except Build.DoesNotExist:
compare_build = None
else:
compare_build = previous_build
changes = get_changes(compare_build, test_list)
if compare_build:
git_changes = get_git_changes(build, compare_build)
else:
git_changes = None
return render(request, 'zumanji/build.html', {
'project': project,
'tag': tag,
'build': build,
'previous_build': previous_build,
'compare_build': compare_build,
'next_build': next_build,
'test_list': test_list,
'changes': changes,
'git_changes': git_changes,
})
def view_test(request, project_label, build_id, test_label):
test = get_object_or_404(Test, project__label=project_label, build=build_id, label=test_label)
project = test.project
build = test.build
test_list = list(Test.objects.filter(parent=test)
.order_by('-upper90_duration')
.select_related('parent'))
# this is actually a <Test>
previous_test_by_build = test.get_test_in_previous_build()
next_test_by_build = test.get_test_in_next_build()
breadcrumbs = [
(reverse('zumanji:view_build', kwargs={'project_label': project.label, 'build_id': build.id}), 'Build #%s' % build.id)
]
last = ''
for node in test.get_context():
node_label = node.label[len(last):]
breadcrumbs.append(
(reverse('zumanji:view_test', kwargs={
'project_label': project.label,
'build_id': build.id,
'test_label': node.label,
}), node_label)
)
last = node.label + '.' # include the dot
previous_builds = test.get_previous_builds(50)
compare_with = request.GET.get('compare_with')
if compare_with:
try:
compare_build = Build.objects.get(project__label=project_label, id=compare_with)
except Build.DoesNotExist:
compare_build = None
else:
compare_build = previous_test_by_build.build if previous_test_by_build else None
if compare_build:
try:
compare_test = compare_build.test_set.get(label=test.label)
except Test.DoesNotExist:
compare_test = None
git_changes = get_git_changes(build, compare_build)
else:
compare_test = None
git_changes = None
trace_results = get_trace_data(test, compare_test)
if previous_test_by_build:
tests_to_check = test_list
changes = get_changes(compare_build, tests_to_check)
else:
changes = []
return render(request, 'zumanji/test.html', {
'breadcrumbs': breadcrumbs,
'project': project,
'build': build,
'previous_test_by_build': previous_test_by_build,
'next_test_by_build': next_test_by_build,
'previous_builds': previous_builds,
'test': test,
'test_list': test_list,
'changes': changes,
'compare_build': compare_build,
'trace_results': trace_results,
'git_changes': git_changes,
})
@api_auth
@transaction.commit_on_success
def upload_project_build(request, project_label):
project = get_object_or_404(Project, label=project_label)
form = UploadJsonForm(request.POST or None, request.FILES or None)
if form.is_valid():
data = simplejson.loads(request.FILES['json_file'].read())
try:
build = import_build(data, project=project.label, revision=form.cleaned_data.get('revision'))
except Exception, e:
form.errors['json_file'] = unicode(e)
else:
return HttpResponseRedirect(reverse('zumanji:view_build', kwargs={
'project_label': project.label, 'build_id': build.id}))
return render(request, 'zumanji/upload_build.html', {
'project': project,
'form': form,
})
| apache-2.0 | 3,989,766,211,965,808,000 | 31.565421 | 126 | 0.627924 | false | 3.656348 | true | false | false |
ZwEin27/phone-number-matcher | dig_phone_extractor.py | 1 | 23737 | # -*- coding: utf-8 -*-
# @Author: ZwEin
# @Date: 2016-06-21 12:36:47
# @Last Modified by: ZwEin
# @Last Modified time: 2016-09-29 21:54:12
import os
import re
import sys
import json
import copy
import types
import string
import collections
import phonenumbers
from datetime import datetime
from crf_tokenizer import CrfTokenizer
from urlparse import urlparse
from string import maketrans
from phonenumbers.phonenumberutil import NumberParseException
from difflib import SequenceMatcher
def is_valid_datetime(raw, date_format):
try:
datetime.strptime(raw, date_format)
return True
except ValueError:
return False
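# Illustrative check (not part of the original file): the helper is used below to drop
# date-like digit runs, e.g. '20141231' parses as %Y%m%d while '20141341' does not.
def _example_is_valid_datetime():
    return is_valid_datetime('20141231', '%Y%m%d'), is_valid_datetime('20141341', '%Y%m%d')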
class Preprocessor():
re_prep = re.compile(r'[\(\)]')
reg_simple_format = [
r'(?:(?<=[ \A\b-\.\?])\d{3}[ \?\.-]\d{3}[ \?\.-]\d{4}(?=[ \Z\b-\.\?]))'
]
re_simple_format = re.compile(r'(?:'+r'|'.join(reg_simple_format)+r')')
datetime_regexes = [
r"(?:\d{2}[ _-]\d{2}[ _-]\d{4})",
r"(?:\d{4}[ _-]\d{2}[ _-]\d{2})"
]
datetime_regex = r"(?:" + r"|".join(datetime_regexes) + ")"
re_datetime_regex = re.compile(datetime_regex)
re_digits_regex = re.compile(r"\d+")
def prep_datetime(self, raw):
m = Preprocessor.re_datetime_regex.findall(raw)
for d in m:
dd = ''.join(Preprocessor.re_digits_regex.findall(d))
if is_valid_datetime(dd, '%Y%m%d') or is_valid_datetime(dd, '%m%d%Y'):
raw = raw.replace(d, "")
return raw
money_regex = r"(?:(?<=[\D])\$\d+(?=[\W_]))"
units = ['lbs', 'kg', 'hour', 'hr', 'hh']
unit_regex = r"(?:\d+[\s\W]*(" + r"|".join(units) + "))"
others_regexes = [
r"24/7",
r"#\d+",
r"\d+\'\d+",
r"(?<=[\W_])\d{5}[\W_]{1,}\d{5}(?=[\W_])",
r"- {1,}\d+$",
r"\d+\%"
]
other_regex = r"(?:" + "|".join(others_regexes) + ")"
all_regexes = [money_regex, unit_regex, other_regex]
all_regex = r"(" + r"|".join(all_regexes) + ")"
re_all_regex = re.compile(all_regex)
def preprocess(self, raw):
raw = raw.lower()
raw = raw.encode('ascii', 'ignore')
raw = self.prep_datetime(raw)
raw = Preprocessor.re_prep.sub(' ', raw)
raw = Preprocessor.re_all_regex.sub('', raw)
raw = Preprocessor.re_simple_format.sub('pnwrapper \g<0> pnwrapper', raw)
return raw
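# Illustrative example (not part of the original file): a minimal use of the preprocessor.
# It lowercases the text, strips date/money/unit noise and wraps simple xxx-xxx-xxxx
# numbers in 'pnwrapper' markers for the later stages. The sample text is made up.
def _example_preprocess():
    return Preprocessor().preprocess("Call 754-307-7279 for a $80 special on 05-02-2014")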
SOURCE_TYPE_TEXT = 'text'
SOURCE_TYPE_URL = 'url'
class Tokenizer():
re_2_digts_only_in_url_regex = re.compile(r'(?<=[-_])\d{2}(?=[_/])')
re_all_alphabet_in_url_regex = re.compile(r'\w+')
def __init__(self, source_type='text'):
self.set_source_type(source_type)
def set_source_type(self, source_type):
"""
'text' or 'url'
"""
st = source_type.lower()
if source_type.lower() not in [SOURCE_TYPE_TEXT, SOURCE_TYPE_URL] :
raise Exception(source_type + ' is not a source type, which should be "text" or "url"')
self.source_type = source_type
def remove_punctuation(self, raw):
return raw.translate(string.maketrans("",""), string.punctuation)
def tokenize(self, raw):
result = None
if self.source_type == SOURCE_TYPE_TEXT:
result = self.tokenize_text(raw)
elif self.source_type == SOURCE_TYPE_URL:
result = self.tokenize_url(raw)
return ' '.join(result.split())
def tokenize_text(self, raw):
t = CrfTokenizer()
t.setRecognizeHtmlEntities(True)
t.setRecognizeHtmlTags(True)
t.setSkipHtmlTags(True)
t.setRecognizePunctuation(True)
tokens = t.tokenize(raw)
tokens = ' '.join(tokens)
tokens = self.remove_punctuation(tokens)
return tokens
def tokenize_url(self, raw):
SEPARATOR = ' '
url_obj = urlparse(raw)
# parse netloc
netloc = url_obj.netloc.split('.')[:-2] # get rid of port numbers, ext and domain name
# parse path
path = url_obj.path
path = Tokenizer.re_2_digts_only_in_url_regex.sub('', path)
path = path.split('/')
content = netloc + path
content = [SEPARATOR.join(Tokenizer.re_all_alphabet_in_url_regex.findall(_)) for _ in content]
# parse params
# url_obj.params
# parse query
# url_obj.query
return ' sep '.join(content)
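# Illustrative example (not part of the original file): tokenize_url keeps the word/digit
# chunks of a URL (netloc minus domain and extension, plus the path segments) joined by
# ' sep ' so the number extractor can scan them as plain text. The URL is the same sample
# used in the __main__ block at the bottom of this file.
def _example_tokenize_url():
    t = Tokenizer(source_type='url')
    return t.tokenize("http://costarica.backpage.com/BodyRubs/hoy-cerramos-a-las-11-71857376/2909373")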
class Cleaner():
def prep_misspelled_numeral_words(self, raw):
misspelling_dict = {
"th0usand": "thousand",
"th1rteen": "thirteen",
"f0urteen": "fourteen",
"e1ghteen": "eighteen",
"n1neteen": "nineteen",
"f1fteen": "fifteen",
"s1xteen": "sixteen",
"th1rty": "thirty",
"e1ghty": "eighty",
"n1nety": "ninety",
"fourty": "forty",
"f0urty": "forty",
"e1ght": "eight",
"f0rty": "forty",
"f1fty": "fifty",
"s1xty": "sixty",
"zer0": "zero",
"for": "four",
"f0ur": "four",
"f1ve": "five",
"n1ne": "nine",
"0ne": "one",
"too": "two",
"tw0": "two",
"to": "two",
"s1x": "six"
}
for key in misspelling_dict.keys():
raw = raw.replace(key, misspelling_dict[key])
return raw
    numbers = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
re_twenty_x = re.compile(r"(two|twenty[\W_]+(?=(\d|" + r"|".join(numbers) + ")))")
re_thirty_x = re.compile(r"(three|thirty[\W_]+(?=(\d|" + r"|".join(numbers) + ")))")
re_forty_x = re.compile(r"(four|forty[\W_]+(?=(\d|" + r"|".join(numbers) + ")))")
re_fifty_x = re.compile(r"(five|fifty[\W_]+(?=(\d|" + r"|".join(numbers) + ")))")
re_sixty_x = re.compile(r"(six|sixty[\W_]+(?=(\d|" + r"|".join(numbers) + ")))")
re_seventy_x = re.compile(r"(seven|seventy[\W_]+(?=(\d|" + r"|".join(numbers) + ")))")
re_eighty_x = re.compile(r"(eight|eighty[\W_]+(?=(\d|" + r"|".join(numbers) + ")))")
re_ninety_x = re.compile(r"(nine|ninety[\W_]+(?=(\d|" + r"|".join(numbers) + ")))")
re_ten = re.compile(r"(?<=[ilo0-9])ten(?=[ \b0-9])")
re_one = re.compile(r'(?:(?<=([0-9yneorxt]| ))one|(?:(?<=[ils])[i]((?=[ils])|$)))')
re_zero = re.compile(r'(?:zero|oh|(?:(?<=[0-9])(o+?))|(?:o(?=[0-9]))|(?:(?<=[o\s])o(?=[o\s])))')
def prep_replace_numeral_words(self, raw):
raw = raw.replace("hundred", "00")
raw = raw.replace("thousand", "000")
raw = raw.replace("eleven", "11")
raw = raw.replace("twelve", "12")
raw = raw.replace("thirteen", "13")
raw = raw.replace("fourteen", "14")
raw = raw.replace("fifteen", "15")
raw = raw.replace("sixteen", "16")
raw = raw.replace("seventeen", "17")
raw = raw.replace("eighteen", "18")
raw = raw.replace("nineteen", "19")
raw = Cleaner.re_twenty_x.sub("2", raw)
raw = Cleaner.re_thirty_x.sub("3", raw)
raw = Cleaner.re_forty_x.sub("4", raw)
raw = Cleaner.re_fifty_x.sub("5", raw)
raw = Cleaner.re_sixty_x.sub("6", raw)
raw = Cleaner.re_seventy_x.sub("7", raw)
raw = Cleaner.re_eighty_x.sub("8", raw)
raw = Cleaner.re_ninety_x.sub("9", raw)
raw = Cleaner.re_ten.sub("10", raw)
raw = Cleaner.re_one.sub("1", raw)
raw = Cleaner.re_zero.sub("0", raw)
raw = raw.replace("twenty", "20")
raw = raw.replace("thirty", "30")
raw = raw.replace("forty", "40")
raw = raw.replace("fifty", "50")
raw = raw.replace("sixty", "60")
raw = raw.replace("seventy", "70")
raw = raw.replace("eighty", "80")
raw = raw.replace("ninety", "90")
return raw
def clean(self, raw):
raw = self.prep_misspelled_numeral_words(raw)
raw = self.prep_replace_numeral_words(raw)
# print raw
return raw
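# Illustrative example (not part of the original file): the cleaner rewrites misspelled
# and spelled-out digit words before the regex extractor runs. The exact output depends
# on the substitution order above; this helper simply exposes it on a fragment of the
# sample text from the __main__ block below.
def _example_clean():
    return Cleaner().clean("for80 2sixseven one9zerofor 90hr incall")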
class ZEExtractor():
def __init__(self):
pass
prefix = r'(?:(?<=[\A\b\sa-zA-Z])|^)'
# prefix = r'\b'
# prefix = r'[ ]?'
postfix = r'(?:(?=[\Z\b\sa-zA-Z])|$)'
# postfix = r'\b'
# postfix = r'[ ]?'
phone_number_format_regex = [
r'(?:'+prefix+r"\d{10,13}"+postfix+r')',
r'(?:'+prefix+r"\d{9,10}"+postfix+r')',
r'(?:'+prefix+r"\d{8}[ ]\d{3,4}"+postfix+r')',
r'(?:'+prefix+r"\d{7}[ ]\d{3,4}"+postfix+r')',
r'(?:'+prefix+r"\d{6}[ ]\d{4}"+postfix+r')',
r'(?:'+prefix+r"\d{5}[ ]\d{6}"+postfix+r')',
r'(?:'+prefix+r"\d{5}[ ]\d{4}[ ]\d{4}"+postfix+r')',
r'(?:'+prefix+r"\d{5}[ ]\d{4}"+postfix+r')',
r'(?:'+prefix+r"\d{5}[ ]\d{4}[ ]\d{2}[ ]\d{2}"+postfix+r')',
r'(?:'+prefix+r"\d{4}[ ]\d{4}[ ]\d{2}"+postfix+r')',
r'(?:'+prefix+r"\d{4}[ ]\d{2}[ ]\d{2}[ ]\d{2}[ ]\d{2}"+postfix+r')',
r'(?:'+prefix+r"\d{4}[ ]\d{3}[ ]\d{3}"+postfix+r')',
r'(?:'+prefix+r"\d{3}[ ]\d{7,8}"+postfix+r')',
r'(?:'+prefix+r"\d{3}[ ]\d{4}[ ]\d{4}"+postfix+r')',
r'(?:'+prefix+r"\d{3}[ ]\d{4}[ ]\d{3}"+postfix+r')',
r'(?:'+prefix+r"\d{3}[ ]\d{3}[ ]\d{4}"+postfix+r')',
r'(?:'+prefix+r"\d{3}[ ]\d{3}[ ]\d{3}[ ]\d{1}"+postfix+r')',
r'(?:'+prefix+r"\d{3}[ ]\d{3}[ ]\d{2}[ ]\d{1}[ ]\d{1}"+postfix+r')',
r'(?:'+prefix+r"\d{3}[ ]\d{3}[ ]\d{1}[ ]\d{3}"+postfix+r')',
r'(?:'+prefix+r"\d{3}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{4}"+postfix+r')',
r'(?:'+prefix+r"\d{2}[ ]\d{4}[ ]\d{4}"+postfix+r')',
r'(?:'+prefix+r"\d{2}[ ]\d{8}"+postfix+r')',
r'(?:'+prefix+r"\d{1}[ ]\d{8}[ ]\d{1}"+postfix+r')', # \d{2}[ ] ...
r'(?:'+prefix+r"\d{1}[ ]\d{3}[ ]\d{3}[ ]\d{3}"+postfix+r')',
r'(?:'+prefix+r"\d{2}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')',
r'(?:'+prefix+r"\d{1}[ ]\d{2}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')',
r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{2}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')',
r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{2}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')',
r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{2}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')',
r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{2}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')',
r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{2}[ ]\d{1}[ ]\d{1}"+postfix+r')',
r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{2}[ ]\d{1}"+postfix+r')',
r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{2}"+postfix+r')',
r'(?:'+prefix+r"\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}[ ]\d{1}"+postfix+r')'
]
# numbers_regex = r"(?:" + r"|".join(phone_number_format_regex) + r")"
numbers_regex = r"(?:" + r"|".join(phone_number_format_regex) + r")"
re_numbers_regex = re.compile(numbers_regex)
# print numbers_regex
def extract(self, raw):
raw = ZEExtractor.re_numbers_regex.findall(raw)
raw = [''.join(_.split()) for _ in raw if len(_.strip()) >= 10]
return '\t'.join(raw)
class Validator():
re_zero = re.compile(r'0{3,}')
def validate_phone_number_with_coutry_code(self, raw, country_code='US'):
try:
z = phonenumbers.parse(raw, country_code)
except NumberParseException, e:
pass
"""
if e.error_type == NumberParseException.INVALID_COUNTRY_CODE:
# Invalid country code specified
return []
elif e.error_type == NumberParseException.NOT_A_NUMBER:
# The string passed in had fewer than 3 digits in it.
# The number failed to match the regular expression
return []
elif e.error_type == NumberParseException.TOO_SHORT_AFTER_IDD:
# The string started with an international dialing prefix
# but after this was removed, it had fewer digits than any
# valid phone number (including country code) could have.
return []
elif e.error_type == NumberParseException.TOO_SHORT_NSN:
# After any country code has been stripped, the string
# had fewer digits than any valid phone number could have.
return []
elif e.error_type == NumberParseException.TOO_LONG:
# String had more digits than any valid phone number could have
return []
"""
# print e.error_type, e._msg
else:
if phonenumbers.is_possible_number(z) and phonenumbers.is_valid_number(z):
return [raw]
else:
return []
def validate_phone_number(self, raw):
# match all countries if using area_code.get_all_country_iso_two_letter_code()
# may include too short phone numbers if use 'DE'
country_code_list = ['US', 'CN', 'IN', 'UA', 'JP', 'RU', 'IT', 'DE', 'CA', 'TR']
for country_code in country_code_list:
rtn = self.validate_phone_number_with_coutry_code(raw, country_code=country_code)
if rtn:
return rtn
def is_datetime(self, raw):
size = len(raw)
date_format = ''
if size == 14:
return is_valid_datetime(raw, '%Y%m%d%H%M%S')
elif size == 8:
return is_valid_datetime(raw, '%Y%m%d')
elif size == 6:
return is_valid_datetime(raw, '%Y%m%d') or is_valid_datetime(raw, '%H%M%S')
else:
return False
re_num_digits = [
None,
re.compile(r"\d{1}"),
re.compile(r"\d{2}"),
re.compile(r"\d{3}"),
re.compile(r"\d{4}"),
re.compile(r"\d{5}"),
re.compile(r"\d{6}")
]
def is_all_dup_digits(self, raw):
for i in range(1, 6):
rtn = Validator.re_num_digits[i].findall(raw)
if len(raw) % i != 0:
continue
if all(rtn[0] == rest for rest in rtn):
return True
return False
re_start_zero = re.compile(r'^0+')
def suggest_most_overlap(self, extracted_phone_list):
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
potential_invalid, potential_valid = [], []
for pn in extracted_phone_list:
if len(pn) == 10:
potential_valid.append(pn)
else:
potential_invalid.append(pn)
ans = list(potential_valid)
for pi in potential_invalid:
if any(similar(pi, pv) < .3 for pv in potential_valid):
ans.append(pi)
return ans
def validate(self, raw):
ans = []
for nums in raw.split('\t'):
nums = nums.strip()
nums = Validator.re_start_zero.sub('', nums)
if len(nums) > 16:
continue
if len(Validator.re_zero.findall(nums)):
continue
if self.is_all_dup_digits(nums):
continue
if self.is_datetime(nums):
continue
ans += [nums]
# valid = self.validate_phone_number(nums)
# if valid:
# ans.extend(valid)
ans = list(set(ans))
ans = self.suggest_most_overlap(ans)
return ' '.join(ans)
class Normalizer():
# try extracting from this one live escort reviews pnwrapper 754 307 7279 pnwrapper 49 91 3524432077 you won t be disappointedangel
re_digits = re.compile(r'(?:(?<=[ \s\b\Aa-zA-Z])[\d ]+(?=[ \s\b\Za-zA-Z]))')
def normalize(self, cleaned_output, uncleaned_output, output_format='list'):
# print [_.strip() for _ in Normalizer.re_digits.findall(tokenized_content) if _.strip() != '']
if output_format == 'obfuscation':
output = []
for co in cleaned_output.split():
phonenum = {}
phonenum['telephone'] = co
if co in uncleaned_output:
phonenum['obfuscation'] = 'False'
else:
phonenum['obfuscation'] = 'True'
output.append(phonenum)
return output
else:
return cleaned_output.split()
class PhoneNumberExtractor(object):
PN_OUTPUT_FORMAT_LIST = 'list'
PN_OUTPUT_FORMAT_OBFUSCATION = 'obfuscation'
def __init__(self, _output_format='list'):
self.preprocessor = Preprocessor()
self.tokenizer = Tokenizer(source_type='text')
self.extractor = ZEExtractor()
self.cleaner = Cleaner()
self.validator = Validator()
self.normalizer = Normalizer()
self.set_output_format(_output_format)
def set_output_format(self, _output_format):
# 1. list, 2. obfuscation
if _output_format not in [PhoneNumberExtractor.PN_OUTPUT_FORMAT_LIST, PhoneNumberExtractor.PN_OUTPUT_FORMAT_OBFUSCATION]:
raise Exception('output_format should be "list" or "obfuscation"')
self.output_format = _output_format
def do_process(self, content, source_type='text', do_preprocess=True, do_tokenize=True, do_clean=True, do_extract=True, do_validate=True):
if do_preprocess:
content = self.preprocessor.preprocess(content)
if do_tokenize:
self.tokenizer.set_source_type(source_type)
content = self.tokenizer.tokenize(content)
if do_clean:
content = self.cleaner.clean(content)
if do_extract:
content = self.extractor.extract(content)
if do_validate:
content = self.validator.validate(content)
return content
def match(self, content, source_type='text'):
cleaned_ans = self.do_process(content, source_type=source_type)
uncleaned_ans = self.do_process(content, source_type=source_type, do_clean=False)
return self.normalizer.normalize(cleaned_ans, uncleaned_ans, output_format=self.output_format)
########################################################################
# URLExtractor
########################################################################
import esm
import idna
import tldextract
re_dot = re.compile(r'(?:\s+?dot\s+?)', re.IGNORECASE)
reg_url_charactor = '[a-z0-9-.]'
re_url_charactor = re.compile(reg_url_charactor, re.IGNORECASE)
re_pretld = re.compile(reg_url_charactor+'+?$', re.IGNORECASE)
re_posttld = re.compile(':?[0-9]*[/[!#$&-;=?a-z_]+]?', re.IGNORECASE)
class URLExtractor(object):
def __init_tld_index():
tldindex = esm.Index()
tlds = (tldextract.TLDExtract()._get_tld_extractor().tlds)
for tld in tlds:
tldindex.enter('.' + tld.encode('idna'))
tldindex.fix()
return tldindex
tldindex = __init_tld_index()
@staticmethod
def preprocess(text):
def clean(text):
text = re_dot.sub('.', text)
return text
text = clean(text)
return text
@staticmethod
def query(text):
ans = []
exts = URLExtractor.tldindex.query(text)
for ext in exts:
pretld, posttld = None, None
url = ''
tld = ext[1]
startpt, endpt = ext[0][0], ext[0][1]
if len(text) > endpt:
nextcharacter = text[endpt]
if re_url_charactor.match(nextcharacter):
continue
posttld = re_posttld.match(text[endpt:])
pretld = re_pretld.search(text[:startpt])
if pretld:
url = pretld.group(0)
startpt -= len(pretld.group(0))
url += tld
if posttld:
url += posttld.group(0)
endpt += len(posttld.group(0))
url = url.rstrip(',.')
ans.append(url)
ans = list(set([_ for _ in ans if _]))
return ans
@staticmethod
def extract(text):
text = text.encode('ascii', 'ignore')
        text = URLExtractor.preprocess(text)
ans = URLExtractor.query(text)
return ans
# in production
# from digExtractor.extractor import Extractor
# in test
class Extractor:
    def extract(self, doc):
        raise NotImplementedError( "Need to implement extract function" )
    # should create a new dictionary each time
    def get_metadata(self):
        raise NotImplementedError( "Need to implement get_metadata function" )
    def set_metadata(self, metadata):
        raise NotImplementedError( "Need to implement set_metadata function" )
def get_renamed_input_fields(self):
raise NotImplementedError( "Need to implement get_renamed_input_fields function" )
def set_renamed_input_fields(self, renamed_input_fields):
if not (isinstance(renamed_input_fields, basestring) or isinstance(renamed_input_fields, types.ListType)):
raise ValueError("renamed_input_fields must be a string or a list")
self.renamed_input_fields = renamed_input_fields
return self
class PhoneExtractor(Extractor):
def __init__(self):
self.renamed_input_fields = '' # ? renamed_input_fields
def extract(self, doc):
urls = URLExtractor.extract(doc)
extractor = PhoneNumberExtractor()
extracts = []
for url in urls:
extracts += extractor.match(url, source_type='url')
doc = doc.replace(url, '')
extracts += extractor.match(doc, source_type='text')
return extracts
def get_metadata(self):
return copy.copy(self.metadata)
def set_metadata(self, metadata):
self.metadata = metadata
return self
def get_renamed_input_fields(self):
return self.renamed_input_fields
def set_renamed_input_fields(self, renamed_input_fields):
if not (isinstance(renamed_input_fields, basestring) or isinstance(renamed_input_fields, types.ListType)):
raise ValueError("renamed_input_fields must be a string or a list")
self.renamed_input_fields = renamed_input_fields
return self
if __name__ == '__main__':
doc = "71857376 71857376718 test 71857376719 718573767185 71837376718 71981090718 718573767198 719810907185 71857376150 1171857376 http://costarica.backpage.com/BodyRubs/hoy-cerramos-a-las-11-71857376/2909373 Sexy new girl in town searching for a great date wiff u Naughty fresh girl here searching 4 a great date wiff you Sweet new girl in town seeking for a good date with u for80 2sixseven one9zerofor 90hr incall or out call"
pe = PhoneExtractor()
print pe.extract(doc)
"""
# Samples
# from phone_number_extractor import PhoneNumberExtractor
extractor = PhoneNumberExtractor()
url_string = "http://costarica.backpage.com/BodyRubs/hoy-cerramos-a-las-11-71857376/2909373"
url_phone_numbers = extractor.match(url_string, source_type='url')
print url_phone_numbers
# text_string = "Sexy new girl in town searching for a great date wiff u Naughty fresh girl here searching 4 a great date wiff you Sweet new girl in town seeking for a good date with u for80 2sixseven one9zerofor 90hr incall or out call"
text_string = "71857376 71857376718 test 71857376719 718573767185 71837376718 71981090718 718573767198 719810907185 71857376150 1171857376"
text_phone_numbers = extractor.match(text_string, source_type='text')
print text_phone_numbers
"""
| apache-2.0 | -8,800,745,410,716,933,000 | 34.694737 | 433 | 0.532376 | false | 3.109786 | false | false | false |
kobejean/tensorflow | tensorflow/contrib/distribute/python/tpu_strategy.py | 1 | 20404 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Distribution Strategy.
This is experimental. It's not ready for general use.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distribute.python import cross_tower_ops as cross_tower_ops_lib
from tensorflow.contrib.distribute.python import one_device_strategy
from tensorflow.contrib.distribute.python import values
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training import device_util
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.util import nest
_TPU_INITIALIZE_SYSTEM_COLLECTION = "TPU_STRATEGY_INITIALIZE"
def get_tpu_system_metadata(tpu_cluster_resolver):
"""Retrieves TPU system metadata given a TPUClusterResolver."""
master = tpu_cluster_resolver.master()
# pylint: disable=protected-access
cluster_spec = tpu_cluster_resolver.cluster_spec()
cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None
tpu_system_metadata = (
tpu_system_metadata_lib._query_tpu_system_metadata(
master,
cluster_def=cluster_def,
query_topology=False))
return tpu_system_metadata
# TODO(jhseu): Deduplicate with MirroredStrategy?
def _create_tpu_mirrored_variable(devices, real_mirrored_creator, *args,
**kwargs): # pylint: disable=g-missing-docstring
# Figure out what collections this variable should be added to.
# We'll add the TPUMirroredVariable to those collections instead.
collections = kwargs.pop("collections", None)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
kwargs["collections"] = []
# TODO(jhseu): Should we have different behavior for different
# synchronization settings?
# Get aggregation value
# TODO(jhseu): Support aggregation in a tower context.
aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
if aggregation not in [
vs.VariableAggregation.NONE,
vs.VariableAggregation.SUM,
vs.VariableAggregation.MEAN,
vs.VariableAggregation.ONLY_FIRST_TOWER,
]:
raise ValueError("Invalid variable aggregation mode: {} for variable: {}"
.format(aggregation, kwargs["name"]))
# Ignore user-specified caching device, not needed for mirrored variables.
kwargs.pop("caching_device", None)
# TODO(josh11b,apassos): It would be better if variable initialization
# was never recorded on the tape instead of having to do this manually
# here.
with tape.stop_recording():
index = real_mirrored_creator(devices, *args, **kwargs)
result = values.TPUMirroredVariable(index, index[devices[0]], aggregation)
if not context.executing_eagerly():
g = ops.get_default_graph()
# If "trainable" is True, next_creator() will add the member variables
# to the TRAINABLE_VARIABLES collection, so we manually remove
# them and replace with the MirroredVariable. We can't set
# "trainable" to False for next_creator() since that causes functions
# like implicit_gradients to skip those variables.
if kwargs.get("trainable", True):
collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
for v in index.values():
l.remove(v)
g.add_to_collections(collections, result)
return result
# TODO(jhseu): Stop inheriting from OneDeviceStrategy.
class TPUStrategy(one_device_strategy.OneDeviceStrategy):
"""Experimental TPU distribution strategy implementation."""
def __init__(self, tpu_cluster_resolver, steps_per_run, num_cores=None):
"""Initializes the TPUStrategy object.
Args:
tpu_cluster_resolver: A tf.contrib.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
steps_per_run: Number of steps to run on device before returning to the
host. Note that this can have side-effects on performance, hooks,
metrics, summaries etc.
This parameter is only used when Distribution Strategy is used with
estimator or keras.
num_cores: Number of cores to use on the TPU. If None specified, then
auto-detect the cores and topology of the TPU system.
"""
# TODO(sourabhbajaj): OneDeviceStrategy should be initialized with the
# master node fetched from the cluster resolver.
super(TPUStrategy, self).__init__('/device:CPU:0')
self._tpu_cluster_resolver = tpu_cluster_resolver
self._tpu_metadata = get_tpu_system_metadata(self._tpu_cluster_resolver)
# TODO(sourabhbajaj): Change this from num_cores to metadata_override
self._num_cores_override = num_cores
# TODO(jhseu): Switch to DeviceAssignment to support pods and model
# parallelism.
device_map = {d.name: i for i, d in enumerate(self._tpu_metadata.devices)
if "device:TPU:" in d.name}
self._device_index = values.PerDevice(device_map)
self._tpu_devices = sorted(device_map.keys())
# Only create variables for the number of towers we're running.
self._tpu_devices = self._tpu_devices[:self.num_towers]
# TODO(sourabhbajaj): Remove this once performance of running one step
# at a time is comparable to multiple steps.
self.steps_per_run = steps_per_run
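  # Illustrative sketch (not part of the original file): a typical construction, assuming
  # a reachable Cloud TPU whose name is a placeholder here:
  #
  #   resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu='my-tpu')
  #   strategy = TPUStrategy(resolver, steps_per_run=100)
  #
  # steps_per_run only batches host-side loop iterations; it does not change the model.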
def _get_enqueue_op_per_host(self, host_id, iterator, input_shapes,
iterations):
"""Create an enqueue op for a single host identified using host_id.
The while_loop op returned will run `iterations` times and in each run
enqueue batches for each shard.
Args:
host_id: integer, id of the host to run the enqueue ops on.
iterator: `tf.data` iterator to read the input data.
input_shapes: shape of inputs to be enqueue on the queue. This is same as
the value of `nest.flatten(iterator.output_shapes)`.
iterations: integer, number of iterations to be run; determines the
number of batches to be enqueued.
Returns:
while_loop_op running `iterations` times; in each run we enqueue a batch
on the infeed queue from the host with id `host_id` for each device shard.
"""
host = self.get_host_cpu_device(host_id)
def _infeed_enqueue_ops_fn():
"""Enqueue ops for one iteration."""
control_deps = []
sharded_inputs = []
enqueue_ops = []
with ops.device(host):
for _ in range(self.num_towers_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
inputs = nest.flatten(iterator.get_next())
control_deps.extend(inputs)
sharded_inputs.append(inputs)
for core_id, shard_input in enumerate(sharded_inputs):
enqueue_ops.append(
tpu_ops.infeed_enqueue_tuple(
inputs=shard_input,
shapes=input_shapes,
device_ordinal=core_id))
return enqueue_ops
def enqueue_ops_loop_body(i):
"""Callable for the loop body of the while_loop instantiated below."""
with ops.control_dependencies(_infeed_enqueue_ops_fn()):
return i + 1
with ops.device(host):
enqueue_op_per_host = control_flow_ops.while_loop(
lambda i: i < iterations,
enqueue_ops_loop_body,
[constant_op.constant(0)],
parallel_iterations=1)
return enqueue_op_per_host
def distribute_dataset(self, dataset_fn):
# TODO(priyag): Perhaps distribute across cores here.
return self._call_dataset_fn(dataset_fn)
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
# TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have
# a mechanism to infer the outputs of `fn`. Pending b/110550782.
def _run_steps_on_dataset(self, fn, iterator, iterations,
initial_loop_values=None):
shapes = nest.flatten(iterator.output_shapes)
if any([not s.is_fully_defined() for s in shapes]):
raise ValueError(
'TPU currently requires fully defined shapes. Either use '
'set_shape() on the input tensors or use '
'dataset.batch(..., drop_remainder=True).')
types = nest.flatten(iterator.output_types)
enqueue_ops = [
self._get_enqueue_op_per_host(host_id, iterator, shapes, iterations)
for host_id in range(self.num_hosts)]
def dequeue_fn():
dequeued = tpu_ops.infeed_dequeue_tuple(dtypes=types, shapes=shapes)
return nest.pack_sequence_as(iterator.output_shapes, dequeued)
# Wrap `fn` for repeat.
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = values.MultiStepContext()
def run_fn(*args, **kwargs):
"""Single step on the TPU device."""
del args, kwargs
fn_inputs = dequeue_fn()
if not isinstance(fn_inputs, tuple):
fn_inputs = (fn_inputs,)
fn_result = fn(ctx, *fn_inputs)
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
if flat_last_step_outputs:
with ops.control_dependencies([fn_result]):
return [array_ops.identity(f) for f in flat_last_step_outputs]
else:
return fn_result
# TODO(sourabhbajaj): The input to while loop should be based on the output
# type of the step_fn
def iterate_on_tpu():
return training_loop.repeat(iterations, run_fn, initial_loop_values)
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop and TPU replicate context. This is useful in cases
# where we might need to exit these contexts and get back to the outer
# context to do some things, for e.g. create an op which should be
# evaluated only once at the end of the loop on the host. One such usage
# is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
replicate_inputs = [[]] * self.num_towers
replicate_outputs = tpu.replicate(iterate_on_tpu, replicate_inputs)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(replicate_outputs, enqueue_ops)
# Filter out any ops from the outputs, typically this would be the case
# when there were no tensor outputs.
last_step_tensor_outputs = [x for x in replicate_outputs
if not isinstance(x, ops.Operation)]
# Outputs are currently of the structure (grouped by device)
# [[output0_device0, output1_device0, output2_device0],
# [output0_device1, output1_device1, output2_device1]]
# Convert this to the following structure instead: (grouped by output)
# [[output0_device0, output0_device1],
# [output1_device0, output1_device1],
# [output2_device0, output2_device1]]
last_step_tensor_outputs = [list(x) for x in zip(*last_step_tensor_outputs)]
# Convert replicate_outputs to the original dict structure of
# last_step_outputs.
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
for (name, aggregation) in ctx._last_step_outputs_aggregations.items(): # pylint: disable=protected-access
output = last_step_tensor_outputs_dict[name]
# For outputs that have already been aggregated, take the first value
# from the list as each value should be the same. Else return the full
# list of values.
if aggregation is not variables_lib.VariableAggregation.NONE:
# TODO(priyag): Should this return the element or a list with 1 element
last_step_tensor_outputs_dict[name] = output[0]
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
return ctx
def _call_for_each_tower(self, fn, *args, **kwargs):
# TODO(jhseu): Consider making it so call_for_each_tower implies that we're
# in a tpu.rewrite(), and update TPUMirroredVariable accordingly.
kwargs.pop('run_concurrently', None)
with one_device_strategy._OneDeviceTowerContext(self): # pylint: disable=protected-access
return fn(*args, **kwargs)
def initialize(self):
if context.executing_eagerly():
      # TODO(priyag): Add appropriate call here when eager is supported for TPUs.
raise NotImplementedError('Eager mode not supported in TPUStrategy.')
else:
# TODO(jhseu): We need this hack because DistributionStrategies must be
# pickleable for copy.deepcopy(). Remove when initialize_system goes away.
graph = ops.get_default_graph()
tpu_init = graph.get_collection(_TPU_INITIALIZE_SYSTEM_COLLECTION)
if tpu_init:
return tpu_init
graph.add_to_collection(_TPU_INITIALIZE_SYSTEM_COLLECTION,
tpu.initialize_system())
return graph.get_collection(_TPU_INITIALIZE_SYSTEM_COLLECTION)
def finalize(self):
if context.executing_eagerly():
      # TODO(priyag): Add appropriate call here when eager is supported for TPUs.
raise NotImplementedError('Eager mode not supported in TPUStrategy.')
else:
return [tpu.shutdown_system()]
def _get_devices_from(self, colocate_with=None):
# TODO(jhseu): Change this when we support model parallelism.
return self._tpu_devices
def _create_variable(self, next_creator, *args, **kwargs):
"""Create a TPUMirroredVariable. See `DistributionStrategy.scope`."""
colocate_with = kwargs.pop("colocate_with", None)
devices = self._get_devices_from(colocate_with)
def _real_mirrored_creator(devices, *args, **kwargs): # pylint: disable=g-missing-docstring
index = {}
for i, d in enumerate(devices):
with ops.device(d):
if i > 0:
# Give replicas meaningful distinct names:
var0name = index[devices[0]].name.split(":")[0]
# We append a / to variable names created on towers with id > 0 to
# ensure that we ignore the name scope and instead use the given
# name as the absolute name of the variable.
kwargs["name"] = "%s/replica_%d/" % (var0name, i)
# Initialize replicas with the same value:
if context.executing_eagerly():
kwargs["initial_value"] = array_ops.identity(
index[devices[0]].value())
else:
def initial_value_fn(device=d):
with ops.device(device):
return array_ops.identity(index[devices[0]].initial_value)
kwargs["initial_value"] = initial_value_fn
with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
v = next_creator(*args, **kwargs)
assert not isinstance(v, values.TPUMirroredVariable)
index[d] = v
return index
return _create_tpu_mirrored_variable(devices, _real_mirrored_creator, *args,
**kwargs)
def _reduce(self, aggregation, value, destinations):
if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access
if aggregation == vs.VariableAggregation.MEAN:
# TODO(jhseu): Revisit once we support model-parallelism.
value *= (1. / self.num_towers)
elif aggregation != vs.VariableAggregation.SUM:
raise NotImplementedError(
"Currently only support sum & mean in TPUStrategy.")
return tpu_ops.cross_replica_sum(value)
# Validate that the destination is same as the host device
# Note we don't do this when in replicate context as the reduction is
# performed on the TPU device itself.
devices = cross_tower_ops_lib.get_devices_from(destinations)
if len(devices) == 1:
assert device_util.canonicalize(devices[0]) == device_util.canonicalize(
self.get_host_cpu_device(0))
else:
raise ValueError('Multiple devices are not supported for TPUStrategy')
if aggregation == vs.VariableAggregation.ONLY_FIRST_TOWER:
return value[0]
output = math_ops.add_n(value)
if aggregation == vs.VariableAggregation.MEAN:
return output * (1. / len(value))
return output
def _update(self, var, fn, *args, **kwargs):
# TODO(jhseu): Consider supporting grouped==False.
assert isinstance(var, values.TPUMirroredVariable)
if values._enclosing_tpu_context() is not None: # pylint: disable=protected-access
return fn(var, *args, **kwargs)
# Otherwise, we revert to MirroredStrategy behavior and update each variable
# directly.
updates = {}
for d, v in var._index.items(): # pylint: disable=protected-access
name = "update_%d" % self._device_index.get(d)
with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):
# If args and kwargs are not mirrored, the value is returned as is.
updates[d] = fn(v,
*values.select_device_mirrored(d, args),
**values.select_device_mirrored(d, kwargs))
# Make a single control dependency to keep the variables mirrored. If one
# assignment is fetched, then run all assignments.
sorted_keys = sorted(updates.keys())
update_tuple = control_flow_ops.tuple([updates[d] for d in sorted_keys])
for i, d in enumerate(sorted_keys):
updates[d] = update_tuple[i]
return values.regroup(updates, values.Mirrored)
def read_var(self, var):
assert isinstance(var, values.TPUMirroredVariable)
return var.read_value()
def _unwrap(self, value):
if isinstance(value, list):
return value
return [value]
@property
def num_towers(self):
return self._num_cores_override or self._tpu_metadata.num_cores
@property
def num_hosts(self):
return self._tpu_metadata.num_hosts
@property
def num_towers_per_host(self):
return self._tpu_metadata.num_of_cores_per_host
@property
def between_graph(self):
return False
@property
def should_init(self):
return True
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
@property
def worker_devices(self):
return self._tpu_devices
@property
def parameter_devices(self):
return self._tpu_devices
def get_host_cpu_device(self, host_id):
if self._tpu_cluster_resolver.get_master() in ('', 'local'):
return '/replica:0/task:0/device:CPU:0'
job_name = self._tpu_cluster_resolver.get_job_name() or 'tpu_worker'
return '/job:%s/task:%d/device:CPU:0' % (job_name, host_id)
def configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
del cluster_spec, task_type, task_id
if session_config:
session_config.isolate_session_state = True
cluster_spec = self._tpu_cluster_resolver.cluster_spec()
if cluster_spec:
session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
| apache-2.0 | 6,564,893,554,403,699,000 | 40.897331 | 111 | 0.680259 | false | 3.88944 | false | false | false |
ladybug-tools/honeybee | honeybee_plus/utilcol.py | 1 | 1078 | """A collection of useful utilities for Honeybee"""
import uuid
import re
def random_name(shorten=True):
"""Generate a random name as a string using uuid.
Args:
        shorten: If True the name will be the first two segments of the uuid.
"""
if shorten:
return '-'.join(str(uuid.uuid4()).split('-')[:2])
else:
return str(uuid.uuid4())
def check_name(name):
"""Check if a name is a valid honeybee name.
    A valid name can only contain letters, digits, dots, underscores and dashes.
"""
name = name.encode('utf-8')
try:
match = re.match(b"^[.A-Za-z0-9_-]*$", name)
except TypeError:
match = re.match(r"^[.A-Za-z0-9_-]*$", name)
if match:
return True
else:
raise ValueError(
'Invalid input name: ({}).'
' Name can only contain letters, numbers,'
' dots, underscores and dashes.'.format(name)
)
if __name__ == '__main__':
check_name('should_be_fine')
# check_name('also-fine')
check_name('this.is.also.fine.1234')
# check_name('not good')
| gpl-3.0 | 1,852,447,149,315,065,000 | 24.069767 | 71 | 0.56308 | false | 3.511401 | false | false | false |
zjj/trac_hack | sample-plugins/HelloWorld.py | 1 | 2140 | """Example macro."""
revision = "$Rev: 6326 $"
url = "$URL: https://svn.edgewall.org/repos/trac/tags/trac-0.12.2/sample-plugins/HelloWorld.py $"
#
# The following shows the code for macro, old-style.
#
# The `execute` function serves no purpose other than to illustrate
# the example, it will not be used anymore.
#
# ---- (ignore in your own macro) ----
# --
from trac.util import escape
def execute(hdf, txt, env):
# Currently hdf is set only when the macro is called
# From a wiki page
if hdf:
hdf['wiki.macro.greeting'] = 'Hello World'
    # args will be `None` if the macro is called without parentheses.
args = txt or 'No arguments'
# then, as `txt` comes from the user, it's important to guard against
# the possibility to inject malicious HTML/Javascript, by using `escape()`:
return 'Hello World, args = ' + escape(args)
# --
# ---- (ignore in your own macro) ----
#
# The following is the converted new-style macro
#
# ---- (reuse for your own macro) ----
# --
from trac.wiki.macros import WikiMacroBase
class HelloWorldMacro(WikiMacroBase):
"""Simple HelloWorld macro.
Note that the name of the class is meaningful:
- it must end with "Macro"
- what comes before "Macro" ends up being the macro name
The documentation of the class (i.e. what you're reading)
will become the documentation of the macro, as shown by
the !MacroList macro (usually used in the TracWikiMacros page).
"""
def expand_macro(self, formatter, name, args):
"""Return some output that will be displayed in the Wiki content.
`name` is the actual name of the macro (no surprise, here it'll be
`'HelloWorld'`),
        `args` is the text enclosed in parentheses at the call of the macro.
        Note that if there are ''no'' parentheses (like in, e.g.
[[HelloWorld]]), then `args` is `None`.
"""
return 'Hello World, args = ' + unicode(args)
# Note that there's no need to HTML escape the returned data,
# as the template engine (Genshi) will do it for us.
# --
# ---- (reuse for your own macro) ----
| bsd-3-clause | 5,799,304,578,152,899,000 | 31.923077 | 97 | 0.649533 | false | 3.721739 | false | false | false |
rymate1234/rymate-blog | migrations/versions/413f129e8b07_.py | 1 | 1535 | """empty message
Revision ID: 413f129e8b07
Revises: None
Create Date: 2014-05-02 08:09:09.906725
"""
# revision identifiers, used by Alembic.
revision = '413f129e8b07'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=80), nullable=False),
sa.Column('email', sa.String(length=80), nullable=False),
sa.Column('password', sa.String(length=128), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('first_name', sa.String(length=30), nullable=True),
sa.Column('last_name', sa.String(length=30), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('roles')
op.drop_table('users')
### end Alembic commands ###
| bsd-3-clause | 5,584,822,916,619,234,000 | 30.979167 | 65 | 0.663192 | false | 3.449438 | false | false | false |
zdomjus60/astrometry | tools.py | 1 | 10051 | # -*- coding: utf-8 -*-
""" helper functions for time management
"""
import math
def sin(x):
return math.sin(math.radians(x))
def cos(x):
return math.cos(math.radians(x))
def atan2(y , x):
return math.degrees(math.atan2(y, x))
def reduce360(x):
return x % 360.0
def dms2ddd(hour, minute, second):
""" from sexagesimal to decimal """
return hour+minute/60.0+second/3600.0
def ddd2dms(dec_hour):
""" from decimal to sexagesimal representation of hours and angles."""
if dec_hour < 0:
sign = -1
dec_hour *= sign
else:
sign = 1
total_seconds = int(dec_hour * 3600.0+.5)
seconds = total_seconds % 60
total_minutes = int((total_seconds - seconds)/60.0)
minutes = total_minutes % 60
hours = int((total_minutes - minutes)/60.0)
return (hours * sign, minutes * sign, seconds * sign)
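# Worked check (added example, not part of the original module): 12h 30m 00s is 12.5
# decimal hours, and the two helpers above invert each other.
def _example_sexagesimal_roundtrip():
    return dms2ddd(12, 30, 0), ddd2dms(12.5)   # -> (12.5, (12, 30, 0))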
def cal2jul(year, month, day, hour=0, minute=0, second=0):
""" converts calendar date to julian date
    this routine and the following are built following the Duffett-Smith / Zwart instructions
    as given in Peter Duffett-Smith and Jonathan Zwart, Practical Astronomy with your Calculator or Spreadsheet,
    Fourth Edition, Cambridge University Press, 2011
For an easier use of the function, hours minutes and seconds are defaulted to 0, so it's
not necessary to give them as parameters when the hour is 00:00:00
"""
month2 = month
year2 = year
if month2 <= 2:
year2 -= 1
month2 += 12
else:
pass
if (year*10000 + month*100 + day) >= 15821015:
a = math.trunc(year2/100.0)
b = 2 - a + math.trunc(a/4.0)
else:
a = 0
b = 0
if year < 0:
c = math.trunc((365.25 * year2)-0.75)
else:
c = math.trunc(365.25 * year2)
d = math.trunc(30.6001 *(month2 + 1))
return b + c + d + day + hour / 24.0 + minute / 1440.0 + second / 86400.0 + 1720994.5
def jul2cal(jd):
""" converts julian date to calendar date """
jd += 0.5
i = math.modf(jd)[1]
f = math.modf(jd)[0]
if i > 2299160:
a = math.trunc((i-1867216.25)/36524.25)
b = i + a - math.trunc(a/4)+1
else:
b = i
c = b + 1524
d = math.trunc((c-122.1)/365.25)
e = math.trunc(365.25 * d)
g = math.trunc((c-e)/30.6001)
day = c-e+f-math.trunc(30.6001*g)
if g < 13.5:
month = g - 1
else:
month = g - 13
if month > 2.5:
year = d - 4716
else:
year = d - 4715
hours_frac = math.modf(day)[0]*24
day = int(day)
hour, minute, second = ddd2dms(hours_frac)
return (year, month, day, hour, minute, second)
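# Worked check (added example, not part of the original module): the J2000.0 epoch,
# 2000 January 1 at 12:00 UT, corresponds to JD 2451545.0, and jul2cal inverts cal2jul.
def _example_julian_roundtrip():
    jd = cal2jul(2000, 1, 1, 12)   # -> 2451545.0
    return jd, jul2cal(jd)         # -> (2451545.0, (2000, 1, 1, 12, 0, 0))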
def day_of_the_week(year, month, day):
""" given a calendar date, the routine returns a tuple with the Day Of The Week in number and in plaintext
0 for Sunday 1 for Monday and so on up to 6 Saturday
"""
doth = {0:'Sunday', 1:'Monday', 2:'Tuesday',
3:'Wednesday', 4:'Thursday', 5:'Friday',
6:'Saturday'}
jd = cal2jul(year, month, day, 0, 0, 0)
a = (jd+1.5)/7
f = math.trunc((a % 1)*7 +.5)
return (f,doth[f])
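# Worked check (added example, not part of the original module): 2000 January 1 fell on
# a Saturday, so the routine returns (6, 'Saturday').
def _example_day_of_the_week():
    return day_of_the_week(2000, 1, 1)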
def lt2ut(year, month, day, hour=0, minute=0, second=0, timezone=0, DS=0):
""" Given, for a location on the Earth,a date, a time, a timezone (East + West - in hours) and the Daylight
Savings (0 normal time 1 Daylight Savings), this routine gives back a calendar date in Universal Time
representation (year, month, day, hour, minute, second).
It aims to restore a common date and time for all places in the Earth. Timezone and
Daylight Savings can be automized knowing the location using the pytz module (Olson
database)
"""
ut = dms2ddd(hour,minute,second) - timezone - DS
greenwich_calendar_date = day + ut/24
jd = cal2jul(year, month, greenwich_calendar_date)
greenwich_calendar_date = jul2cal(jd)
return greenwich_calendar_date
def ut2lt(year, month, day, hour=0, minute=0, second=0, timezone=0, DS=0):
""" Given a date, a time for Greenwich in UT format this routine gives back a calendar date
in local time representation (year, month, day, hour, minute, second).
It's the inverse function of the previous formula
"""
lt = dms2ddd(hour,minute,second) + timezone +DS
local_calendar_date = day + lt/24
jd = cal2jul(year, month, local_calendar_date)
local_calendar_date = jul2cal(jd)
return local_calendar_date
def ut2gst(year, month, day, hour, minute, second):
""" Sidereal time is a time-keeping system astronomers use to keep track of the direction to point
their telescopes to view a given star in the night sky.
Briefly, sidereal time is a "time scale that is based on the Earth's rate of rotation measured
relative to the fixed stars." (source Wikipedia)
This routine converts Universal Time to Sidereal Time for Greenwich (Greenwich Sidereal Time)
"""
jd = cal2jul(year, month, day)
S = jd - 2451545.0
T = S/36525.0
T0 = (6.697374558 + (2400.051336 * T)+ 0.000025862 *T*T) % 24
UT = dms2ddd(hour, minute, second)*1.002737909
GST = ddd2dms((UT + T0) % 24)
return GST
def gst2ut( year, month, day, hour, minute, second):
""" Inverse of the previous function
"""
jd = cal2jul(year, month, day, 0,0,0)
S = jd - 2451545.0
T = S/36525.0
T0 = (6.697374558 + 2400.051336 * T + 0.000025862 *T*T) % 24
GST = (dms2ddd(hour, minute, second) - T0) % 24
while GST <0:
GST += 24
UT = GST * .9972695663
return ddd2dms(UT)
def gst2lst( hour, minute, second, long_degree, long_minute, long_second=0):
""" Corrects GST for a different location on the Earth
"""
GST = dms2ddd(hour,minute,second)
lg = dms2ddd(long_degree, long_minute, long_second)/15.0
lst = ddd2dms((GST + lg) % 24)
return lst
def lst2gst( hour, minute, second, long_degree, long_minute, long_second=0):
""" Inverse of the previous method
"""
lst = dms2ddd(hour,minute,second)
lg = dms2ddd(long_degree, long_minute, long_second)/15.0
GST = ddd2dms((lst + lg) % 24)
return GST
def julian_centuries(year, month, day, hour=0, minute =0, second=0):
d1 = cal2jul(year, month, day, hour, minute, second)
d2 = cal2jul(2000,1,1,12)
return (d1-d2) / 36525.0
def julian_millennia(year, month, day, hour=0, minute =0, second=0):
return julian_centuries(year, month, day, hour, minute, second) / 10.0
def julian_decamillennia(year, month, day, hour=0, minute =0, second=0):
return julian_centuries(year, month, day, hour, minute, second) / 100.0
def obl_ecl_JPL(year, month, day, hour=0, minute = 0, second = 0):
    """ from JPL Astronomical Almanac 2010 """
    t = julian_centuries(year, month, day, hour, minute, second)
return (23 * 3600 + 26*60 + 21.406
- 46.836769 * t
- 0.0001831 * t * t
+ 0.00200340 * t * t * t
- 0.576e-6 * t * t * t * t
- 4.34e-8 * t * t * t * t * t) / 3600.0
def obl_ecl_Laskar(year, month, day, hour = 0, minute = 0, second = 0):
"""
Original work from Jay Tanner
- converted to Python code by Domenico Mustara 2015
This PHP function computes the mean obliquity of the ecliptic
given a JD argument corresponding to any given date and time.
Author: Jay Tanner - 2010
The algorithm used here is based on work published by J. Laskar
Astronomy and Astrophysics, Vol 157, p68 (1986),
New Formulas for the Precession, Valid Over 10000 years,
Table 8.
Source code provided under the provisions of the
GNU Affero General Public License (AGPL), version 3.
http://www.gnu.org/licenses/agpl.html
// -----------------------------------------------------------
// Compute the (t) value in Julian decamillennia corresponding
// to the JD argument and reckoned from J2000.
$t = ($JD - 2451545.0) / 3652500.0;
// --------------------------------------
"""
t = julian_decamillennia(year, month, day, hour, minute, second)
w = 84381.448
w -= 4680.93 * t
w -= 1.55 * t * t
w += 1999.25 * t * t * t
w -= 51.38 * t * t * t * t
w -= 249.67 * t * t * t * t * t
w -= 39.05 * t * t * t * t * t * t
w += 7.12 * t * t * t * t * t * t * t
w += 27.87 * t * t * t * t * t * t * t * t
w += 5.79 * t * t * t * t * t * t * t * t * t
w += 2.45 * t * t * t * t * t * t * t * t * t * t
return w / 3600.0
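# Worked check (added example, not part of the original module): at the J2000.0 epoch
# both expressions give the mean obliquity of the ecliptic, about 23.4393 degrees, and
# they agree to well under an arcsecond.
def _example_obliquity_j2000():
    jpl = obl_ecl_JPL(2000, 1, 1, 12)        # ~23.43928 degrees
    laskar = obl_ecl_Laskar(2000, 1, 1, 12)  # ~23.43929 degrees
    return jpl, laskar, abs(jpl - laskar) < 1e-3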
""" Some conversion utilities between various coordinate systems """
def sph_ecl2rect_ecl(r, longitude, latitude):
x = r * cos(latitude) * cos(longitude)
y = r * cos(latitude) * sin(longitude)
z = r * sin(latitude)
return (x,y,z)
def rect_ecl2sph_ecl(x,y,z):
r = math.sqrt(x*x + y*y + z*z)
longitude = atan2(y,x)
latitude = atan2(z, math.sqrt(x*x + y*y))
return (r, longitude, latitude)
def sph_equat2rect_equat(r, RA, Declination):
x = r * cos(RA) * cos(Declination)
y = r * sin(RA) * cos(Declination)
z = r * sin(Declination)
    return (x,y,z)
def rect_equat2sph_equat(x,y,z):
r = math.sqrt(x*x + y*y +z*z)
RA = atan2(y, x)
Decl = atan2(z, math.sqrt(x*x + y*y))
return (r, RA, Decl)
def rect_ecl2rect_equat(xeclip, yeclip, zeclip, year, month, day, hour = 0, minute = 0, second = 0):
oblecl = obl_ecl_JPL(year, month, day, hour, minute, second)
xequat = xeclip
yequat = yeclip * cos(oblecl) - zeclip * sin(oblecl)
zequat = yeclip * sin(oblecl) + zeclip * cos(oblecl)
return (xequat, yequat, zequat)
def rect_equat2rect_ecl(xequat, yequat, zequat, year, month, day, hour = 0, minute = 0, second = 0):
oblecl = obl_ecl_JPL(year, month, day, hour, minute, second)
xeclip = xequat
yeclip = yequat * cos(- oblecl) - zequat * sin(- oblecl)
zeclip = yequat * sin(- oblecl) + zequat * cos(- oblecl)
return (xeclip, yeclip, zeclip)
| cc0-1.0 | -7,099,347,639,674,084,000 | 34.641844 | 111 | 0.594369 | false | 2.931175 | false | false | false |
stoeckli/iMatrixSpray | octoprint/printer.py | 1 | 20362 | # coding=utf-8
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import time
import datetime
import threading
import copy
import os
#import logging, logging.config
import octoprint.util.comm as comm
import octoprint.util as util
from octoprint.settings import settings
from octoprint.events import eventManager
def getConnectionOptions():
"""
	Retrieves the available ports, baudrates, preferred port and baudrate for connecting to the printer.
"""
return {
"ports": comm.serialList(),
"baudrates": comm.baudrateList(),
"portPreference": settings().get(["serial", "port"]),
"baudratePreference": settings().getInt(["serial", "baudrate"]),
"autoconnect": settings().getBoolean(["serial", "autoconnect"])
}
class Printer():
def __init__(self, gcodeManager):
from collections import deque
self._gcodeManager = gcodeManager
self._gcodeManager.registerCallback(self)
# state
self._temp = None
self._bedTemp = None
self._targetTemp = None
self._targetBedTemp = None
self._temps = {
"actual": deque([], 300),
"target": deque([], 300),
"actualBed": deque([], 300),
"targetBed": deque([], 300)
}
self._tempBacklog = []
self._latestMessage = None
self._messages = deque([], 300)
self._messageBacklog = []
self._latestLog = None
self._log = deque([], 300)
self._logBacklog = []
self._state = None
self._currentZ = None
self._progress = None
self._printTime = None
self._printTimeLeft = None
self._printAfterSelect = False
# sd handling
self._sdPrinting = False
self._sdStreaming = False
self._selectedFile = None
# comm
self._comm = None
# callbacks
self._callbacks = []
self._lastProgressReport = None
self._stateMonitor = StateMonitor(
ratelimit=0.5,
updateCallback=self._sendCurrentDataCallbacks,
addTemperatureCallback=self._sendAddTemperatureCallbacks,
addLogCallback=self._sendAddLogCallbacks,
addMessageCallback=self._sendAddMessageCallbacks
)
self._stateMonitor.reset(
state={"state": None, "stateString": self.getStateString(), "flags": self._getStateFlags()},
jobData={"filename": None, "filesize": None, "estimatedSprayTime": None, "filament": None},
progress={"progress": None, "filepos": None, "sprayTime": None, "sprayTimeLeft": None},
currentZ=None
)
#~~ callback handling
def registerCallback(self, callback):
self._callbacks.append(callback)
self._sendInitialStateUpdate(callback)
def unregisterCallback(self, callback):
if callback in self._callbacks:
self._callbacks.remove(callback)
def _sendAddTemperatureCallbacks(self, data):
for callback in self._callbacks:
try: callback.addTemperature(data)
except: pass
def _sendAddLogCallbacks(self, data):
for callback in self._callbacks:
try: callback.addLog(data)
except: pass
def _sendAddMessageCallbacks(self, data):
for callback in self._callbacks:
try: callback.addMessage(data)
except: pass
def _sendCurrentDataCallbacks(self, data):
for callback in self._callbacks:
try: callback.sendCurrentData(copy.deepcopy(data))
except: pass
def _sendTriggerUpdateCallbacks(self, type):
for callback in self._callbacks:
try: callback.sendUpdateTrigger(type)
except: pass
def _sendFeedbackCommandOutput(self, name, output):
for callback in self._callbacks:
try: callback.sendFeedbackCommandOutput(name, output)
except: pass
#~~ callback from gcodemanager
def sendUpdateTrigger(self, type):
if type == "gcodeFiles" and self._selectedFile:
self._setJobData(self._selectedFile["filename"],
self._selectedFile["filesize"],
self._selectedFile["sd"])
#~~ printer commands
def connect(self, port=None, baudrate=None):
"""
Connects to the printer. If port and/or baudrate is provided, uses these settings, otherwise autodetection
will be attempted.
"""
if self._comm is not None:
self._comm.close()
self._comm = comm.MachineCom(port, baudrate, callbackObject=self)
def disconnect(self):
"""
Closes the connection to the printer.
"""
if self._comm is not None:
self._comm.close()
self._comm = None
eventManager().fire("Disconnected")
def command(self, command):
"""
Sends a single gcode command to the printer.
"""
self.commands([command])
def commands(self, commands):
"""
Sends multiple gcode commands (provided as a list) to the printer.
"""
for command in commands:
self._comm.sendCommand(command)
def selectFile(self, filename, sd, printAfterSelect=False):
if self._comm is None or (self._comm.isBusy() or self._comm.isStreaming()):
return
self._printAfterSelect = printAfterSelect
self._comm.selectFile(filename, sd)
self._setProgressData(0, None, None, None)
self._setCurrentZ(None)
def unselectFile(self):
if self._comm is not None and (self._comm.isBusy() or self._comm.isStreaming()):
return
self._comm.unselectFile()
self._setProgressData(0, None, None, None)
self._setCurrentZ(None)
def startPrint(self):
"""
Starts the currently loaded print job.
Only starts if the printer is connected and operational, not currently printing and a printjob is loaded
"""
if self._comm is None or not self._comm.isOperational() or self._comm.isPrinting():
return
if self._selectedFile is None:
return
self._setCurrentZ(None)
self._comm.startPrint()
def togglePausePrint(self):
"""
Pause the current printjob.
"""
if self._comm is None:
return
self._comm.setPause(not self._comm.isPaused())
def cancelPrint(self, disableMotorsAndHeater=True):
"""
Cancel the current printjob.
"""
if self._comm is None:
return
self._comm.cancelPrint()
if disableMotorsAndHeater:
self.commands(["M84", "M104 S0", "M140 S0", "M106 S0"]) # disable motors, switch off heaters and fan
# reset progress, height, print time
self._setCurrentZ(None)
self._setProgressData(None, None, None, None)
# mark print as failure
if self._selectedFile is not None:
self._gcodeManager.printFailed(self._selectedFile["filename"])
eventManager().fire("PrintFailed", self._selectedFile["filename"])
#~~ state monitoring
def _setCurrentZ(self, currentZ):
self._currentZ = currentZ
formattedCurrentZ = None
if self._currentZ:
formattedCurrentZ = "%.2f mm" % (self._currentZ)
self._stateMonitor.setCurrentZ(formattedCurrentZ)
def _setState(self, state):
self._state = state
self._stateMonitor.setState({"state": self._state, "stateString": self.getStateString(), "flags": self._getStateFlags()})
def _addLog(self, log):
self._log.append(log)
self._stateMonitor.addLog(log)
def _addMessage(self, message):
self._messages.append(message)
self._stateMonitor.addMessage(message)
def _setProgressData(self, progress, filepos, printTime, printTimeLeft):
self._progress = progress
self._printTime = printTime
self._printTimeLeft = printTimeLeft
formattedPrintTime = None
if (self._printTime):
formattedPrintTime = util.getFormattedTimeDelta(datetime.timedelta(seconds=self._printTime))
formattedPrintTimeLeft = None
if (self._printTimeLeft):
formattedPrintTimeLeft = util.getFormattedTimeDelta(datetime.timedelta(minutes=self._printTimeLeft))
formattedFilePos = None
if (filepos):
formattedFilePos = util.getFormattedSize(filepos)
self._stateMonitor.setProgress({"progress": self._progress, "filepos": formattedFilePos, "printTime": formattedPrintTime, "printTimeLeft": formattedPrintTimeLeft})
def _addTemperatureData(self, temp, bedTemp, targetTemp, bedTargetTemp):
currentTimeUtc = int(time.time() * 1000)
self._temps["actual"].append((currentTimeUtc, temp))
self._temps["target"].append((currentTimeUtc, targetTemp))
self._temps["actualBed"].append((currentTimeUtc, bedTemp))
self._temps["targetBed"].append((currentTimeUtc, bedTargetTemp))
self._temp = temp
self._bedTemp = bedTemp
self._targetTemp = targetTemp
self._targetBedTemp = bedTargetTemp
self._stateMonitor.addTemperature({"currentTime": currentTimeUtc, "temp": self._temp, "bedTemp": self._bedTemp, "targetTemp": self._targetTemp, "targetBedTemp": self._targetBedTemp})
def _setJobData(self, filename, filesize, sd):
if filename is not None:
self._selectedFile = {
"filename": filename,
"filesize": filesize,
"sd": sd
}
else:
self._selectedFile = None
formattedFilename = None
formattedFilesize = None
estimatedPrintTime = None
fileMTime = None
filament = None
if filename:
formattedFilename = os.path.basename(filename)
# Use a string for mtime because it could be float and the
			# javascript needs an exact match
if not sd:
fileMTime = str(os.stat(filename).st_mtime)
if filesize:
formattedFilesize = util.getFormattedSize(filesize)
fileData = self._gcodeManager.getFileData(filename)
if fileData is not None and "gcodeAnalysis" in fileData.keys():
if "estimatedPrintTime" in fileData["gcodeAnalysis"].keys():
estimatedPrintTime = fileData["gcodeAnalysis"]["estimatedPrintTime"]
if "filament" in fileData["gcodeAnalysis"].keys():
filament = fileData["gcodeAnalysis"]["filament"]
self._stateMonitor.setJobData({"filename": formattedFilename, "filesize": formattedFilesize, "estimatedPrintTime": estimatedPrintTime, "filament": filament, "sd": sd, "mtime": fileMTime})
def _sendInitialStateUpdate(self, callback):
try:
data = self._stateMonitor.getCurrentData()
# convert the dict of deques to a dict of lists
temps = {k: list(v) for (k,v) in self._temps.iteritems()}
data.update({
"temperatureHistory": temps,
"logHistory": list(self._log),
"messageHistory": list(self._messages)
})
callback.sendHistoryData(data)
except Exception, err:
import sys
sys.stderr.write("ERROR: %s\n" % str(err))
pass
def _getStateFlags(self):
if not settings().getBoolean(["feature", "sdSupport"]) or self._comm is None:
sdReady = False
else:
sdReady = self._comm.isSdReady()
return {
"operational": self.isOperational(),
"printing": self.isPrinting(),
"closedOrError": self.isClosedOrError(),
"error": self.isError(),
"paused": self.isPaused(),
"ready": self.isReady(),
"sdReady": sdReady
}
def getCurrentData(self):
return self._stateMonitor.getCurrentData()
#~~ callbacks triggered from self._comm
def mcLog(self, message):
"""
Callback method for the comm object, called upon log output.
"""
self._addLog(message)
def mcTempUpdate(self, temp, bedTemp, targetTemp, bedTargetTemp):
self._addTemperatureData(temp, bedTemp, targetTemp, bedTargetTemp)
def mcStateChange(self, state):
"""
Callback method for the comm object, called if the connection state changes.
"""
oldState = self._state
# forward relevant state changes to gcode manager
if self._comm is not None and oldState == self._comm.STATE_PRINTING:
if self._selectedFile is not None:
if state == self._comm.STATE_OPERATIONAL:
self._gcodeManager.printSucceeded(self._selectedFile["filename"])
elif state == self._comm.STATE_CLOSED or state == self._comm.STATE_ERROR or state == self._comm.STATE_CLOSED_WITH_ERROR:
self._gcodeManager.printFailed(self._selectedFile["filename"])
self._gcodeManager.resumeAnalysis() # printing done, put those cpu cycles to good use
elif self._comm is not None and state == self._comm.STATE_PRINTING:
self._gcodeManager.pauseAnalysis() # do not analyse gcode while printing
self._setState(state)
def mcMessage(self, message):
"""
Callback method for the comm object, called upon message exchanges via serial.
Stores the message in the message buffer, truncates buffer to the last 300 lines.
"""
self._addMessage(message)
def mcProgress(self):
"""
Callback method for the comm object, called upon any change in progress of the printjob.
Triggers storage of new values for printTime, printTimeLeft and the current progress.
"""
self._setProgressData(self._comm.getPrintProgress(), self._comm.getPrintFilepos(), self._comm.getPrintTime(), self._comm.getPrintTimeRemainingEstimate())
def mcZChange(self, newZ):
"""
Callback method for the comm object, called upon change of the z-layer.
"""
oldZ = self._currentZ
if newZ != oldZ:
# we have to react to all z-changes, even those that might "go backward" due to a slicer's retraction or
			# anti-backlash-routines. Event subscribers should individually take care to filter out "wrong" z-changes
eventManager().fire("ZChange", newZ)
self._setCurrentZ(newZ)
def mcSdStateChange(self, sdReady):
self._stateMonitor.setState({"state": self._state, "stateString": self.getStateString(), "flags": self._getStateFlags()})
def mcSdFiles(self, files):
self._sendTriggerUpdateCallbacks("gcodeFiles")
def mcFileSelected(self, filename, filesize, sd):
self._setJobData(filename, filesize, sd)
self._stateMonitor.setState({"state": self._state, "stateString": self.getStateString(), "flags": self._getStateFlags()})
if self._printAfterSelect:
self.startPrint()
def mcPrintjobDone(self):
self._setProgressData(1.0, self._selectedFile["filesize"], self._comm.getPrintTime(), 0)
self._stateMonitor.setState({"state": self._state, "stateString": self.getStateString(), "flags": self._getStateFlags()})
def mcFileTransferStarted(self, filename, filesize):
self._sdStreaming = True
self._setJobData(filename, filesize, True)
self._setProgressData(0.0, 0, 0, None)
self._stateMonitor.setState({"state": self._state, "stateString": self.getStateString(), "flags": self._getStateFlags()})
def mcFileTransferDone(self):
self._sdStreaming = False
self._setCurrentZ(None)
self._setJobData(None, None, None)
self._setProgressData(None, None, None, None)
self._stateMonitor.setState({"state": self._state, "stateString": self.getStateString(), "flags": self._getStateFlags()})
def mcReceivedRegisteredMessage(self, command, output):
self._sendFeedbackCommandOutput(command, output)
#~~ sd file handling
def getSdFiles(self):
if self._comm is None:
return
return self._comm.getSdFiles()
def addSdFile(self, filename, path):
if not self._comm or self._comm.isBusy():
return
self._comm.startFileTransfer(path, filename[:8].lower() + ".gco")
def deleteSdFile(self, filename):
if not self._comm:
return
self._comm.deleteSdFile(filename)
def initSdCard(self):
if not self._comm:
return
self._comm.initSdCard()
def releaseSdCard(self):
if not self._comm:
return
self._comm.releaseSdCard()
def refreshSdFiles(self):
if not self._comm:
return
self._comm.refreshSdFiles()
#~~ state reports
def getStateString(self):
"""
Returns a human readable string corresponding to the current communication state.
"""
if self._comm is None:
return "Offline"
else:
return self._comm.getStateString()
def getCurrentData(self):
return self._stateMonitor.getCurrentData()
def getCurrentJob(self):
currentData = self._stateMonitor.getCurrentData()
return currentData["job"]
def getCurrentTemperatures(self):
return {
"extruder": {
"current": self._temp,
"target": self._targetTemp
},
"bed": {
"current": self._bedTemp,
"target": self._targetBedTemp
}
}
def isClosedOrError(self):
return self._comm is None or self._comm.isClosedOrError()
def isOperational(self):
return self._comm is not None and self._comm.isOperational()
def isPrinting(self):
return self._comm is not None and self._comm.isPrinting()
def isPaused(self):
return self._comm is not None and self._comm.isPaused()
def isError(self):
return self._comm is not None and self._comm.isError()
def isReady(self):
return self.isOperational() and not self._comm.isStreaming()
def isLoading(self):
return self._gcodeLoader is not None
class GcodeLoader(threading.Thread):
"""
The GcodeLoader takes care of loading a gcode-File from disk and parsing it into a gcode object in a separate
thread while constantly notifying interested listeners about the current progress.
The progress is returned as a float value between 0 and 1 which is to be interpreted as the percentage of completion.
"""
def __init__(self, filename, progressCallback, loadedCallback):
threading.Thread.__init__(self)
self._progressCallback = progressCallback
self._loadedCallback = loadedCallback
self._filename = filename
self._gcodeList = None
def run(self):
#Send an initial M110 to reset the line counter to zero.
prevLineType = lineType = "CUSTOM"
gcodeList = ["M110 N0"]
filesize = os.stat(self._filename).st_size
with open(self._filename, "r") as file:
for line in file:
if line.startswith(";TYPE:"):
lineType = line[6:].strip()
if ";" in line:
line = line[0:line.find(";")]
line = line.strip()
if len(line) > 0:
if prevLineType != lineType:
gcodeList.append((line, lineType, ))
else:
gcodeList.append(line)
prevLineType = lineType
self._onLoadingProgress(float(file.tell()) / float(filesize))
self._gcodeList = gcodeList
self._loadedCallback(self._filename, self._gcodeList)
def _onLoadingProgress(self, progress):
self._progressCallback(self._filename, progress, "loading")
def _onParsingProgress(self, progress):
self._progressCallback(self._filename, progress, "parsing")
class SdFileStreamer(threading.Thread):
def __init__(self, comm, filename, file, progressCallback, finishCallback):
threading.Thread.__init__(self)
self._comm = comm
self._filename = filename
self._file = file
self._progressCallback = progressCallback
self._finishCallback = finishCallback
def run(self):
if self._comm.isBusy():
return
name = self._filename[:self._filename.rfind(".")]
sdFilename = name[:8].lower() + ".gco"
try:
size = os.stat(self._file).st_size
with open(self._file, "r") as f:
self._comm.startSdFileTransfer(sdFilename)
for line in f:
if ";" in line:
line = line[0:line.find(";")]
line = line.strip()
if len(line) > 0:
self._comm.sendCommand(line)
time.sleep(0.001) # do not send too fast
self._progressCallback(sdFilename, float(f.tell()) / float(size))
finally:
self._comm.endSdFileTransfer(sdFilename)
self._finishCallback(sdFilename)
class StateMonitor(object):
def __init__(self, ratelimit, updateCallback, addTemperatureCallback, addLogCallback, addMessageCallback):
self._ratelimit = ratelimit
self._updateCallback = updateCallback
self._addTemperatureCallback = addTemperatureCallback
self._addLogCallback = addLogCallback
self._addMessageCallback = addMessageCallback
self._state = None
self._jobData = None
self._gcodeData = None
self._sdUploadData = None
self._currentZ = None
self._progress = None
self._changeEvent = threading.Event()
self._lastUpdate = time.time()
self._worker = threading.Thread(target=self._work)
self._worker.daemon = True
self._worker.start()
def reset(self, state=None, jobData=None, progress=None, currentZ=None):
self.setState(state)
self.setJobData(jobData)
self.setProgress(progress)
self.setCurrentZ(currentZ)
def addTemperature(self, temperature):
self._addTemperatureCallback(temperature)
self._changeEvent.set()
def addLog(self, log):
self._addLogCallback(log)
self._changeEvent.set()
def addMessage(self, message):
self._addMessageCallback(message)
self._changeEvent.set()
def setCurrentZ(self, currentZ):
self._currentZ = currentZ
self._changeEvent.set()
def setState(self, state):
self._state = state
self._changeEvent.set()
def setJobData(self, jobData):
self._jobData = jobData
self._changeEvent.set()
def setProgress(self, progress):
self._progress = progress
self._changeEvent.set()
def _work(self):
while True:
self._changeEvent.wait()
now = time.time()
delta = now - self._lastUpdate
additionalWaitTime = self._ratelimit - delta
if additionalWaitTime > 0:
time.sleep(additionalWaitTime)
data = self.getCurrentData()
self._updateCallback(data)
self._lastUpdate = time.time()
self._changeEvent.clear()
def getCurrentData(self):
return {
"state": self._state,
"job": self._jobData,
"currentZ": self._currentZ,
"progress": self._progress
}
| agpl-3.0 | 2,885,922,597,023,972,000 | 28.379509 | 189 | 0.712525 | false | 3.325167 | true | false | false |
nimasmi/wagtail | wagtail/core/blocks/struct_block.py | 1 | 8310 | import collections
from django import forms
from django.core.exceptions import ValidationError
from django.forms.utils import ErrorList
from django.template.loader import render_to_string
from django.utils.functional import cached_property
from django.utils.html import format_html, format_html_join
from django.utils.safestring import mark_safe
from wagtail.admin.staticfiles import versioned_static
from .base import Block, DeclarativeSubBlocksMetaclass
from .utils import js_dict
__all__ = ['BaseStructBlock', 'StructBlock', 'StructValue']
class StructValue(collections.OrderedDict):
""" A class that generates a StructBlock value from provded sub-blocks """
def __init__(self, block, *args):
super().__init__(*args)
self.block = block
def __html__(self):
return self.block.render(self)
def render_as_block(self, context=None):
return self.block.render(self, context=context)
@cached_property
def bound_blocks(self):
return collections.OrderedDict([
(name, block.bind(self.get(name)))
for name, block in self.block.child_blocks.items()
])
class BaseStructBlock(Block):
def __init__(self, local_blocks=None, **kwargs):
self._constructor_kwargs = kwargs
super().__init__(**kwargs)
# create a local (shallow) copy of base_blocks so that it can be supplemented by local_blocks
self.child_blocks = self.base_blocks.copy()
if local_blocks:
for name, block in local_blocks:
block.set_name(name)
self.child_blocks[name] = block
self.child_js_initializers = {}
for name, block in self.child_blocks.items():
js_initializer = block.js_initializer()
if js_initializer is not None:
self.child_js_initializers[name] = js_initializer
self.dependencies = self.child_blocks.values()
def get_default(self):
"""
Any default value passed in the constructor or self.meta is going to be a dict
rather than a StructValue; for consistency, we need to convert it to a StructValue
for StructBlock to work with
"""
return self._to_struct_value(self.meta.default.items())
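    # For example (illustrative, not from the original source): with
    # meta.default = {'heading': 'Hello'}, get_default() hands back a StructValue
    # wrapping that single item rather than the raw dict.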
def js_initializer(self):
# skip JS setup entirely if no children have js_initializers
if not self.child_js_initializers:
return None
return "StructBlock(%s)" % js_dict(self.child_js_initializers)
@property
def media(self):
return forms.Media(js=[versioned_static('wagtailadmin/js/blocks/struct.js')])
def get_form_context(self, value, prefix='', errors=None):
if errors:
if len(errors) > 1:
# We rely on StructBlock.clean throwing a single ValidationError with a specially crafted
# 'params' attribute that we can pull apart and distribute to the child blocks
raise TypeError('StructBlock.render_form unexpectedly received multiple errors')
error_dict = errors.as_data()[0].params
else:
error_dict = {}
bound_child_blocks = collections.OrderedDict([
(
name,
block.bind(value.get(name, block.get_default()),
prefix="%s-%s" % (prefix, name), errors=error_dict.get(name))
)
for name, block in self.child_blocks.items()
])
return {
'children': bound_child_blocks,
'help_text': getattr(self.meta, 'help_text', None),
'classname': self.meta.form_classname,
'block_definition': self,
'prefix': prefix,
}
def render_form(self, value, prefix='', errors=None):
context = self.get_form_context(value, prefix=prefix, errors=errors)
return mark_safe(render_to_string(self.meta.form_template, context))
def value_from_datadict(self, data, files, prefix):
return self._to_struct_value([
(name, block.value_from_datadict(data, files, '%s-%s' % (prefix, name)))
for name, block in self.child_blocks.items()
])
def value_omitted_from_data(self, data, files, prefix):
return all(
block.value_omitted_from_data(data, files, '%s-%s' % (prefix, name))
for name, block in self.child_blocks.items()
)
def clean(self, value):
result = [] # build up a list of (name, value) tuples to be passed to the StructValue constructor
errors = {}
for name, val in value.items():
try:
result.append((name, self.child_blocks[name].clean(val)))
except ValidationError as e:
errors[name] = ErrorList([e])
if errors:
# The message here is arbitrary - StructBlock.render_form will suppress it
# and delegate the errors contained in the 'params' dict to the child blocks instead
raise ValidationError('Validation error in StructBlock', params=errors)
return self._to_struct_value(result)
def to_python(self, value):
""" Recursively call to_python on children and return as a StructValue """
return self._to_struct_value([
(
name,
(child_block.to_python(value[name]) if name in value else child_block.get_default())
# NB the result of get_default is NOT passed through to_python, as it's expected
# to be in the block's native type already
)
for name, child_block in self.child_blocks.items()
])
def _to_struct_value(self, block_items):
""" Return a Structvalue representation of the sub-blocks in this block """
return self.meta.value_class(self, block_items)
def get_prep_value(self, value):
""" Recursively call get_prep_value on children and return as a plain dict """
return dict([
(name, self.child_blocks[name].get_prep_value(val))
for name, val in value.items()
])
def get_api_representation(self, value, context=None):
""" Recursively call get_api_representation on children and return as a plain dict """
return dict([
(name, self.child_blocks[name].get_api_representation(val, context=context))
for name, val in value.items()
])
def get_searchable_content(self, value):
content = []
for name, block in self.child_blocks.items():
content.extend(block.get_searchable_content(value.get(name, block.get_default())))
return content
def deconstruct(self):
"""
Always deconstruct StructBlock instances as if they were plain StructBlocks with all of the
field definitions passed to the constructor - even if in reality this is a subclass of StructBlock
with the fields defined declaratively, or some combination of the two.
This ensures that the field definitions get frozen into migrations, rather than leaving a reference
to a custom subclass in the user's models.py that may or may not stick around.
"""
path = 'wagtail.core.blocks.StructBlock'
args = [list(self.child_blocks.items())]
kwargs = self._constructor_kwargs
return (path, args, kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
for name, child_block in self.child_blocks.items():
errors.extend(child_block.check(**kwargs))
errors.extend(child_block._check_name(**kwargs))
return errors
def render_basic(self, value, context=None):
return format_html('<dl>\n{}\n</dl>', format_html_join(
'\n', ' <dt>{}</dt>\n <dd>{}</dd>', value.items()))
class Meta:
default = {}
form_classname = 'struct-block'
form_template = 'wagtailadmin/block_forms/struct.html'
value_class = StructValue
# No icon specified here, because that depends on the purpose that the
# block is being used for. Feel encouraged to specify an icon in your
# descendant block type
icon = "placeholder"
class StructBlock(BaseStructBlock, metaclass=DeclarativeSubBlocksMetaclass):
pass
| bsd-3-clause | 7,582,453,976,146,293,000 | 37.472222 | 107 | 0.622262 | false | 4.229008 | false | false | false |
ypid/series60-remote | pc/devices/status_numbers.py | 1 | 2071 | # -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2010 Lukas Hetzenecker <[email protected]>
NUM_CONNECTED = 100
NUM_HELLO_REQUEST = 110
NUM_HELLO_REPLY = 111
NUM_QUIT = 120
NUM_PARTIAL_MESSAGE = 130
NUM_CONTACTS_REQUEST_HASH_ALL = 200
NUM_CONTACTS_REQUEST_HASH_SINGLE= 201
NUM_CONTACTS_REQUEST_CONTACT = 204
NUM_CONTACTS_REQUEST_CONTACTS_ALL = 205
NUM_CONTACTS_REPLY_HASH_ALL= 210
NUM_CONTACTS_REPLY_HASH_SINGLE_START= 211
NUM_CONTACTS_REPLY_HASH_SINGLE_LINE= 212
NUM_CONTACTS_REPLY_HASH_SINGLE_END= 213
NUM_CONTACTS_REPLY_CONTACT_START = 220
NUM_CONTACTS_REPLY_CONTACT_LINE = 221
NUM_CONTACTS_REPLY_CONTACT_END = 222
NUM_CONTACTS_REPLY_CONTACTS_ALL_END = 223
NUM_CONTACTS_ADD = 230
NUM_CONTACTS_ADD_REPLY_ID = 231
NUM_CONTACTS_DELETE = 232
NUM_CONTACTS_CHANGE_ADDFIELD = 233
NUM_CONTACTS_CHANGE_REMOVEFIELD = 234
NUM_SYSINFO_REQUEST = 250
NUM_SYSINFO_REPLY_START = 260
NUM_SYSINFO_REPLY_LINE = 261
NUM_SYSINFO_REPLY_END = 262
NUM_MESSAGE_SEND_REQUEST = 300
NUM_MESSAGE_SEND_REPLY_OK = 301
NUM_MESSAGE_SEND_REPLY_STATUS = 302
NUM_MESSAGE_SEND_REPLY_FAILURE = 303
NUM_MESSAGE_SEND_REPLY_RETRY = 304
NUM_SET_READ = 320
NUM_MESSAGE_NEW = 350
NUM_MESSAGE_REQUEST = 351
NUM_MESSAGE_REPLY_LINE = 352
NUM_MESSAGE_REPLY_END = 353
NUM_MESSAGE_REQUEST_UNREAD = 370
NUM_MESSAGE_REPLY_UNREAD = 371
NUM_CALENDAR_REQUEST_HASH_ALL = 380
#NUM_CALENDAR_REQUEST_HASH_SINGLE = 381
NUM_CALENDAR_REQUEST_ENTRY = 382
NUM_CALENDAR_REQUEST_ENTRIES_ALL = 383
NUM_CALENDAR_REPLY_HASH_ALL= 384
#NUM_CALENDAR_REPLY_HASH_SINGLE_START= 385
#NUM_CALENDAR_REPLY_HASH_SINGLE_LINE= 386
#NUM_CALENDAR_REPLY_HASH_SINGLE_END= 387
NUM_CALENDAR_REPLY_ENTRIES_START = 388
NUM_CALENDAR_REPLY_ENTRY = 389
NUM_CALENDAR_REPLY_ENTRIES_END = 390
NUM_CALENDAR_ENTRY_ADD = 395
NUM_CALENDAR_ENTRY_ADD_REPLY = 396
NUM_CALENDAR_ENTRY_DELETE = 397
NUM_CALENDAR_ENTRY_CHANGE = 398
NUM_CALENDAR_ENTRY_CHANGE_REPLY_TIME = 399
NUM_INCOMING_CALL = 400
NUM_DEBUG = 999
NUM_END_HEADER = chr(0x02) # Start of Text
NUM_SEPERATOR = chr(0x1E) # Record Separator
NUM_END_TEXT = chr(0x03) # End of Text
PROTOCOL_VERSION = 1.5
| gpl-2.0 | 2,607,002,893,510,730,000 | 26.986486 | 59 | 0.759536 | false | 2.486194 | false | true | false |
ngmiller/mipsy | mipsy/encoder.py | 1 | 8100 | """
mipsy.encoder
Instruction encoder.
See README.md for usage and general information.
"""
# system imports
import bitstring
# application imports
from mipsy.arch import MIPS
from mipsy.util import LabelCache, ParseInfo
class Encoder(object):
"""
Responsible for encoding individual instructions and querying the label cache.
"""
class tokenizer(object):
"""
Defines a 'list' of tokenizing functions used for varying instructions.
Each 'tokenizer' returns a dictionary mapping the specified operands to their tokens
from the instruction data (the portion of the instruction following the operation)
instruction = (operation) (instruction_data) <-- here, we're only concerned with instruction_data
"""
def map_operands(self, to_split, operands):
"""
Helper method.
Maps operands to the preprocessed instruction data string.
"""
operand_values = to_split.split()
if len(operands) != len(operand_values):
raise RuntimeError('instruction contains too many operands')
operand_map = {}
for i in range(len(operands)):
operand_map[operands[i]] = operand_values[i]
return operand_map
def RI_type(self, operands, instruction_data):
"""
The RI_type tokenizer takes instructions with the format:
(operation) [(operand1), (operand2), (operand3)]
"""
to_split = instruction_data.replace(',', ' ')
return self.map_operands(to_split, operands)
def J_type(self, operands, instruction_data):
"""
The J_type tokenizer takes jump (j, jal, jr) instructions
with the format:
(operation) [operand]
"""
return self.map_operands(instruction_data, operands)
def load_store(self, operands, instruction_data):
"""
The load_store tokenizer takes instructions with the format:
(operation) [operand1, (operand2)(operand3)]
"""
# Clear out commas and the parenthesis surrounding the base register
to_split = instruction_data.replace(',', ' ').replace('(', ' ').replace(')', ' ')
return self.map_operands(to_split, operands)
def nop(self, operands, instruction_data):
"""
The nop tokenizer simply maps all the given operands to register $zero.
"""
return {operand: '$zero' for operand in operands}
# The assembler operation table defines the parsing rules
# for a given instruction. The parsing rules are used to
# map tokens in the instruction string to register address
# and immediate value positions. (rs, rt, rd, etc)
t = tokenizer()
operations = {
'nop' : ParseInfo(['rd', 'rs', 'rt'], t.nop),
'add' : ParseInfo(['rd', 'rs', 'rt'], t.RI_type),
'addi' : ParseInfo(['rt', 'rs', 'imm'], t.RI_type),
'and' : ParseInfo(['rd', 'rs', 'rt'], t.RI_type),
'beq' : ParseInfo(['rs', 'rt', 'label'], t.RI_type),
'j' : ParseInfo(['label'], t.J_type),
'jal' : ParseInfo(['label'], t.J_type),
'jr' : ParseInfo(['rs'], t.RI_type),
'lw' : ParseInfo(['rt', 'imm', 'rs'], t.load_store),
'or' : ParseInfo(['rd', 'rs', 'rt'], t.RI_type),
'slt' : ParseInfo(['rd', 'rs', 'rt'], t.RI_type),
'sll' : ParseInfo(['rd', 'rt', 'shamt'], t.RI_type),
'sw' : ParseInfo(['rt', 'imm', 'rs'], t.load_store),
'sub' : ParseInfo(['rd', 'rs', 'rt'], t.RI_type),
# TODO ...
}
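    # End-to-end sketch (illustrative): encode_instruction(0, 'addi $t0, $zero, 5')
    # looks up the 'addi' ParseInfo above, tokenizes '$t0,$zero,5' into
    # {'rt': '$t0', 'rs': '$zero', 'imm': '5'}, resolves registers/immediates to bits
    # and finally asks the MIPS ISA definition for the encoded instruction word.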
def __init__(self):
# ISA definitions
self.mips = MIPS()
# Label resolution cache
self.label_cache = LabelCache()
def encode_instruction(self, pc, instr):
"""
Given an instruction string, generate the encoded bit string.
PC (instruction index is used for branch label resolution)
"""
data = instr.split()
operation = data[0]
try:
mips_op_info = MIPS.operations[operation]
except KeyError, e:
raise RuntimeError('Unknown operation: {}'.format(operation))
# Grab the parsing info from the assembler operations table
# Generate the initial operand map using the specified tokenizer
parse_info = self.operations[operation]
encoding_map = parse_info.tokenizer(parse_info.tokens, ''.join(data[1:]))
# Get the binary equivalents of the operands and MIPS operation information
self.resolve_operands(encoding_map, operation, pc)
# Pull MIPS operation info into encoding map
self.resolve_operation_info(encoding_map, mips_op_info)
instruction = self.mips.generate_instruction(mips_op_info.format)
return instruction.encode(encoding_map)
def resolve_operation_info(self, encoding_map, mips_op_info):
"""
Adds the predefined operation info (opcode, funct) to the current encoding map.
"""
encoding_map['opcode'] = mips_op_info.opcode
encoding_map['funct'] = mips_op_info.funct
def resolve_operands(self, encoding_map, operation, pc):
"""
Converts generic register references (such as $t0, $t1, etc), immediate values, and jump addresses
to their binary equivalents.
"""
convert = Encoder.to_binary
branch_replace = False
jump_replace = False
for operand, value in encoding_map.iteritems():
if (operand == 'rs' or operand == 'rt' or operand == 'rd'):
encoding_map[operand] = MIPS.registers[value]
elif (operand == 'imm'):
encoding_map[operand] = convert(int(value), MIPS.IMMEDIATE_SIZE)
elif (operand == 'addr'):
encoding_map[operand] = convert(int(value), MIPS.ADDRESS_SIZE)
elif (operand == 'shamt'):
encoding_map[operand] = convert(int(value), MIPS.SHAMT_SIZE)
elif (operand == 'label'):
label = encoding_map[operand]
hit, index = self.label_cache.query(label)
if not hit:
raise RuntimeError('No address found for label: {}'.format(label))
if ((operation == 'beq') or (operation == 'bne')):
# Calculate the relative instruction offset. The MIPS ISA uses
# PC + 4 + (branch offset) to resolve branch targets.
if index > pc:
encoding_map[operand] = convert(index - pc - 1, MIPS.IMMEDIATE_SIZE)
elif index < pc:
encoding_map[operand] = convert((pc + 1) - index, MIPS.IMMEDIATE_SIZE)
else:
# Not sure why a branch would resolve to itself, but ok
# (PC + 4) - 4 =
encoding_map[operand] = convert(-1, MIPS.IMMEDIATE_SIZE)
branch_replace = True
elif ((operation == 'j') or (operation == 'jal')):
# Jump addresses are absolute
encoding_map[operand] = convert(index, MIPS.ADDRESS_SIZE)
jump_replace = True
# Need to convert references to 'label' back to references the instruction
# encoding string recognizes, otherwise we end up with the default value (zero)
# This doesn't feel very clean, but working on a fix.
if branch_replace:
encoding_map['imm'] = encoding_map['label']
elif jump_replace:
encoding_map['addr'] = encoding_map['label']
@staticmethod
def to_binary(decimal, length):
"""
Given a decimal, generate the binary equivalent string of
given length.
e.g. binary(2, 5) = 00010
"""
b = bitstring.Bits(int=decimal, length=length)
return b.bin
| mit | -3,993,751,590,257,310,700 | 38.512195 | 106 | 0.564691 | false | 4.236402 | false | false | false |
Akson/RemoteConsolePlus3 | RemoteConsolePlus3/RCP3/Backends/Processors/Graphs/Plot1D.py | 1 | 2341 | #Created by Dmytro Konobrytskyi, 2014 (github.com/Akson)
import numpy as np
import matplotlib
import matplotlib.pyplot
from RCP3.Infrastructure import TmpFilesStorage
class Backend(object):
def __init__(self, parentNode):
self._parentNode = parentNode
def Delete(self):
"""
This method is called when a parent node is deleted.
"""
pass
def GetParameters(self):
"""
Returns a dictionary with object parameters, their values,
limits and ways to change them.
"""
return {}
def SetParameters(self, parameters):
"""
Gets a dictionary with parameter values and
update object parameters accordingly
"""
pass
def ProcessMessage(self, message):
"""
        This method is called when a new message comes.
If an incoming message should be processed by following nodes, the
'self._parentNode.SendMessage(message)'
should be called with an appropriate message.
"""
dataArray = np.asarray(message["Data"])
fig = matplotlib.pyplot.figure(figsize=(6, 4), dpi=float(96))
ax=fig.add_subplot(111)
#n, bins, patches = ax.hist(dataArray, bins=50)
ax.plot(range(len(dataArray)), dataArray)
processedMessage = {"Stream":message["Stream"], "Info":message["Info"]}
filePath, link = TmpFilesStorage.NewTemporaryFile("png")
fig.savefig(filePath,format='png')
matplotlib.pyplot.close(fig)
html = '<img src="http://{}" alt="Image should come here">'.format(link)
processedMessage["Data"] = html
self._parentNode.SendMessage(processedMessage)
"""
print len(message["Data"])
import numpy as np
import matplotlib.pyplot as plt
x = np.array(message["Data"])
num_bins = 50
# the histogram of the data
n, bins, patches = plt.hist(x, num_bins, normed=1, facecolor='green', alpha=0.5)
plt.subplots_adjust(left=0.15)
plt.show()
"""
def AppendContextMenuItems(self, menu):
"""
        Append backend specific menu items to a context menu that the user will see
when he clicks on a node.
"""
        pass
| lgpl-3.0 | -487,449,994,099,500,860 | 29.415584 | 88 | 0.5912 | false | 4.383895 | false | false | false
BarusXXX/K-Tree | TreeLogic.py | 1 | 3884 | import os
from copy import deepcopy
class RecursiveTree:
def __init__(self, dir_name):
self.dir_name = dir_name
self.files = []
self.folders = [] #Tuple Absolute address, branch, level
self.branches = []
self.children_n = []
self.currentlevel = 0
self.level=[] #len(self.branches)
self.level.append(0)
self.folder_n = len(self.folders)
self.parentIndex = []
self.parentbranch = []
self.iterator = 0
self.reversead = 0
self.parentIndex.append(None)
self.branches.append([0])
self.folders.append((dir_name, "{0}", 0))
RecursiveTree.get_immediate_subdirectories(self, self.dir_name, 0)
self.level_max = max(self.level)
def Branch(self):
pass
def PrintTree(self):
print("#Folders#")
for x in self.folders:
print(x)
print("#Branches#")
for x in self.branches:
print(x)
print("#Parent Branches#")
for x in self.parentbranch:
print(x)
print("#Files#")
for x in self.files:
print(x)
def subdir(self):
return self.folders
def filedir(self):
return self.files
def sortedbranches(self):
STree = []
CountX = 0
for x in self.branches:
STree.append([])
for y in x:
STree[CountX].append(int(y))
CountX += 1
SSum = []
CountX = 0
TTree = deepcopy(STree)
for x in TTree:
CountY = 0
for y in x:
TTree[CountX][CountY] = y + 1
CountY += 1
CountX += 1
SSum.append(sum(x))
        SortedTree = [x for y, x in sorted(list(zip(SSum, STree)))]
        return SortedTree
def get_immediate_subdirectories(self, a_dir, curadd):
nextadd = 0
relocator = 0
cancleNo = self.reversead
for name in os.listdir(a_dir):
if os.path.isdir(os.path.join(a_dir, name)):
curaddstr = str(curadd) + ";" + str(nextadd)
relocator += 1
self.iterator += 1
self.currentlevel += 1
ContainsSub = False
ContainsNo = 0
for x in os.listdir(a_dir + "/" + name):
if os.path.isdir(a_dir + "/" + name + "/" + x):
ContainsSub = True
ContainsNo += 1
self.children_n.append(ContainsNo)
PathConstructor = "{" + str(curadd) + ";" + str(nextadd) + "}" + ":" + os.path.join(a_dir, name)
AbsAddressConstructor = (PathConstructor.split(":")[1]), (PathConstructor.split(":")[2])
self.folders.append((":".join(AbsAddressConstructor), PathConstructor.split(":")[0], self.currentlevel))
self.branches.append((((((PathConstructor.split(":")[0]).split("{")[1])).split("}")[0]).split(";")))
self.parentbranch.append(str(curadd).split(";"))
self.level.append(self.currentlevel)
self.parentIndex.append(self.iterator - relocator - self.reversead + cancleNo) #Cannot negate 1
RecursiveTree.get_immediate_subdirectories(self, (a_dir + "/" + name), curaddstr)
self.currentlevel -= 1
if ContainsSub == True:
self.reversead += ContainsNo
nextadd += 1
else:
self.files.append((self.iterator - relocator - self.reversead + cancleNo, os.path.join(a_dir, name))) #index of parent, direct links to file
#print("file found:", self.iterator - relocator - self.reversead + cancleNo, name)
#print("{"+str(curadd) + ";" + str(nextadd) + "}" + ":" + os.path.join(a_dir, name))
| mit | 4,737,420,698,815,880,000 | 29.582677 | 156 | 0.511843 | false | 3.903518 | false | false | false |
ndparker/wolfe | wolfe/scheduler/_job_queue.py | 1 | 4458 | # -*- coding: ascii -*-
r"""
:Copyright:
Copyright 2014 - 2016
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========
Job Queue
===========
 Job Queue. The queue is implemented as a priority queue using a heap.
"""
if __doc__: # pragma: no cover
# pylint: disable = redefined-builtin
__doc__ = __doc__.encode('ascii').decode('unicode_escape')
__author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape')
__docformat__ = "restructuredtext en"
import heapq as _heapq
class JobQueue(object):
"""
Job queue
This container utilizes a heap structure to implement a more or less
generic priority queue (see below). The sorting order of the items is
defined by a wrapper class passed to the constructor.
The queue is made for jobs. That's why wrapper classes have to provide a
job attribute for unwrapping and items passed into the queue are expected
to provide a valid ``id`` attribute.
Additionally the queue implements boolean operations (it's false if it's
empty) and a __contains__ operation based on job IDs.
>>> class Wrapper(object):
... def __init__(self, job):
... self.job = job
... def __lt__(self, other):
... return self.job.id > other.job.id
>>> class Job(object):
... def __init__(self, job_id):
... self.id = job_id
>>> queue = JobQueue(Wrapper)
>>> queue.put(Job(2))
>>> bool(queue)
True
>>> 1 in queue
False
>>> 2 in queue
True
>>> len(queue)
1
:IVariables:
`_queue` : ``list``
actual heap containing wrapped jobs
`_wrapper` : callable
Wrapper class factory
`_ids` : ``set``
Set of job IDs currently queued
"""
def __init__(self, wrapper_class):
"""
Initialization
:Parameters:
`wrapper_class` : any
class factory expected to take a job and represent it inside the
queue. The object should be comparable with other instances
(``__lt__`` is the proper method) and should provide a ``job``
attribute pointing to the original object.
"""
self._queue = []
self._wrapper = wrapper_class
self._ids = set()
def __nonzero__(self):
"""
Return false if the queue is empty, true otherwise
:Return: Is there something in the queue?
:Rtype: ``bool``
"""
return bool(self._queue)
def __contains__(self, job_id):
"""
Check if the passed job_id is currently enqueued
:Return: Is it?
:Rtype: ``bool``
"""
return job_id in self._ids
def __len__(self):
""" Find queue length """
return len(self._queue)
def __iter__(self):
""" Iterate over the queue until it's exhausted """
try:
while True:
yield self.get()
except IndexError:
pass
def put(self, job):
"""
Put a job into the queue
:Parameters:
`job` : any
The job to put in. The object must have an ``id`` attribute,
which must be hashable.
"""
self._ids.add(job.id)
_heapq.heappush(self._queue, self._wrapper(job))
def get(self):
"""
Get the next job from the queue
:Return: A job
:Rtype: any
:Exceptions:
- `IndexError` : Queue was empty
"""
job = _heapq.heappop(self._queue).job
self._ids.remove(job.id)
return job
def peek(self):
"""
Return the next job without removing it from the queue
The job will still be wrapped in the wrapper_class container
:Return: wrapped job
:Rtype: any
:Exceptions:
- `IndexError` : Queue was empty
"""
return self._queue[0]
| apache-2.0 | -4,326,341,695,374,241,300 | 25.855422 | 77 | 0.580978 | false | 4.270115 | false | false | false |
bvanrijn/debianpaste-clients | old-paste.py | 1 | 7602 | #!/usr/bin/python
# Filename: paste
# Purpose: XmlRpc interface client to paste.debian.net
# Author: Copyright (C) 2007-2011 Michael Gebetsroither <[email protected]>
# License: This file is licensed under the GPL v2+. Full license text in LICENSE
# Modified original: No modifications have been made
#
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
import sys
import xmlrpclib
import optparse
import inspect
import getpass
# program defaults
DEFAULT_SERVER='http://paste.debian.net/server.pl'
class ActionFailedException(Exception):
'''Thrown if server returned an error'''
def __init__(self, errormsg, ret):
Exception.__init__(self, errormsg, ret)
def what(self):
'''Get errormessage'''
return self.args[0]
def dwhat(self):
'''Get more verbose errormessage'''
return self.args[1]
class Action(object):
def __init__(self, args, opts):
self.args_ = args
self.opts_ = opts
def _createProxy(self):
return xmlrpclib.ServerProxy(self.opts_.server, verbose=False)
def _callProxy(self, functor, server=None):
'''Wrapper for xml-rpc calls to server which throws an
ActionFailedException on error'''
if server is None:
server = self._createProxy()
ret = functor(server)
if ret['rc'] != 0:
raise ActionFailedException(ret['statusmessage'], ret)
return ret
def call(self, method_name):
'''External Interface to call the appropriate action'''
return self.__getattribute__(method_name)()
def actionAddPaste(self):
'''Add paste to the server: <1.line> <2.line> ...
default Read paste from stdin.
[text] Every argument on the commandline will be interpreted as
a seperate line of paste.
'''
server = self._createProxy()
o = self.opts_
code = self.args_
if len(self.args_) == 0:
code = [ i.rstrip() for i in sys.stdin.readlines() ]
code = '\n'.join(code)
result = self._callProxy(lambda s: s.paste.addPaste(code, o.name, o.expire * 3600, o.lang, o.private),
server)
return (result['statusmessage'], result)
def actionDelPaste(self):
'''Delete paste from server: <digest>
<digest> Digest of paste you want to remove.
'''
digest = self.args_.pop(0)
result = self._callProxy(lambda s: s.paste.deletePaste(digest))
return (result['statusmessage'], result)
def actionGetPaste(self):
'''Get paste from server: <id>
<id> Id of paste you want to receive.
'''
id = self.args_.pop(0)
result = self._callProxy(lambda s: s.paste.getPaste(id))
return (result['code'], result)
def actionGetLangs(self):
'''Get supported language highlighting types from server'''
result = self._callProxy(lambda s: s.paste.getLanguages())
return ('\n'.join(result['langs']), result)
def actionAddShortUrl(self):
'''Add short-URL: <url>
<url> Short-URL to add
'''
url = self.args_.pop(0)
result = self._callProxy(lambda s: s.paste.addShortURL(url))
return (result['url'], result)
def actionGetShortUrl(self):
'''Resolve short-URL: <url>
<url> Short-URL to get clicks of
'''
url = self.args_.pop(0)
result = self._callProxy(lambda s: s.paste.resolveShortURL(url))
return (result['url'], result)
def actionGetShortUrlClicks(self):
'''Get clicks of short-URL: <url>
<url> Short-URL to get clicks of
'''
url = self.args_.pop(0)
result = self._callProxy(lambda s: s.paste.ShortURLClicks(url))
return (result['count'], result)
def actionHelp(self):
'''Print more verbose help about specific action: <action>
<action> Topic on which you need more verbose help.
'''
if len(self.args_) < 1:
alias = "help"
else:
alias = self.args_.pop(0)
if alias in actions:
fun = actions[alias]
print inspect.getdoc(self.__getattribute__(fun))
print "\naliase: " + " ".join([i for i in actions_r[fun] if i != alias])
else:
print "Error: No such command - %s" % (alias)
OPT_PARSER.print_usage()
sys.exit(0)
# actionAddPaste -> [add, a]
actions_r = {}
# add -> actionAddPaste
# a -> actionAddPaste
actions = {}
# option parser
OPT_PARSER = None
##
# MAIN
##
if __name__ == "__main__":
action_spec = ['actionAddPaste add a',
'actionDelPaste del d rm',
'actionGetPaste get g',
'actionGetLangs getlangs gl langs l',
'actionAddShortUrl addurl',
'actionGetShortUrl geturl',
'actionGetShortUrlClicks getclicks',
'actionHelp help']
for i in action_spec:
aliases = i.split()
cmd = aliases.pop(0)
actions_r[cmd] = aliases
for (k,v) in actions_r.items():
for i in v:
actions[i] = k
usage = "usage: %prog [options] ACTION <args>\n\n" +\
"actions:\n" +\
"\n".join(["%12s\t%s" % (v[0], inspect.getdoc(getattr(Action, k)).split('\n')[0]) \
for (k,v) in actions_r.items()])
running_user = getpass.getuser()
parser = optparse.OptionParser(usage=usage)
parser.add_option('-n', '--name', default=running_user, help="Name of poster")
parser.add_option('-e', '--expire', type=int, default=72, metavar='HOURS',
                      help='Time at which paste should expire')
parser.add_option('-l', '--lang', default='Plain', help='Type of language to highlight')
parser.add_option("-p", "--private", action="count", dest="private", default=0,
help='Create hidden paste'),
parser.add_option('-s', '--server', default=DEFAULT_SERVER,
help='Paste server')
parser.add_option('-v', '--verbose', action='count', default=0, help='More output')
(opts, args) = parser.parse_args()
OPT_PARSER = parser
if len(args) == 0:
parser.error('Please provide me with an action')
elif args[0] in actions:
cmd = args.pop(0)
action = Action(args, opts)
try:
(msg, ret) = action.call(actions[cmd])
if opts.verbose == 0:
print msg
else:
print ret
except ActionFailedException, e:
sys.stderr.write('Server Error: %s\n' % e.what())
if opts.verbose >0:
print e.dwhat()
sys.exit(1)
else:
parser.error('Unknown action: %s' % args[0])
| gpl-2.0 | 4,928,760,378,934,636,000 | 35.373206 | 241 | 0.578269 | false | 3.934783 | false | false | false |
wjakob/layerlab | recipes/coated-gold-with-scatmedium.py | 1 | 2082 | # Creates a rough gold layer with a rough dielectric coating containing an
# anisotropic scattering medium
import sys
sys.path.append('.')
from utils.materials import gold
from utils.cie import get_rgb
import layerlab as ll
eta_top = 1.5
# This step integrates the spectral IOR against the CIE XYZ curves to obtain
# equivalent sRGB values. This may seem fairly approximate but turns out to
# yield excellent agreement with spectral reference renders
print('Computing gold IOR parameters')
eta_bot = get_rgb(gold)
alpha_top = 0.1 # Beckmann roughness of top layer (coating)
alpha_bot = 0.1 # Beckmann roughness of bottom layer (gold)
# Medium parameters
g = 0.5 # Scattering anisotropy
albedo = [0.25, 0.0, 0.95] # Single scattering albedo
tau = 0.5 # Optical depth
# Construct quadrature scheme suitable for the material
n_top, m_top = ll.parameterHeuristicMicrofacet(eta=eta_top, alpha=alpha_top)
n_bot, m_bot = ll.parameterHeuristicMicrofacet(eta=eta_bot[0], alpha=alpha_bot)
n_med, m_med = ll.parameterHeuristicHG(g=g)
n = max(n_top, n_bot) # Max of zenith angle discretization
m = m_top # Number of Fourier orders determined by top layer
mu, w = ll.quad.gaussLobatto(n)
print("# of nodes = %i, fourier orders = %i" % (n, m))
# Construct coating layer
print("Creating coating layer")
coating = ll.Layer(mu, w, m)
coating.setMicrofacet(eta=eta_top, alpha=alpha_top)
output = []
for channel in range(3):
# Construct diffuse bottom layer for each channel
print("Creating metal layer")
l = ll.Layer(mu, w, m)
l.setMicrofacet(eta=eta_bot[channel], alpha=alpha_bot)
# Construct medium layer
print("Creating medium layer")
l2 = ll.Layer(mu, w, m)
l2.setHenyeyGreenstein(g=g, albedo=albedo[channel])
l2.expand(tau)
# Apply medium layer
print("Applying medium ..")
l.addToTop(l2)
# Apply coating
print("Applying coating..")
l.addToTop(coating)
output.append(l)
# .. and write to disk
print("Writing to disk..")
storage = ll.BSDFStorage.fromLayerRGB("output.bsdf", *output)
storage.close()
| bsd-2-clause | -3,367,170,747,667,034,600 | 29.617647 | 79 | 0.713737 | false | 3.013025 | false | false | false |
plumer/codana | projectdata.py | 1 | 5358 | class VersionDataManager:
"""Manager of all the information of files and packages in a specific version
Attributes:
        packages (list of str): List of package names
files (list of str): List of all the files in the project
packagedict (dict): Map of packages(key) and filenames(value)
filebugnum (dict): Map of filename(key) and bug numbers(value)
fileattr (dict): Map of filename(key) and the attributes of the file(value)
packageattr (dict): Map of package(key) and the attributes of the package(value)
filedepends (list of tuple): List of all the edges in the dependence graph of all files
packagedepends (list of tuple) : List of all the edges in the dependence graph of all packages
"""
def __init__(self, version='6.0.0'):
self.packagedict = {}
self.fileattr = {}
self.files = []
self.filebugnum = {}
self.packageattr = {}
self.versionArray = []
datafile = open(r'tomcat_history/tomcat' + version + r'/tomcat_pack.txt', 'r')
for packs in datafile:
packslice = packs.strip(' \t\n').split('\t')
self.packagedict[packslice[0]] = []
self.packageattr[packslice[0]] = self.packPackageAttr(packslice[1:])
filenum = 0
if int(packslice[1]) == 0:
continue
for files in datafile:
fileattr = files.strip(' \t\n').split('\t')
if not fileattr[0] in self.packagedict[packslice[0]]:
self.files.append(fileattr[0])
self.packagedict[packslice[0]].append(fileattr[0])
self.fileattr[fileattr[0]] = self.packFileAttr(fileattr[1:])
filenum = filenum + 1
if filenum >= int(packslice[1]):
break
datafile.close()
datafile = open(r'tomcat_history/tomcat' + version + r'/log.txt', 'r')
for record in datafile:
recordslice = record.strip(' \t\n').split('\t')
self.filebugnum[recordslice[0]] = int(recordslice[1])
datafile.close()
self.packages = self.packagedict.keys()
self.packagedepends = []
packdependfile = open(r'tomcat_history/tomcat' + version + r'/tomcat_pack_depends.txt', 'r')
for e in packdependfile:
vertices = e.strip(' \t\n').split(' ')
self.packagedepends.append( (vertices[0], vertices[-1]) )
packdependfile.close()
self.filedepends = []
filedependfile = open(r'tomcat_history/tomcat' + version + r'/tomcat_depends.txt', 'r')
for e in filedependfile:
vertices = e.strip(' \t\n').split('\t')
self.filedepends.append( (vertices[0], vertices[-1]) )
filedependfile.close()
def packPackageAttr(self, attrs):
return {'filenum' : attrs[0],
'codelines' : attrs[1],
'cyclomatic' : attrs[2]}
def packFileAttr(self, attrs):
return {'codelines' : attrs[0],
'cyclomatic' : attrs[1]}
def listFileAttr(self):
return ('codelines', 'cyclomatic')
def listPackageAttr(self):
return ('filenum', 'codelines' , 'cyclomatic')
def getPackages(self):
return self.packages
def getFilenames(self):
return self.files
def getFilesOfPackage(self, package):
return self.packagedict[package]
def getPackageOfFile(self, filename):
        # Note: __init__ never builds a filedict, so look the file up in packagedict.
        return next((p for p, fs in self.packagedict.items() if filename in fs), None)
def getFileAttr(self, filename):
return self.fileattr[filename]
def getPackageAttr(self, package):
return self.packageattr[package]
def getFileDependence(self):
return self.filedepends
def getPackageDependence(self):
return self.packagedepends
def getFileDependenceOfPackage(self, package):
deplist = []
filelist = self.getFilesOfPackage(package)
for dep in self.filedepends:
if dep[0] in filelist and dep[1] in filelist:
deplist.append(dep)
return deplist
def getBugNumberOfFile(self, filename):
if filename in self.filebugnum:
return self.filebugnum[filename]
return 0
def getBugNumberOfPackage(self, package):
bugnum = 0
for filename in self.packagedict[package]:
if filename in self.filebugnum:
bugnum = bugnum + self.filebugnum[filename]
return bugnum
class DataManager:
'''Manage all the data in all versions
Attributes:
versionArray (list): List of all the versions
dataManages (dict): Map of the version(key) and the specified data manager(value)
'''
def __init__(self):
self.versionArray = []
datafile = open(r'tomcat_history/tomcat_list.txt', 'r')
for line in datafile:
self.versionArray.append(line.strip(' \n').strip('tomcat'))
datafile.close()
self.dataManages = {}
for version in self.versionArray:
self.dataManages[version] = VersionDataManager(version)
def getManager(self, version):
return self.dataManages[version]
def getVersionArray(self):
return self.versionArray
if __name__ == '__main__':
dm = DataManager()
dm.getFileDependenceOfPackage('apache.catalina')
| mit | -4,992,400,439,942,177,000 | 35.69863 | 102 | 0.601904 | false | 3.905248 | false | false | false |
chutsu/robotics | prototype/models/two_wheel.py | 1 | 3500 | from math import cos
from math import sin
import numpy as np
import sympy
from sympy import pprint
def two_wheel_2d_model(x, u, dt):
"""Two wheel 2D motion model
Parameters
----------
x : np.array
Two Wheel model state vector (x, y, theta)
u : np.array
        Input vector (v, omega)
dt : float
Time difference
Returns
-------
np.array (x, y, theta)
"""
gdot = np.array([[u[0, 0] * cos(x[2, 0]) * dt],
[u[0, 0] * sin(x[2, 0]) * dt],
[u[1, 0] * dt]])
return x + gdot
def two_wheel_2d_linearized_model(x, u, dt):
"""Two wheel 2D linearized motion model
Parameters
----------
x : np.array
Two Wheel model state vector (x, y, theta)
u : np.array
        Input vector (v, omega)
dt : float
Time difference
Returns
-------
np.array 3x3 matrix of linearized two wheel model
"""
G1 = 1.0
G2 = 0.0
G3 = -u[0, 0] * sin(x[2, 0]) * dt
G4 = 0.0
G5 = 1.0
G6 = u[0, 0] * cos(x[2, 0]) * dt
G7 = 0.0
G8 = 0.0
G9 = 1.0
return np.array([[G1, G2, G3],
[G4, G5, G6],
[G7, G8, G9]])
def two_wheel_3d_model(x, u, dt):
"""Two wheel 3D motion model
Parameters
----------
x : np.array
        Two Wheel model state vector (x, y, z, theta)
    u : np.array
        Input vector (v, vz, omega)
dt : float
Time difference
Returns
-------
np.array (x, y, z, theta)
"""
g1 = x[0] + u[0] * cos(x[3]) * dt
g2 = x[1] + u[0] * sin(x[3]) * dt
g3 = x[2] + u[1] * dt
g4 = x[3] + u[2] * dt
return np.array([g1, g2, g3, g4])
def two_wheel_2d_deriv():
""" Symbolic derivation of Jacobian of the 2D two wheel motion model """
x1, x2, x3, x4, x5 = sympy.symbols("x1,x2,x3,x4,x5")
dt = sympy.symbols("dt")
# x, y, theta, v, omega
f1 = x1 + x4 * sympy.cos(x3) * dt
f2 = x2 + x4 * sympy.sin(x3) * dt
f3 = x3 + x5 * dt
f4 = x4
f5 = x5
F = sympy.Matrix([f1, f2, f3, f4, f5])
pprint(F.jacobian([x1, x2, x3, x4, x5]))
def two_wheel_3d_deriv():
""" Symbolic derivation of Jacobian of the 3D two wheel motion model """
x1, x2, x3, x4, x5, x6, x7 = sympy.symbols("x1,x2,x3,x4,x5,x6,x7")
dt = sympy.symbols("dt")
# x1 - x
# x2 - y
# x3 - z
# x4 - theta
# x5 - v
# x6 - omega
# x7 - vz
# x, y, z, theta, v, omega, vz
f1 = x1 + x5 * sympy.cos(x4) * dt
f2 = x2 + x5 * sympy.sin(x4) * dt
f3 = x3 + x7 * dt
f4 = x4 + x6 * dt
f5 = x5
f6 = x6
f7 = x7
F = sympy.Matrix([f1, f2, f3, f4, f5, f6, f7])
pprint(F.jacobian([x1, x2, x3, x4, x5, x6, x7]))
def two_wheel_3d_deriv2():
""" Symbolic derivation of Jacobian of the 3D two wheel motion model """
functions = sympy.symbols("f1,f2,f3,f4,f5,f6,f7,f8,f9")
variables = sympy.symbols("x1,x2,x3,x4,x5,x6,x7,x8,x9")
f1, f2, f3, f4, f5, f6, f7, f8, f9 = functions
x1, x2, x3, x4, x5, x6, x7, x8, x9 = variables
dt = sympy.symbols("dt")
# x1 - x
# x2 - y
# x3 - z
# x4 - theta
# x5 - v
# x6 - vz
# x7 - omega
# x8 - a
# x9 - az
f1 = x1 + x5 * sympy.cos(x4) * dt
f2 = x2 + x5 * sympy.sin(x4) * dt
f3 = x3 + x6 * dt
f4 = x4 + x7 * dt
f5 = x5 + x8 * dt
f6 = x6 + x9 * dt
f7 = x7
f8 = x8
f9 = x9
F = sympy.Matrix([f1, f2, f3, f4, f5, f6, f7, f8, f9])
pprint(F.jacobian([x1, x2, x3, x4, x5, x6, x7, x8, x9]))
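# --- Illustrative sketch (added for this document, not part of the original
# module): propagate the 2D model one step and inspect its linearization.
# The state is a 3x1 column (x, y, theta) and the input a 2x1 column
# (v, omega), matching the indexing used above.
if __name__ == "__main__":
    x0 = np.array([[0.0], [0.0], [0.0]])   # start at the origin, heading along x
    u0 = np.array([[1.0], [0.1]])          # 1 m/s forward, 0.1 rad/s turn rate
    dt = 0.1
    x1 = two_wheel_2d_model(x0, u0, dt)
    G = two_wheel_2d_linearized_model(x0, u0, dt)
    print("next state (x, y, theta):")
    print(x1.T)
    print("Jacobian of the motion model:")
    print(G)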
| gpl-3.0 | 2,906,790,711,327,816,000 | 19.833333 | 76 | 0.483714 | false | 2.470007 | false | false | false |
lingthio/Flask-User | flask_user/user_mixin.py | 1 | 4450 | """This module implements the UserMixin class for Flask-User.
This Mixin adds required methods to User data-model.
"""
from flask import current_app
from flask_login import UserMixin as FlaskLoginUserMixin
class UserMixin(FlaskLoginUserMixin):
""" This class adds required methods to the User data-model.
Example:
class User(db.Model, UserMixin):
...
"""
def get_id(self):
"""Converts a User ID and parts of a User password hash to a token."""
# This function is used by Flask-Login to store a User ID securely as a browser cookie.
        # The last part of the password is included so that tokens are invalidated when the password changes.
# user_id and password_ends_with are encrypted, timestamped and signed.
# This function works in tandem with UserMixin.get_user_by_token()
user_manager = current_app.user_manager
user_id = self.id
password_ends_with = '' if user_manager.USER_ENABLE_AUTH0 else self.password[-8:]
user_token = user_manager.generate_token(
user_id, # User ID
password_ends_with, # Last 8 characters of user password
)
# print("UserMixin.get_id: ID:", self.id, "token:", user_token)
return user_token
@classmethod
def get_user_by_token(cls, token, expiration_in_seconds=None):
# This function works in tandem with UserMixin.get_id()
# Token signatures and timestamps are verified.
# user_id and password_ends_with are decrypted.
# Verifies a token and decrypts a User ID and parts of a User password hash
user_manager = current_app.user_manager
data_items = user_manager.verify_token(token, expiration_in_seconds)
# Verify password_ends_with
token_is_valid = False
if data_items:
# Load user by User ID
user_id = data_items[0]
password_ends_with = data_items[1]
user = user_manager.db_manager.get_user_by_id(user_id)
user_password = '' if user_manager.USER_ENABLE_AUTH0 else user.password[-8:]
# Make sure that last 8 characters of user password matches
token_is_valid = user and user_password==password_ends_with
return user if token_is_valid else None
def has_roles(self, *requirements):
""" Return True if the user has all of the specified roles. Return False otherwise.
has_roles() accepts a list of requirements:
has_role(requirement1, requirement2, requirement3).
Each requirement is either a role_name, or a tuple_of_role_names.
role_name example: 'manager'
tuple_of_role_names: ('funny', 'witty', 'hilarious')
A role_name-requirement is accepted when the user has this role.
A tuple_of_role_names-requirement is accepted when the user has ONE of these roles.
has_roles() returns true if ALL of the requirements have been accepted.
For example:
has_roles('a', ('b', 'c'), d)
Translates to:
User has role 'a' AND (role 'b' OR role 'c') AND role 'd'"""
# Translates a list of role objects to a list of role_names
user_manager = current_app.user_manager
role_names = user_manager.db_manager.get_user_roles(self)
# has_role() accepts a list of requirements
for requirement in requirements:
if isinstance(requirement, (list, tuple)):
# this is a tuple_of_role_names requirement
tuple_of_role_names = requirement
authorized = False
for role_name in tuple_of_role_names:
if role_name in role_names:
# tuple_of_role_names requirement was met: break out of loop
authorized = True
break
if not authorized:
return False # tuple_of_role_names requirement failed: return False
else:
# this is a role_name requirement
role_name = requirement
# the user must have this role
if not role_name in role_names:
return False # role_name requirement failed: return False
# All requirements have been met: return True
return True
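# --- Illustrative sketch (added for this document, not part of Flask-User).
# has_roles() treats each plain argument as an AND requirement and each tuple
# argument as an OR group. The standalone helper below mirrors that truth
# table over a plain set of role names, so the semantics can be checked
# without a Flask app, user manager, or database.
def _check_role_requirements(role_names, *requirements):
    """Hypothetical helper mirroring UserMixin.has_roles() semantics."""
    for requirement in requirements:
        if isinstance(requirement, (list, tuple)):
            # OR group: at least one of these roles must be present.
            if not any(role in role_names for role in requirement):
                return False
        else:
            # Single role name: it must be present.
            if requirement not in role_names:
                return False
    return True


if __name__ == "__main__":
    roles = {'a', 'c', 'd'}
    # 'a' AND ('b' OR 'c') AND 'd' -> True for the role set above.
    assert _check_role_requirements(roles, 'a', ('b', 'c'), 'd')
    # 'a' AND 'b' -> False, because 'b' is missing.
    assert not _check_role_requirements(roles, 'a', 'b')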
| mit | 2,653,800,167,023,835,600 | 42.203883 | 106 | 0.602921 | false | 4.405941 | false | false | false |
abrt/faf | src/pyfaf/storage/migrations/versions/168c63b81f85_report_history_default_value.py | 1 | 1945 | # Copyright (C) 2014 ABRT Team
# Copyright (C) 2014 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
"""
Report history default value
Revision ID: 168c63b81f85
Revises: 183a15e52a4f
Create Date: 2016-12-13 15:49:32.883743
"""
from alembic.op import alter_column, execute
# revision identifiers, used by Alembic.
revision = '168c63b81f85'
down_revision = '1c4d6317721a'
def upgrade() -> None:
alter_column('reporthistorydaily', 'unique', server_default="0")
alter_column('reporthistoryweekly', 'unique', server_default="0")
alter_column('reporthistorymonthly', 'unique', server_default="0")
execute('UPDATE reporthistorydaily SET "unique" = 0 WHERE "unique" IS NULL')
execute('UPDATE reporthistoryweekly SET "unique" = 0 WHERE "unique" IS NULL')
execute('UPDATE reporthistorymonthly SET "unique" = 0 WHERE "unique" IS NULL')
def downgrade() -> None:
alter_column('reporthistorydaily', 'unique', server_default=None)
alter_column('reporthistoryweekly', 'unique', server_default=None)
alter_column('reporthistorymonthly', 'unique', server_default=None)
execute('UPDATE reporthistorydaily SET "unique" = NULL WHERE "unique" = 0')
execute('UPDATE reporthistoryweekly SET "unique" = NULL WHERE "unique" = 0')
execute('UPDATE reporthistorymonthly SET "unique" = NULL WHERE "unique" = 0')
| gpl-3.0 | 7,853,489,964,225,810,000 | 37.137255 | 82 | 0.731105 | false | 3.504505 | false | false | false |
sradevski/homeAutomate | scripts/laptop_on_network.py | 1 | 1994 | #!/usr/bin/python
import remote_core as core
import os
import sys
import nmap
import datetime
import time
import re
import go_to_sleep
try:
nm = nmap.PortScanner() # instance of nmap.PortScanner
except nmap.PortScannerError:
print('Nmap not found', sys.exc_info()[0])
sys.exit(0)
except:
print("Unexpected error:", sys.exc_info()[0])
sys.exit(0)
macAddressToSearch = '64:76:BA:A3:43:B0'
laptopHasBeenTurnedOn = False
disconnectedCounter = 0
def checkIfLaptopOn():
global macAddressToSearch, laptopHasBeenTurnedOn, disconnectedCounter
curHosts = []
# nm.scan(hosts = '192.168.11.1-8', arguments = '-n -sP -PS 7,22,88,443,80,660,2195 -PA 80,22,443 -PU -T3')
nm.scan(hosts = '192.168.11.1-8', arguments = '-n -sn -PR')
for host in nm.all_hosts():
try:
mac = nm[host]['addresses']['mac']
vendor = nm[host]['vendor'][mac]
except:
vendor = mac = 'unknown'
curHosts.append(mac)
localtime = time.asctime(time.localtime(time.time()))
print('============ {0} ============'.format(localtime))
for host in curHosts:
print(host)
config = core.load_config();
if config['location']['am_home']:
if macAddressToSearch not in curHosts:
if laptopHasBeenTurnedOn:
if disconnectedCounter > 3:
wentToSleepScript()
laptopHasBeenTurnedOn = False
disconnectedCounter += 1
else:
laptopHasBeenTurnedOn = True
def wentToSleepScript():
time.sleep(10)
go_to_sleep.go_to_sleep()
# print("SLEEPING")
if __name__ == '__main__':
start_at_hour = 22
stop_at_hour = 2
sleep_seconds = 60 * 60 * (start_at_hour - stop_at_hour) - 20
while True:
localtime = time.localtime(time.time())
if localtime.tm_hour > stop_at_hour and localtime.tm_hour < start_at_hour:
time.sleep(sleep_seconds - (60 * 60 * (start_at_hour - localtime.tm_hour)))
time.sleep(10)
checkIfLaptopOn()
| mit | 6,664,738,618,122,529,000 | 25.586667 | 110 | 0.61986 | false | 3.091473 | false | false | false |
JordanReiter/django-notification | notification/views.py | 1 | 6596 | from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect, Http404
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
try:
from django.contrib.syndication.views import Feed
except ImportError:
from django.contrib.syndication.views import feed as Feed
from notification.models import *
from notification.decorators import basic_auth_required, simple_basic_auth_callback
from notification.feeds import NoticeUserFeed
@basic_auth_required(realm="Notices Feed", callback_func=simple_basic_auth_callback)
def feed_for_user(request):
"""
An atom feed for all unarchived :model:`notification.Notice`s for a user.
"""
url = "feed/%s" % request.user.username
return Feed(request, url, {
"feed": NoticeUserFeed,
})
@login_required
def notices(request):
"""
The main notices index view.
Template: :template:`notification/notices.html`
Context:
notices
A list of :model:`notification.Notice` objects that are not archived
and to be displayed on the site.
"""
notices = Notice.objects.notices_for(request.user, on_site=True)
return render_to_response("notification/notices.html", {
"notices": notices,
}, context_instance=RequestContext(request))
@login_required
def notice_settings(request):
"""
The notice settings view.
Template: :template:`notification/notice_settings.html`
Context:
notice_types
A list of all :model:`notification.NoticeType` objects.
notice_settings
A dictionary containing ``column_headers`` for each ``NOTICE_MEDIA``
and ``rows`` containing a list of dictionaries: ``notice_type``, a
:model:`notification.NoticeType` object and ``cells``, a list of
tuples whose first value is suitable for use in forms and the second
value is ``True`` or ``False`` depending on a ``request.POST``
variable called ``form_label``, whose valid value is ``on``.
"""
notice_types = NoticeType.objects.all()
settings_table = []
for notice_type in notice_types:
settings_row = []
for medium_id, medium_display in NOTICE_MEDIA:
form_label = "%s_%s" % (notice_type.label, medium_id)
setting = get_notification_setting(request.user, notice_type, medium_id)
if request.method == "POST":
if request.POST.get(form_label) == "on":
if not setting.send:
setting.send = True
setting.save()
else:
if setting.send:
setting.send = False
setting.save()
settings_row.append((form_label, setting.send))
settings_table.append({"notice_type": notice_type, "cells": settings_row})
if request.method == "POST":
next_page = request.POST.get("next_page", ".")
return HttpResponseRedirect(next_page)
notice_settings = {
"column_headers": [medium_display for medium_id, medium_display in NOTICE_MEDIA],
"rows": settings_table,
}
return render_to_response("notification/notice_settings.html", {
"notice_types": notice_types,
"notice_settings": notice_settings,
}, context_instance=RequestContext(request))
@login_required
def single(request, id, mark_seen=True):
"""
Detail view for a single :model:`notification.Notice`.
Template: :template:`notification/single.html`
Context:
notice
The :model:`notification.Notice` being viewed
Optional arguments:
mark_seen
If ``True``, mark the notice as seen if it isn't
already. Do nothing if ``False``. Default: ``True``.
"""
notice = get_object_or_404(Notice, id=id)
if request.user == notice.recipient:
if mark_seen and notice.unseen:
notice.unseen = False
notice.save()
return render_to_response("notification/single.html", {
"notice": notice,
}, context_instance=RequestContext(request))
raise Http404
@login_required
def archive(request, noticeid=None, next_page=None):
"""
Archive a :model:`notices.Notice` if the requesting user is the
recipient or if the user is a superuser. Returns a
``HttpResponseRedirect`` when complete.
Optional arguments:
noticeid
The ID of the :model:`notices.Notice` to be archived.
next_page
The page to redirect to when done.
"""
if noticeid:
try:
notice = Notice.objects.get(id=noticeid)
if request.user == notice.recipient or request.user.is_superuser:
notice.archive()
            else:
                # Only the recipient or a superuser may archive this notice.
return HttpResponseRedirect(next_page)
except Notice.DoesNotExist:
return HttpResponseRedirect(next_page)
return HttpResponseRedirect(next_page)
@login_required
def delete(request, noticeid=None, next_page=None):
"""
Delete a :model:`notices.Notice` if the requesting user is the recipient
or if the user is a superuser. Returns a ``HttpResponseRedirect`` when
complete.
Optional arguments:
noticeid
The ID of the :model:`notices.Notice` to be archived.
next_page
The page to redirect to when done.
"""
if noticeid:
try:
notice = Notice.objects.get(id=noticeid)
if request.user == notice.recipient or request.user.is_superuser:
notice.delete()
            else:
                # Only the recipient or a superuser may delete this notice.
return HttpResponseRedirect(next_page)
except Notice.DoesNotExist:
return HttpResponseRedirect(next_page)
return HttpResponseRedirect(next_page)
@login_required
def mark_all_seen(request):
"""
Mark all unseen notices for the requesting user as seen. Returns a
``HttpResponseRedirect`` when complete.
"""
for notice in Notice.objects.notices_for(request.user, unseen=True):
notice.unseen = False
notice.save()
return HttpResponseRedirect(reverse("notification_notices"))
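# --- Illustrative URLconf sketch (added for this document, not part of the
# original django-notification app). The patterns and names below are
# assumptions showing one plausible way to wire the views above; a real
# project would keep this in a separate urls.py module.
def build_example_urlpatterns():
    """Hypothetical helper returning URL patterns for these views."""
    from django.conf.urls import url  # old-style Django, matching this module's era
    return [
        url(r"^$", notices, name="notification_notices"),
        url(r"^settings/$", notice_settings, name="notification_notice_settings"),
        url(r"^notice/(?P<id>\d+)/$", single, name="notification_notice"),
        url(r"^archive/(?P<noticeid>\d+)/$", archive,
            {"next_page": "/notices/"}, name="notification_archive"),
        url(r"^delete/(?P<noticeid>\d+)/$", delete,
            {"next_page": "/notices/"}, name="notification_delete"),
        url(r"^mark_all_seen/$", mark_all_seen, name="notification_mark_all_seen"),
    ]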
| mit | 8,042,785,939,941,627,000 | 32.482234 | 89 | 0.622347 | false | 4.336621 | false | false | false |
alexwaters/python-readability-api | readability/models.py | 1 | 5472 | # -*- coding: utf-8 -*-
"""
readability.models
~~~~~~~~~~~~~~~~~~
This module provides the core Readability API models.
"""
from .helpers import to_python, to_api
class BaseResource(object):
"""A Base BaseResource object."""
def __init__(self):
super(BaseResource, self).__init__()
self._rdd = None
def __dir__(self):
d = self.__dict__.copy()
try:
del d['_rdd']
except KeyError:
pass
return d.keys()
class Bookmark(BaseResource):
"""Bookmark API Model."""
def __init__(self):
self.id = None
self.user_id = None
self.read_percent = None
self.date_updated = None
self.favorite = None
self.archive = None
self.date_archived = None
self.date_opened = None
self.date_added = None
self.article = None
def __repr__(self):
return '<bookmark id="%s" favorite="%s" archive="%s" read_percent="%s">' % (self.id, self.favorite, self.archive, self.read_percent)
@staticmethod
def new_from_dict(d, rdd=None):
b = to_python(
obj=Bookmark(), in_dict=d,
string_keys = (
'id', 'user_id', 'read_percent', 'favorite', 'archive',
'author',
),
date_keys = ('date_updated', 'date_archived', 'date_opened', 'date_added'),
object_map = {'article': Article},
_rdd = rdd
)
return b
def delete(self):
"""Deletes Bookmark."""
return self._rdd._delete_resource(('bookmarks', self.id))
def update(self):
"""Updates Bookmark."""
args = to_api(
dict(
favorite=self.favorite,
archive=self.archive,
read_percent=self.read_percent,
),
int_keys=('favorite', 'archive')
)
r = self._rdd._post_resource(('bookmarks', self.id), **args)
return r
class Article(BaseResource):
def __init__(self):
self.id = None
self.domain = None
self.title = None
self.url = None
self.short_url = None
self.author = None
self.word_count = None
self.content = None
self.excerpt = None
self.date_published = None
self.next_page_href = None
self.processed = None
self.content_size = None
def __repr__(self):
return '<article id="%s">' % (self.id,)
@staticmethod
def new_from_dict(d, rdd=None):
return to_python(
obj=Article(), in_dict=d,
string_keys = (
'id', 'domain', 'title', 'url', 'short_url', 'author',
'word_count', 'content', 'excerpt', 'next_page_href',
'processed', 'content_size',
),
date_keys = ('date_published',),
_rdd = rdd
)
class Domain(BaseResource):
def __init__(self):
super(Domain, self).__init__()
self.fqdn = None
self.articles_ref = None
def __repr__(self):
return '<domain fqdn="%s">' % (self.fqdn,)
@staticmethod
def new_from_dict(d, rdd=None):
return to_python(
obj=Domain(), in_dict=d,
string_keys = ('fqdn', 'articles_ref'),
_rdd = rdd
)
def articles(self, **filters):
"""Returns Article list, filtered by Domain."""
return self._rdd.get_articles(domain=self.fqdn, **filters)
def contributions(self, **filters):
"""Returns Article list, filtered by Domain."""
return self._rdd.get_contributions(domain=self.fqdn, **filters)
class Contribution(BaseResource):
def __init__(self):
super(Contribution, self).__init__()
self.date = None
self.contribution = None
self.user = None
self.domain = None
self.num_bookmarks = None
def __repr__(self):
return '<contribution domain="%s">' % (self.domain,)
@staticmethod
def new_from_dict(d, rdd=None):
return to_python(
obj=Contribution(), in_dict=d,
string_keys = ('contribution', 'user', 'domain', 'num_bookmarks'),
date_keys = ('date'),
_rdd = rdd
)
class User(BaseResource):
"""User API Model."""
def __init__(self):
self.username = None
self.first_name = None
self.last_name = None
self.date_joined = None
def __repr__(self):
return '<user name="%s">' % (self.username,)
@staticmethod
def new_from_dict(d, rdd=None):
return to_python(
obj=User(), in_dict=d,
            string_keys = ('username', 'first_name', 'last_name'),
date_keys = ('date_joined',),
_rdd=rdd
)
def bookmarks(self, **filters):
"""Returns Bookmark list, filtered by User."""
if self.username == self._rdd.username:
return self._rdd.get_bookmarks(user=self.username, **filters)
else:
return self._rdd.get_bookmarks_by_user(self.username, **filters)
def contributions(self, **filters):
"""Returns Contributions list, filtered by User."""
if self.username == self._rdd.username:
return self._rdd.get_contributions(user=self.username, **filters)
else:
return self._rdd.get_contributions_by_user(self.username, **filters)
| mit | -2,055,132,855,764,576,500 | 22.088608 | 140 | 0.524671 | false | 4.01173 | false | false | false |
kaphka/catconv | convert.py | 1 | 1091 | import argparse
import signal
from tqdm import tqdm
import catconv.operations as co
import catconv.stabi as sb
exit_requested = False

def signal_handler(signum, frame):
    # Set a module-level flag so the main loop below can stop cleanly.
    global exit_requested
    print('You pressed Ctrl+C!')
    exit_requested = True

signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser()
parser.add_argument("source")
parser.add_argument("target")
parser.add_argument("-u", "--update", help="overwrite previous results",
action="store_true")
args = parser.parse_args()
source = sb.op.normpath(args.source)
target = sb.op.normpath(args.target)
data_dir, target_cat_name = sb.op.split(target)
pages = map(sb.page_from_path, sb.catalog_pages(source,ext=".tif"))
print("Source catalog:")
print("path:", source)
print("pages:", len(pages))
conversion = {"ext": ".jpg", "remove_type": True, "to_cat": data_dir,"cat": target_cat_name}
from_to = [(page, sb.convert_page_path(page, conversion)) for page in pages]
for ft in tqdm(from_to):
    if exit_requested:
break
from_page, to_page = ft
if sb.op.isfile(to_page['path']) and not args.update:
continue
else:
co.convert_to_png(*ft)
| apache-2.0 | 5,971,109,955,525,650,000 | 24.372093 | 92 | 0.669111 | false | 3.190058 | false | false | false |
alirizakeles/zato | code/zato-zmq/src/zato/zmq_/mdp/worker.py | 1 | 9531 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2016 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import logging
import time
from datetime import datetime, timedelta
# ZeroMQ
import zmq.green as zmq
# Zato
from zato.zmq_.mdp import BaseZMQConnection, const, EventWorkerDisconnect, EventWorkerHeartbeat, EventReady, EventWorkerReply
# ################################################################################################################################
logger = logging.getLogger(__name__)
# ################################################################################################################################
class Worker(BaseZMQConnection):
""" Standalone implementation of a worker for ZeroMQ Majordomo Protocol 0.1 http://rfc.zeromq.org/spec:7
"""
def __init__(self, service_name, broker_address='tcp://localhost:47047', linger=0, poll_interval=100, log_details=False,
heartbeat=3, heartbeat_mult=2, reconnect_sleep=2):
self.service_name = service_name
super(Worker, self).__init__(broker_address, linger, poll_interval, log_details)
# How often, in seconds, to send a heartbeat to the broker or expect one from the broker
self.heartbeat = heartbeat
# If self.heartbeat * self.heartbeat_mult is exceeded, we assume the broker is down
self.heartbeat_mult = heartbeat_mult
# How long, in seconds, to wait before attempting to reconnect to the broker
self.reconnect_sleep = reconnect_sleep
# When did we last hear from the broker
self.broker_last_heartbeat = None
# When did we last send our own heartbeat to the broker
self.worker_last_heartbeat = None
# Timestamp of when we started to run
self.last_connected = datetime.utcnow()
self.has_debug = logger.isEnabledFor(logging.DEBUG)
# Maps event IDs to methods that handle a given one
self.handle_event_map = {
const.v01.request_to_worker: self.on_event_request_to_worker,
const.v01.heartbeat: self.on_event_heartbeat,
const.v01.disconnect: self.on_event_disconnect,
}
# ################################################################################################################################
def connect(self):
logger.info('Connecting to broker %s', self.broker_address)
# Open ZeroMQ sockets first
# From worker to broker
self.client_socket.connect(self.broker_address)
# From broker to worker
self.worker_socket = self.ctx.socket(zmq.DEALER)
self.worker_socket.linger = self.linger
self.worker_poller = zmq.Poller()
self.worker_poller.register(self.worker_socket, zmq.POLLIN)
self.worker_socket.connect(self.broker_address)
# Ok, we are ready
self.notify_ready()
# We can assume that the broker received our message
self.last_connected = datetime.utcnow()
# ################################################################################################################################
def stop(self):
self.worker_poller.unregister(self.worker_socket)
self.worker_socket.close()
self.stop_client_socket()
self.connect_client_socket()
logger.info('Stopped worker for %s', self.broker_address)
# ################################################################################################################################
def needs_reconnect(self):
base_timestamp = self.broker_last_heartbeat if self.broker_last_heartbeat else self.last_connected
return datetime.utcnow() >= base_timestamp + timedelta(seconds=self.heartbeat * self.heartbeat_mult)
# ################################################################################################################################
def reconnect(self):
last_hb = '{} (UTC)'.format(self.broker_last_heartbeat.isoformat()) if self.broker_last_heartbeat else 'never'
logger.info('Sleeping for %ss before reconnecting to broker %s, last HB from broker: %s',
self.reconnect_sleep, self.broker_address, last_hb)
time.sleep(self.reconnect_sleep)
logger.info('Reconnecting to broker %s', self.broker_address)
self.stop()
self.connect()
# Let's give the other side a moment to reply to our ready event
time.sleep(self.reconnect_sleep)
# ################################################################################################################################
def needs_hb_to_broker(self):
return datetime.utcnow() >= self.worker_last_heartbeat + timedelta(seconds=self.heartbeat)
# ################################################################################################################################
def serve_forever(self):
# To speed up look-ups
log_details = self.log_details
# Main loop
while self.keep_running:
try:
items = self.worker_poller.poll(self.poll_interval)
except KeyboardInterrupt:
self.notify_disconnect()
break
if items:
msg = self.worker_socket.recv_multipart()
if log_details:
logger.info('Received msg at %s %s', self.broker_address, msg)
self.handle(msg)
else:
if log_details:
logger.info('No items for worker at %s', self.broker_address)
if self.needs_hb_to_broker():
self.notify_heartbeat()
if self.needs_reconnect():
self.reconnect()
# ################################################################################################################################
def on_event_request_to_worker(self, msg):
logger.info('In _handle %s', msg)
return datetime.utcnow().isoformat()
# ################################################################################################################################
def on_event_heartbeat(self, *ignored):
""" A no-op since self.handle already handles heartbeats from the broker.
"""
# ################################################################################################################################
def on_event_disconnect(self, *ignored):
""" Our broker tells us to disconnect - according to the spec we now must re-open the connection.
"""
self.reconnect()
# ################################################################################################################################
def handle(self, msg):
logger.info('Handling %s', msg)
# Since we received this message, it means the broker is up so the message,
# no matter what event it is, allows us to update the timestamp of the last HB from broker
self.broker_last_heartbeat = datetime.utcnow()
sender_id = None
body = None
command = msg[2]
if command == const.v01.request_to_worker:
sender_id = msg[3]
body = msg[4]
# Hand over the message to an actual implementation and reply if told to
response = self.handle_event_map[command](body)
if response:
self.send(EventWorkerReply(response, sender_id).serialize())
# Message handled, we are ready to handle a new one, assuming this one was a request
if command == const.v01.request_to_worker:
self.notify_ready()
# ################################################################################################################################
def send(self, data, needs_hb=True):
""" Sends data to the broker and updates an internal timer of when the last time we send a heartbeat to the broker
since sending anything in that direction should be construed by the broker as a heartbeat itself.
"""
# Send data first
self.worker_socket.send_multipart(data)
# Update the timer
if needs_hb:
self.worker_last_heartbeat = datetime.utcnow()
# ################################################################################################################################
def notify_ready(self):
""" Notify the broker that we are ready to handle a new message.
"""
self.send(EventReady(self.service_name).serialize())
# ################################################################################################################################
def notify_heartbeat(self):
""" Notify the broker that we are still around.
"""
self.send(EventWorkerHeartbeat().serialize())
# ################################################################################################################################
def notify_disconnect(self):
""" Notify the broker that we are to disconnect from it.
"""
self.send(EventWorkerDisconnect().serialize(), needs_hb=False)
# ################################################################################################################################
if __name__ == '__main__':
w = Worker(b'My service', 'tcp://localhost:47047')
w.connect()
w.serve_forever()
| gpl-3.0 | -5,260,113,745,436,168,000 | 37.587045 | 130 | 0.484 | false | 5.113197 | false | false | false |
antonygc/liblightbase | liblightbase/lbdoc/metaclass.py | 1 | 6065 | from liblightbase import lbutils
from liblightbase.lbdoc.metadata import DocumentMetadata
def generate_metaclass(struct, base=None):
"""
Generate document metaclass. The document metaclass
is an abstraction of document model defined by base
structures.
@param struct: Field or Group object.
@param base: Base object or None.
"""
build_metadata = False
if base is None:
base = struct
build_metadata = True
snames = struct.content.__snames__
rnames = struct.content.__rnames__
class MetaClass(object):
"""
        Document metaclass. Describes the structures defined by the
document structure model.
"""
# @property __valreq__: Flag used to validate required
# fields or not.
__valreq__ = True
# @property __slots__: reserves space for the declared
# variables and prevents the automatic creation of
# __dict__ and __weakref__ for each instance.
__slots__ = ['_' + sname for sname in snames]
if build_metadata:
__slots__.append('__metadata__')
def __init__(self, **kwargs):
""" Document MetaClass constructor
"""
if self.__valreq__:
lbutils.validate_required(rnames, kwargs)
for arg in kwargs:
setattr(self, arg, kwargs[arg])
for childstruct in struct.content:
structname, prop = generate_property(base, childstruct)
setattr(MetaClass, structname, prop)
if build_metadata:
MetaClass._metadata = build_metadata_prop()
MetaClass.__name__ = struct.metadata.name
return MetaClass
def generate_property(base, struct):
"""
Make python's property based on structure attributes.
@param base: Base object.
@param struct: Field or Group object.
"""
if struct.is_field:
structname = struct.name
elif struct.is_group:
structname = struct.metadata.name
attr_name = '_' + structname
def getter(self):
value = getattr(self, attr_name)
if struct.is_field:
return getattr(value, '__value__')
return value
def setter(self, value):
struct_metaclass = base.metaclass(structname)
if struct.is_field:
value = struct_metaclass(value)
elif struct.is_group:
if struct.metadata.multivalued:
msg = 'object {} should be instance of {}'.format(
struct.metadata.name, list)
assert isinstance(value, list), msg
msg = '{} list elements should be instances of {}'.format(
struct.metadata.name, struct_metaclass)
assertion = all(isinstance(element, struct_metaclass) \
for element in value)
assert assertion, msg
value = generate_multimetaclass(struct,
struct_metaclass)(value)
else:
msg = '{} object should be an instance of {}'.format(
struct.metadata.name, struct_metaclass)
assert isinstance(value, struct_metaclass), msg
setattr(self, attr_name, value)
def deleter(self):
delattr(self, attr_name)
return structname, property(getter,
setter, deleter, structname)
def build_metadata_prop():
def fget(self):
return self.__metadata__
def fset(self, value):
msg = '_metadata attribute should be a DocumentMetadata object.'
assert isinstance(value, DocumentMetadata)
self.__metadata__ = value
def fdel(self):
del self.__metadata__
return property(fget, fset, fdel, '_metadata')
def generate_multimetaclass(struct, struct_metaclass):
"""
Generate metaclass to use with multivalued groups.
@param struct: Field or Group object
@param struct_metaclass: The struct Metaclass
"""
class MultiGroupMetaClass(list):
"""
Multivalued Group Metaclass. Metaclass used to ensure list
elements are instances of right metaclasses.
"""
def __setitem__(self, index, element):
""" x.__setitem__(y, z) <==> x[y] = z
"""
msg = '{} list elements should be instances of {}'.format(
struct.metadata.name, struct_metaclass)
assert isinstance(element, struct_metaclass), msg
return super(MultiGroupMetaClass, self).__setitem__(index,
element)
def append(self, element):
""" L.append(object) -- append object to end
"""
msg = '{} list elements should be instances of {}'.format(
struct.metadata.name, struct_metaclass)
assert isinstance(element, struct_metaclass), msg
return super(MultiGroupMetaClass, self).append(element)
return MultiGroupMetaClass
def generate_field_metaclass(field, base):
"""
Generate field metaclass. The field metaclass
    validates incoming values against the field's datatype.
@param field: Field object.
@param base: Base object.
"""
class FieldMetaClass(object):
"""
Field MetaClass. validates incoming
value against fields' datatype.
"""
def __init__(self, value):
self.__value__ = value
def __setattr__(self, obj, value):
validator = field._datatype.__schema__(base, field, 0)
if field.multivalued is True:
msg = 'Expected type list for {}, but found {}'
assert isinstance(value, list), msg.format(
field.name, type(value))
value = [validator(element) for element in value]
else:
value = validator(value)
super(FieldMetaClass, self).__setattr__('__value__', value)
def __getattr__(self, obj):
return super(FieldMetaClass, self).__getattribute__('__value__')
FieldMetaClass.__name__ = field.name
return FieldMetaClass
| gpl-2.0 | 6,355,603,194,399,791,000 | 33.460227 | 76 | 0.588458 | false | 4.658218 | false | false | false |
aio-libs/aiozmq | examples/core_dealer_router.py | 1 | 1579 | import asyncio
import aiozmq
import zmq
class ZmqDealerProtocol(aiozmq.ZmqProtocol):
transport = None
def __init__(self, queue, on_close):
self.queue = queue
self.on_close = on_close
def connection_made(self, transport):
self.transport = transport
def msg_received(self, msg):
self.queue.put_nowait(msg)
def connection_lost(self, exc):
self.on_close.set_result(exc)
class ZmqRouterProtocol(aiozmq.ZmqProtocol):
transport = None
def __init__(self, on_close):
self.on_close = on_close
def connection_made(self, transport):
self.transport = transport
def msg_received(self, msg):
self.transport.write(msg)
def connection_lost(self, exc):
self.on_close.set_result(exc)
async def go():
router_closed = asyncio.Future()
dealer_closed = asyncio.Future()
router, _ = await aiozmq.create_zmq_connection(
lambda: ZmqRouterProtocol(router_closed), zmq.ROUTER, bind="tcp://127.0.0.1:*"
)
addr = list(router.bindings())[0]
queue = asyncio.Queue()
dealer, _ = await aiozmq.create_zmq_connection(
lambda: ZmqDealerProtocol(queue, dealer_closed), zmq.DEALER, connect=addr
)
for i in range(10):
msg = (b"data", b"ask", str(i).encode("utf-8"))
dealer.write(msg)
answer = await queue.get()
print(answer)
dealer.close()
await dealer_closed
router.close()
await router_closed
def main():
asyncio.run(go())
print("DONE")
if __name__ == "__main__":
main()
| bsd-2-clause | -5,655,556,457,899,408,000 | 21.239437 | 86 | 0.621279 | false | 3.455142 | false | false | false |
pyfa-org/eos | eos/item/mixin/effect_stats/remote_repair.py | 1 | 1829 | # ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos.eve_obj.effect.repairs.base import RemoteArmorRepairEffect
from eos.eve_obj.effect.repairs.base import RemoteShieldRepairEffect
from eos.item.mixin.base import BaseItemMixin
class RemoteRepairMixin(BaseItemMixin):
def __repair_effect_iter(self, effect_class):
for effect in self._type_effects.values():
if not isinstance(effect, effect_class):
continue
if effect.id not in self._running_effect_ids:
continue
yield effect
def get_armor_rps(self, reload=False):
rps = 0
for effect in self.__repair_effect_iter(RemoteArmorRepairEffect):
rps += effect.get_rps(self, reload=reload)
return rps
def get_shield_rps(self, reload=False):
rps = 0
for effect in self.__repair_effect_iter(RemoteShieldRepairEffect):
rps += effect.get_rps(self, reload=reload)
return rps
| lgpl-3.0 | -2,539,301,026,785,657,300 | 37.914894 | 80 | 0.636413 | false | 4.04646 | false | false | false |
bollu/polymage | sandbox/apps/python/img_proc/harris/init.py | 1 | 1485 | import sys
import os.path
from PIL import Image
import numpy as np
from arg_parser import parse_args
from printer import print_header, print_usage, print_line
def init_images(app_data):
print("[init.py] : initializing images...")
app_args = app_data['app_args']
# input image:
img_path = app_args.img_file
img = np.array(Image.open(img_path).convert('1'))
rows, cols = img.shape
# convert to float image
IN = np.array(img)
IN = IN.astype(np.float32).ravel()
# final output image
OUT = np.zeros((rows, cols), np.float32).ravel()
img_data = {}
img_data['IN'] = IN
img_data['OUT'] = OUT
app_data['img_data'] = img_data
app_data['rows'] = rows
app_data['cols'] = cols
return
def get_input(app_data):
# parse the command-line arguments
app_args = parse_args()
app_data['app_args'] = app_args
app_data['mode'] = app_args.mode
app_data['runs'] = int(app_args.runs)
app_data['graph_gen'] = bool(app_args.graph_gen)
app_data['timer'] = app_args.timer
# storage optimization
app_data['optimize_storage'] = bool(app_args.optimize_storage)
# early freeing of allocated arrays
app_data['early_free'] = bool(app_args.early_free)
# pool allocate option
app_data['pool_alloc'] = bool(app_args.pool_alloc)
return
def init_all(app_data):
pipe_data = {}
app_data['pipe_data'] = pipe_data
get_input(app_data)
init_images(app_data)
return
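# --- Illustrative sketch (added for this document, not part of PolyMage).
# Typical call order for this module: build an empty app_data dict, then let
# init_all() parse the command line (via arg_parser) and load the image whose
# path is supplied as the image-file argument defined by parse_args().
if __name__ == "__main__":
    app_data = {}
    init_all(app_data)
    print("loaded image of size %d x %d" % (app_data['rows'], app_data['cols']))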
| apache-2.0 | -1,343,414,416,860,723,500 | 22.203125 | 66 | 0.630976 | false | 3.207343 | false | false | false |
Endika/mitmproxy | libmproxy/contentviews.py | 1 | 16688 | """
Mitmproxy Content Views
=======================
mitmproxy includes a set of content views which can be used to format/decode/highlight data.
While they are currently used for HTTP message bodies only, the may be used in other contexts
in the future, e.g. to decode protobuf messages sent as WebSocket frames.
Thus, the View API is very minimalistic. The only arguments are `data` and `**metadata`,
where `data` is the actual content (as bytes). The contents on metadata depend on the protocol in
use. For HTTP, the message headers are passed as the ``headers`` keyword argument.
"""
from __future__ import (absolute_import, print_function, division)
import cStringIO
import json
import logging
import subprocess
import sys
import lxml.html
import lxml.etree
import datetime
from PIL import Image
from PIL.ExifTags import TAGS
import html2text
import six
from netlib.odict import ODict
from netlib import encoding
from netlib.utils import clean_bin, hexdump, urldecode, multipartdecode, parse_content_type
from . import utils
from .exceptions import ContentViewException
from .contrib import jsbeautifier
from .contrib.wbxml.ASCommandResponse import ASCommandResponse
try:
import pyamf
from pyamf import remoting, flex
except ImportError: # pragma nocover
pyamf = None
try:
import cssutils
except ImportError: # pragma nocover
cssutils = None
else:
cssutils.log.setLevel(logging.CRITICAL)
cssutils.ser.prefs.keepComments = True
cssutils.ser.prefs.omitLastSemicolon = False
cssutils.ser.prefs.indentClosingBrace = False
cssutils.ser.prefs.validOnly = False
# Default view cutoff *in lines*
VIEW_CUTOFF = 512
KEY_MAX = 30
def format_dict(d):
"""
Helper function that transforms the given dictionary into a list of
("key", key )
("value", value)
tuples, where key is padded to a uniform width.
"""
max_key_len = max(len(k) for k in d.keys())
max_key_len = min(max_key_len, KEY_MAX)
for key, value in d.items():
key += ":"
key = key.ljust(max_key_len + 2)
yield [
("header", key),
("text", value)
]
def format_text(text):
"""
Helper function that transforms bytes into the view output format.
"""
for line in text.splitlines():
yield [("text", line)]
class View(object):
name = None
prompt = ()
content_types = []
def __call__(self, data, **metadata):
"""
Transform raw data into human-readable output.
Args:
data: the data to decode/format as bytes.
metadata: optional keyword-only arguments for metadata. Implementations must not
rely on a given argument being present.
Returns:
A (description, content generator) tuple.
The content generator yields lists of (style, text) tuples, where each list represents
a single line. ``text`` is a unfiltered byte string which may need to be escaped,
depending on the used output.
Caveats:
The content generator must not yield tuples of tuples,
because urwid cannot process that. You have to yield a *list* of tuples per line.
"""
raise NotImplementedError()
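# --- Illustrative example (added for this document, not part of mitmproxy).
# A minimal custom view following the contract documented above: __call__
# returns a (description, line generator) pair, and each generated line is a
# *list* of (style, text) tuples. It is intentionally not registered here;
# see the usage sketch at the end of the module.
class ViewUpperText(View):
    name = "Upper Text"
    prompt = ("upper text", "U")
    content_types = []

    def __call__(self, data, **metadata):
        return "Upper-cased text", format_text(data.upper())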
class ViewAuto(View):
name = "Auto"
prompt = ("auto", "a")
content_types = []
def __call__(self, data, **metadata):
headers = metadata.get("headers", {})
ctype = headers.get("content-type")
if ctype:
ct = parse_content_type(ctype) if ctype else None
ct = "%s/%s" % (ct[0], ct[1])
if ct in content_types_map:
return content_types_map[ct][0](data, **metadata)
elif utils.isXML(data):
return get("XML")(data, **metadata)
if utils.isMostlyBin(data):
return get("Hex")(data)
return get("Raw")(data)
class ViewRaw(View):
name = "Raw"
prompt = ("raw", "r")
content_types = []
def __call__(self, data, **metadata):
return "Raw", format_text(data)
class ViewHex(View):
name = "Hex"
prompt = ("hex", "e")
content_types = []
@staticmethod
def _format(data):
for offset, hexa, s in hexdump(data):
yield [
("offset", offset + " "),
("text", hexa + " "),
("text", s)
]
def __call__(self, data, **metadata):
return "Hex", self._format(data)
class ViewXML(View):
name = "XML"
prompt = ("xml", "x")
content_types = ["text/xml"]
def __call__(self, data, **metadata):
parser = lxml.etree.XMLParser(
remove_blank_text=True,
resolve_entities=False,
strip_cdata=False,
recover=False
)
try:
document = lxml.etree.fromstring(data, parser)
except lxml.etree.XMLSyntaxError:
return None
docinfo = document.getroottree().docinfo
prev = []
p = document.getroottree().getroot().getprevious()
while p is not None:
prev.insert(
0,
lxml.etree.tostring(p)
)
p = p.getprevious()
doctype = docinfo.doctype
if prev:
doctype += "\n".join(prev).strip()
doctype = doctype.strip()
s = lxml.etree.tostring(
document,
pretty_print=True,
xml_declaration=True,
doctype=doctype or None,
encoding=docinfo.encoding
)
return "XML-like data", format_text(s)
class ViewJSON(View):
name = "JSON"
prompt = ("json", "s")
content_types = ["application/json"]
def __call__(self, data, **metadata):
pretty_json = utils.pretty_json(data)
if pretty_json:
return "JSON", format_text(pretty_json)
class ViewHTML(View):
name = "HTML"
prompt = ("html", "h")
content_types = ["text/html"]
def __call__(self, data, **metadata):
if utils.isXML(data):
parser = lxml.etree.HTMLParser(
strip_cdata=True,
remove_blank_text=True
)
d = lxml.html.fromstring(data, parser=parser)
docinfo = d.getroottree().docinfo
s = lxml.etree.tostring(
d,
pretty_print=True,
doctype=docinfo.doctype,
encoding='utf8'
)
return "HTML", format_text(s)
class ViewHTMLOutline(View):
name = "HTML Outline"
prompt = ("html outline", "o")
content_types = ["text/html"]
def __call__(self, data, **metadata):
data = data.decode("utf-8")
h = html2text.HTML2Text(baseurl="")
h.ignore_images = True
h.body_width = 0
outline = h.handle(data)
return "HTML Outline", format_text(outline)
class ViewURLEncoded(View):
name = "URL-encoded"
prompt = ("urlencoded", "u")
content_types = ["application/x-www-form-urlencoded"]
def __call__(self, data, **metadata):
d = urldecode(data)
return "URLEncoded form", format_dict(ODict(d))
class ViewMultipart(View):
name = "Multipart Form"
prompt = ("multipart", "m")
content_types = ["multipart/form-data"]
@staticmethod
def _format(v):
yield [("highlight", "Form data:\n")]
for message in format_dict(ODict(v)):
yield message
def __call__(self, data, **metadata):
headers = metadata.get("headers", {})
v = multipartdecode(headers, data)
if v:
return "Multipart form", self._format(v)
if pyamf:
class DummyObject(dict):
def __init__(self, alias):
dict.__init__(self)
def __readamf__(self, input):
data = input.readObject()
self["data"] = data
def pyamf_class_loader(s):
for i in pyamf.CLASS_LOADERS:
if i != pyamf_class_loader:
v = i(s)
if v:
return v
return DummyObject
pyamf.register_class_loader(pyamf_class_loader)
class ViewAMF(View):
name = "AMF"
prompt = ("amf", "f")
content_types = ["application/x-amf"]
def unpack(self, b, seen=set([])):
if hasattr(b, "body"):
return self.unpack(b.body, seen)
if isinstance(b, DummyObject):
if id(b) in seen:
return "<recursion>"
else:
seen.add(id(b))
for k, v in b.items():
b[k] = self.unpack(v, seen)
return b
elif isinstance(b, dict):
for k, v in b.items():
b[k] = self.unpack(v, seen)
return b
elif isinstance(b, list):
return [self.unpack(i) for i in b]
elif isinstance(b, datetime.datetime):
return str(b)
elif isinstance(b, flex.ArrayCollection):
return [self.unpack(i, seen) for i in b]
else:
return b
def _format(self, envelope):
for target, message in iter(envelope):
if isinstance(message, pyamf.remoting.Request):
yield [
("header", "Request: "),
("text", str(target)),
]
else:
yield [
("header", "Response: "),
("text", "%s, code %s" % (target, message.status)),
]
s = json.dumps(self.unpack(message), indent=4)
for msg in format_text(s):
yield msg
def __call__(self, data, **metadata):
envelope = remoting.decode(data, strict=False)
if envelope:
return "AMF v%s" % envelope.amfVersion, self._format(envelope)
class ViewJavaScript(View):
name = "JavaScript"
prompt = ("javascript", "j")
content_types = [
"application/x-javascript",
"application/javascript",
"text/javascript"
]
def __call__(self, data, **metadata):
opts = jsbeautifier.default_options()
opts.indent_size = 2
res = jsbeautifier.beautify(data, opts)
return "JavaScript", format_text(res)
class ViewCSS(View):
name = "CSS"
prompt = ("css", "c")
content_types = [
"text/css"
]
def __call__(self, data, **metadata):
if cssutils:
sheet = cssutils.parseString(data)
beautified = sheet.cssText
else:
beautified = data
return "CSS", format_text(beautified)
class ViewImage(View):
name = "Image"
prompt = ("image", "i")
content_types = [
"image/png",
"image/jpeg",
"image/gif",
"image/vnd.microsoft.icon",
"image/x-icon",
]
def __call__(self, data, **metadata):
try:
img = Image.open(cStringIO.StringIO(data))
except IOError:
return None
parts = [
("Format", str(img.format_description)),
("Size", "%s x %s px" % img.size),
("Mode", str(img.mode)),
]
for i in sorted(img.info.keys()):
if i != "exif":
parts.append(
(str(i), str(img.info[i]))
)
if hasattr(img, "_getexif"):
ex = img._getexif()
if ex:
for i in sorted(ex.keys()):
tag = TAGS.get(i, i)
parts.append(
(str(tag), str(ex[i]))
)
fmt = format_dict(ODict(parts))
return "%s image" % img.format, fmt
class ViewProtobuf(View):
"""Human friendly view of protocol buffers
The view uses the protoc compiler to decode the binary
"""
name = "Protocol Buffer"
prompt = ("protobuf", "p")
content_types = [
"application/x-protobuf",
"application/x-protobuffer",
]
@staticmethod
def is_available():
try:
p = subprocess.Popen(
["protoc", "--version"],
stdout=subprocess.PIPE
)
out, _ = p.communicate()
return out.startswith("libprotoc")
except:
return False
def decode_protobuf(self, content):
# if Popen raises OSError, it will be caught in
# get_content_view and fall back to Raw
p = subprocess.Popen(['protoc', '--decode_raw'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(input=content)
if out:
return out
else:
return err
def __call__(self, data, **metadata):
decoded = self.decode_protobuf(data)
return "Protobuf", format_text(decoded)
class ViewWBXML(View):
name = "WBXML"
prompt = ("wbxml", "w")
content_types = [
"application/vnd.wap.wbxml",
"application/vnd.ms-sync.wbxml"
]
def __call__(self, data, **metadata):
try:
parser = ASCommandResponse(data)
parsedContent = parser.xmlString
if parsedContent:
return "WBXML", format_text(parsedContent)
except:
return None
views = []
content_types_map = {}
view_prompts = []
def get(name):
for i in views:
if i.name == name:
return i
def get_by_shortcut(c):
for i in views:
if i.prompt[1] == c:
return i
def add(view):
# TODO: auto-select a different name (append an integer?)
for i in views:
if i.name == view.name:
raise ContentViewException("Duplicate view: " + view.name)
# TODO: the UI should auto-prompt for a replacement shortcut
for prompt in view_prompts:
if prompt[1] == view.prompt[1]:
raise ContentViewException("Duplicate view shortcut: " + view.prompt[1])
views.append(view)
for ct in view.content_types:
l = content_types_map.setdefault(ct, [])
l.append(view)
view_prompts.append(view.prompt)
def remove(view):
for ct in view.content_types:
l = content_types_map.setdefault(ct, [])
l.remove(view)
if not len(l):
del content_types_map[ct]
view_prompts.remove(view.prompt)
views.remove(view)
add(ViewAuto())
add(ViewRaw())
add(ViewHex())
add(ViewJSON())
add(ViewXML())
add(ViewWBXML())
add(ViewHTML())
add(ViewHTMLOutline())
add(ViewJavaScript())
add(ViewCSS())
add(ViewURLEncoded())
add(ViewMultipart())
add(ViewImage())
if pyamf:
add(ViewAMF())
if ViewProtobuf.is_available():
add(ViewProtobuf())
def safe_to_print(lines, encoding="utf8"):
"""
Wraps a content generator so that each text portion is a *safe to print* unicode string.
"""
for line in lines:
clean_line = []
for (style, text) in line:
try:
text = clean_bin(text.decode(encoding, "strict"))
except UnicodeDecodeError:
text = clean_bin(text).decode(encoding, "strict")
clean_line.append((style, text))
yield clean_line
def get_content_view(viewmode, data, **metadata):
"""
Args:
viewmode: the view to use.
data, **metadata: arguments passed to View instance.
Returns:
A (description, content generator) tuple.
In contrast to calling the views directly, text is always safe-to-print unicode.
Raises:
ContentViewException, if the content view threw an error.
"""
if not data:
return "No content", []
msg = []
headers = metadata.get("headers", {})
enc = headers.get("content-encoding")
if enc and enc != "identity":
decoded = encoding.decode(enc, data)
if decoded:
data = decoded
msg.append("[decoded %s]" % enc)
try:
ret = viewmode(data, **metadata)
# Third-party viewers can fail in unexpected ways...
except Exception as e:
six.reraise(
ContentViewException,
ContentViewException(str(e)),
sys.exc_info()[2]
)
if not ret:
ret = get("Raw")(data, **metadata)
msg.append("Couldn't parse: falling back to Raw")
else:
msg.append(ret[0])
return " ".join(msg), safe_to_print(ret[1])
| mit | 1,470,349,869,913,732,900 | 26.583471 | 98 | 0.54704 | false | 3.998083 | false | false | false |
dropbox/changes | changes/listeners/mail.py | 1 | 8772 | from __future__ import absolute_import, print_function
from itertools import imap
import logging
import toronado
from email.utils import parseaddr
from flask import current_app, render_template
from flask_mail import Message, sanitize_address
from jinja2 import Markup
from typing import List # NOQA
from changes.config import db, mail
from changes.constants import Result, Status
from changes.db.utils import try_create
from changes.lib import build_context_lib, build_type
from changes.lib.build_context_lib import CollectionContext # NOQA
from changes.models.event import Event, EventType
from changes.models.build import Build
from changes.models.job import Job
from changes.models.jobplan import JobPlan
from changes.models.project import ProjectOption
def filter_recipients(email_list, domain_whitelist=None):
"""
Returns emails from email_list that have been white-listed by
domain_whitelist.
"""
if domain_whitelist is None:
domain_whitelist = current_app.config['MAIL_DOMAIN_WHITELIST']
if not domain_whitelist:
return email_list
return [
e for e in email_list
if parseaddr(e)[1].split('@', 1)[-1] in domain_whitelist
]
class MailNotificationHandler(object):
logger = logging.getLogger('mail')
def send(self, msg, build):
msg.recipients = filter_recipients(msg.recipients)
if not msg.recipients:
self.logger.info(
'Exiting for collection_id={} because its message has no '
'recipients.'.format(build.collection_id))
return
event = try_create(Event, where={
'type': EventType.email,
'item_id': build.collection_id,
'data': {
'triggering_build_id': build.id.hex,
'recipients': msg.recipients,
}
})
# If we were unable to create the Event, we must've done so (and thus sent the mail) already.
if not event:
self.logger.warning('An email has already been sent for collection_id=%s, (build_id=%s).',
build.collection_id, build.id.hex)
return
mail.send(msg)
def get_msg(self, builds):
# type: (List[Build]) -> Message
context = build_context_lib.get_collection_context(builds) # type: CollectionContext
if context.result == Result.passed:
return None
max_shown = current_app.config.get('MAX_SHOWN_ITEMS_PER_BUILD_MAIL', 3)
context_dict = context._asdict()
context_dict.update({
'MAX_SHOWN_ITEMS_PER_BUILD': max_shown,
'showing_failing_tests_count':
sum([min(b['failing_tests_count'], max_shown) for b in context.builds])
})
recipients = self.get_collection_recipients(context)
msg = Message(context.title, recipients=recipients, extra_headers={
'Reply-To': ', '.join(sanitize_address(r) for r in recipients),
})
msg.body = render_template('listeners/mail/notification.txt', **context_dict)
msg.html = Markup(toronado.from_string(
render_template('listeners/mail/notification.html', **context_dict)
))
return msg
def get_collection_recipients(self, collection_context):
# type: (CollectionContext) -> List[unicode]
"""
Returns a list of recipients for a collection context created by
get_collection_context. Only recipients for failing builds will be
returned.
"""
recipient_lists = map(
lambda build_context: self.get_build_recipients(build_context['build']),
collection_context.builds)
return list(set([r for rs in recipient_lists for r in rs]))
def get_build_recipients(self, build):
# type: (Build) -> List[unicode]
"""
Returns a list of recipients for a build.
The build author is included unless the build and all failing jobs
have turned off the mail.notify-author option.
Successful builds will return the empty list.
Recipients are also collected from each failing job's
mail.notify-addresses and mail.notify-addresses-revisions options.
Should there be no failing jobs (is that possible?), recipients are
collected from the build's own mail.notify-addresses and
mail.notify-addresses-revisions options.
"""
if build.result == Result.passed:
return []
recipients = []
options = self.get_build_options(build)
if options['mail.notify-author']:
author = build.author
if author:
recipients.append(u'%s <%s>' % (author.name, author.email))
recipients.extend(options['mail.notify-addresses'])
if build_type.is_initial_commit_build(build):
recipients.extend(options['mail.notify-addresses-revisions'])
return recipients
def get_build_options(self, build):
"""
Returns a build's mail options as a
{
'mail.notify-author': bool,
'mail.notify-addresses': set,
'mail.notify-addresses-revisions': set,
} dict.
The 'mail.notify-author' option is True unless the build and all
failing jobs have turned off the mail.notify-author option.
The mail.notify-addresses and mail.notify-addresses-revisions options
respectively are sets of email addresses constructed by merging the
corresponding options of all failing jobs. Note that the build's
options are used as defaults when constructing the options for
each job, so that the job options override the build options.
Finally, the build's own options are used if there are no failing jobs.
"""
default_options = {
'mail.notify-author': '1',
'mail.notify-addresses': '',
'mail.notify-addresses-revisions': '',
}
build_options = dict(
default_options,
**dict(db.session.query(
ProjectOption.name, ProjectOption.value
).filter(
ProjectOption.project_id == build.project_id,
ProjectOption.name.in_(default_options.keys()),
))
)
# Get options for all failing jobs.
jobs_options = []
for job in list(Job.query.filter(Job.build_id == build.id)):
if job.result != Result.passed:
jobs_options.append(dict(
build_options, **self.get_job_options(job)))
# Merge all options.
# Fallback to build options in case there are no failing jobs.
all_options = jobs_options or [build_options]
merged_options = {
# Notify the author unless all jobs and the build have turned the
# notify-author option off.
'mail.notify-author': any(
imap(
lambda options: options.get('mail.notify-author') == '1',
all_options,
),
),
'mail.notify-addresses': set(),
'mail.notify-addresses-revisions': set(),
}
recipient_keys = ['mail.notify-addresses', 'mail.notify-addresses-revisions']
for options in all_options:
for key in recipient_keys:
                # XXX(dcramer): we don't have option validators, so let's assume
# people enter slightly incorrect values
merged_options[key] |= set(
[x.strip() for x in options[key].split(',') if x.strip()]
)
return merged_options
def get_job_options(self, job):
jobplan = JobPlan.query.filter(
JobPlan.job_id == job.id,
).first()
options = {}
if jobplan and 'snapshot' in jobplan.data:
options = jobplan.data['snapshot']['options']
return options
def build_finished_handler(build_id, *args, **kwargs):
build = Build.query.get(build_id)
if not build:
return
if not build.collection_id:
# If there isn't a collection_id, assume the build stands alone.
# All builds should probably have collection_id set.
builds = [build]
else:
builds = list(
Build.query.filter(Build.collection_id == build.collection_id))
# Exit if there are no builds for the given build_id, or any build hasn't
# finished.
if not builds or any(map(lambda build: build.status != Status.finished, builds)):
return
notification_handler = MailNotificationHandler()
msg = notification_handler.get_msg(builds)
if msg is not None:
notification_handler.send(msg, build)
| apache-2.0 | 7,238,504,638,627,023,000 | 35.39834 | 102 | 0.614683 | false | 4.35119 | false | false | false |
pidydx/grr | grr/lib/flows/general/audit.py | 1 | 2003 | #!/usr/bin/env python
"""This implements the auditing system.
How does it work?
Noteworthy events within the GRR system (such as approval granting, flow
execution etc) generate events to notify listeners about the event.
The audit system consists of a group of event listeners which receive these
events and act upon them.
"""
from grr.lib import aff4
from grr.lib import events
from grr.lib import flow
from grr.lib import queues
from grr.lib import rdfvalue
from grr.lib import sequential_collection
AUDIT_EVENT = "Audit"
class AuditEventCollection(sequential_collection.IndexedSequentialCollection):
RDF_TYPE = events.AuditEvent
def AllAuditLogs(token=None):
# TODO(user): This is not great, we should store this differently.
for log in aff4.FACTORY.Open("aff4:/audit/logs", token=token).ListChildren():
yield AuditEventCollection(log, token=token)
def AuditLogsForTimespan(start_time, end_time, token=None):
# TODO(user): This is not great, we should store this differently.
for log in aff4.FACTORY.Open(
"aff4:/audit/logs", token=token).ListChildren(age=(start_time, end_time)):
yield AuditEventCollection(log, token=token)
class AuditEventListener(flow.EventListener):
"""Receive the audit events."""
well_known_session_id = rdfvalue.SessionID(
base="aff4:/audit", queue=queues.FLOWS, flow_name="listener")
EVENTS = [AUDIT_EVENT]
created_logs = set()
def EnsureLogIsIndexed(self, log_urn):
if log_urn not in self.created_logs:
# Just write any type to the aff4 space so we can determine
# which audit logs exist easily.
aff4.FACTORY.Create(
log_urn, aff4.AFF4Volume, mode="w", token=self.token).Close()
self.created_logs.add(log_urn)
return log_urn
@flow.EventHandler(auth_required=False)
def ProcessMessage(self, message=None, event=None):
_ = message
log_urn = aff4.CurrentAuditLog()
self.EnsureLogIsIndexed(log_urn)
AuditEventCollection.StaticAdd(log_urn, self.token, event)
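# Illustrative sketch (not part of this module): other GRR components publish
# audit events roughly as below, and this listener then stores them in the
# current audit log. Exact call paths and AuditEvent fields vary between GRR
# versions, so treat this as an assumption rather than a reference.
#
#   events.Events.PublishEvent(
#       AUDIT_EVENT,
#       events.AuditEvent(user="admin", action="CLIENT_APPROVAL_GRANT"),
#       token=token)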
| apache-2.0 | 4,533,730,079,903,174,000 | 30.793651 | 80 | 0.736895 | false | 3.489547 | false | false | false |
MicBrain/Tic_Tac_Toe | Tic_Tac_Toe.py | 1 | 8653 | ###################
### DESCRIPTION ###
###################
"""
Tic-tac-toe (or Noughts and crosses, Xs and Os) is a game for two players, X and O, who take
turns marking the spaces in a 3×3 grid. The player who succeeds in placing three respective marks
in a horizontal, vertical, or diagonal row wins the game.
The simplicity of Tic-tac-toe makes it ideal as a pedagogical tool for teaching the concepts
of good sportsmanship and the branch of artificial intelligence that deals with the searching of
game trees. It is straightforward to write a computer program to play Tic-tac-toe perfectly.
The game can be generalized to an m,n,k-game in which two players alternate placing stones of
their own color on an m×n board, with the goal of getting k of their own color in a row. Tic-tac-toe
is the (3,3,3)-game.
Despite its apparent simplicity, Tic-tac-toe requires detailed analysis to determine even some
elementary combinatory facts, the most interesting of which are the number of possible games and the
number of possible positions. A position is merely a state of the board, while a game usually refers
to the way a terminal position is obtained.
"""
from string import *
from random import *
import itertools
import math
####################
## MAIN VARIABLES ##
####################
Player_1 = 'x' # player 1's mark
Player_2 = 'o' # player 2's mark
A = 'A' # these just make it easier to keep referring to 'A', 'B' and 'C'
B = 'B'
C = 'C'
#####################
## State variables ##
#####################
EMPTY = ' '
Table = [[EMPTY, EMPTY, EMPTY],
[EMPTY, EMPTY, EMPTY],
[EMPTY, EMPTY, EMPTY]]
current = randint(1, 2)
#########################
### Coordinate system ###
#########################
def square(row, col): # squares are represented as tuples of (row, col).
return (row, col) # rows are numbered 1 thru 3, cols 'A' thru 'C'.
def square_row(square): # these two functions save us the hassle of using
return square[0] # index values in our code, e.g. square[0]...
def square_col(square): # from this point on, i should never directly use
return square[1] # tuples when working with squares.
def get_square(square):
row_i = square_row(square) - 1
col_i = ord(square_col(square)) - ord(A)
return Table[row_i][col_i] # note how this and set_square are the ONLY
# functions which directly use board!
def set_square(square, mark):
row_i = square_row(square) - 1
col_i = ord(square_col(square)) - ord(A)
    Table[row_i][col_i] = mark # note how this and get_square are the ONLY functions which directly use the board!
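# Illustrative use of the coordinate helpers (not part of the game flow):
#   set_square(square(2, B), Player_1)   # mark the centre square with 'x'
#   get_square((2, B))                   # -> 'x'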
def get_row(row):
return [get_square((row, A)), get_square((row, B)), get_square((row, C))]
def get_column(col):
return [get_square((1, col)), get_square((2, col)), get_square((3, col))]
def get_diagonal(corner_square):
if corner_square == (1, A) or corner_square == (3, C):
return [get_square((1, A)), get_square((2, B)), get_square((3, C))]
else:
return [get_square((1, C)), get_square((2, B)), get_square((3, A))]
def get_mark(player):
if player == 1:
return Player_1
else:
return Player_2
def all_squares_filled():
for row in range(1, 4): # range(1, 4) returns the list [1, 2, 3]
if EMPTY in get_row(row):
return False # this row contains an empty square, we know enough
return True # no empty squares found, all squares are filled
def player_has_won(player):
MARK = get_mark(player)
win = [MARK, MARK, MARK]
if get_row(1) == win or get_row(2) == win or get_row(3) == win:
return True
if get_column(A) == win or get_column(B) == win or get_column(C) == win:
return True
if get_diagonal((1, A)) == win or get_diagonal((1, C)) == win:
return True
return False
def draw_board_straight():
A1, A2, A3 = get_square((1, A)), get_square((2, A)), get_square((3, A))
B1, B2, B3 = get_square((1, B)), get_square((2, B)), get_square((3, B))
C1, C2, C3 = get_square((1, C)), get_square((2, C)), get_square((3, C))
lines = []
lines.append("")
lines.append(" " + A + " " + B + " " + C + " ")
lines.append(" ")
lines.append("1 " + A1 + " | " + B1 + " | " + C1 + " ")
lines.append(" ---+---+---")
lines.append("2 " + A2 + " | " + B2 + " | " + C2 + " ")
lines.append(" ---+---+---")
lines.append("3 " + A3 + " | " + B3 + " | " + C3 + " ")
lines.append("")
    return '\n'.join(lines) # the '\n' represents a newline
def draw_board_slanted():
A1, A2, A3 = get_square((1, A)), get_square((2, A)), get_square((3, A))
B1, B2, B3 = get_square((1, B)), get_square((2, B)), get_square((3, B))
C1, C2, C3 = get_square((1, C)), get_square((2, C)), get_square((3, C))
lines = []
lines.append("")
lines.append(" " + A + " " + B + " " + C + " ")
lines.append(" ")
lines.append(" 1 " + A1 + " / " + B1 + " / " + C1 + " ")
lines.append(" ---/---/--- ")
lines.append(" 2 " + A2 + " / " + B2 + " / " + C2 + " ")
lines.append(" ---/---/--- ")
lines.append("3 " + A3 + " / " + B3 + " / " + C3 + " ")
lines.append("")
    return '\n'.join(lines)
def draw_board():
return draw_board_slanted()
def reset_main_board():
for row in (1, 2, 3):
for col in (A, B, C):
set_square(square(row, col), EMPTY)
def play():
global current
reset_main_board()
current = randint(1, 2)
print ("Tic-Tac-Toe!")
    print()
player1_name = input("Player 1, what is your name? ")
player2_name = input("Player 2, what is your name? ")
def get_name(player):
if player == 1:
return player1_name
else:
return player2_name
    print()
print ("Welcome,", player1_name, "and", player2_name + "!")
print (player1_name, "will be", Player_1 + ", and", player2_name, "will be", Player_2 + ".")
print ("By random decision,", get_name(current), "will go first.")
    print()
input("[Press enter when ready to play.] ") # just waiting for them to press enter
print (draw_board())
while not all_squares_filled():
choice = input(get_name(current) + ", which square? (e.g. 2B, 2b, B2 or b2) ")
if len(choice) != 2:
print ("That's not a square. You must enter a square like b2, or 3C.")
            print()
continue
if choice[0] not in ["1", "2", "3"] and str.upper(choice[0]) not in [A, B, C]:
print ("The first character must be a row (1, 2 or 3) or column (A, B or C).")
            print()
continue
if choice[1] not in ["1", "2", "3"] and str.upper(choice[1]) not in [A, B, C]:
print ("The second character must be a row (1, 2 or 3) or column (A, B or C).")
            print()
continue
if choice[0] in ["1", "2", "3"] and choice[1] in ["1", "2", "3"]:
print ("You entered two rows! You must enter one row and one column (A, B or C).")
            print()
continue
if str.upper(choice[0]) in [A, B, C] and str.upper(choice[1]) in [A, B, C]:
print ("You entered two columns! You must enter one row (1, 2 or 3) and one column.")
            print()
continue
if choice[0] in ["1", "2", "3"]:
row = int(choice[0])
col = str.upper(choice[1])
else:
row = int(choice[1])
col = str.upper(choice[0])
choice = square(row, col) # make this into a (row, col) tuple
if get_square(choice) != EMPTY:
print ("Sorry, that square is already marked.")
            print()
continue
set_square(choice, get_mark(current))
print (draw_board())
if player_has_won(current):
print ("Congratulations", get_name(current), "-- you win!")
            print()
break
if all_squares_filled():
print ("Cats game!", player1_name, "and", player2_name, "draw.")
            print()
break
current = 3 - current # sets 1 to 2 and 2 to 1
print ("GAME IS OVER")
    print()
if __name__ == "__main__":
continue_playing = True
while continue_playing:
play()
again = str.lower(input("Play again? (y/n) "))
        print()
        print()
        print()
if again != "y":
continue_playing = False
print ("Thanks for playing!")
    print()
| gpl-3.0 | -7,830,777,343,375,921,000 | 37.620536 | 101 | 0.539475 | false | 3.309487 | false | false | false |
jpetto/bedrock | bedrock/firefox/helpers.py | 1 | 8778 | from collections import OrderedDict
from django.core.cache import cache
from django.conf import settings
import jingo
import jinja2
from bedrock.firefox.models import FirefoxOSFeedLink
from bedrock.firefox.firefox_details import firefox_desktop, firefox_android, firefox_ios
from bedrock.base.urlresolvers import reverse
from lib.l10n_utils import get_locale
def android_builds(channel, builds=None):
builds = builds or []
variations = OrderedDict([
('api-9', 'Gingerbread'),
('api-15', 'Ice Cream Sandwich+'),
('x86', 'x86'),
])
if channel == 'alpha':
for type, arch_pretty in variations.iteritems():
link = firefox_android.get_download_url('alpha', type)
builds.append({'os': 'android',
'os_pretty': 'Android',
'os_arch_pretty': 'Android %s' % arch_pretty,
'arch': 'x86' if type == 'x86' else 'armv7up %s' % type,
'arch_pretty': arch_pretty,
'download_link': link})
else:
link = firefox_android.get_download_url(channel)
builds.append({'os': 'android',
'os_pretty': 'Android',
'download_link': link})
return builds
def ios_builds(channel, builds=None):
builds = builds or []
link = firefox_ios.get_download_url(channel)
builds.append({'os': 'ios',
'os_pretty': 'iOS',
'download_link': link})
return builds
@jingo.register.function
@jinja2.contextfunction
def download_firefox(ctx, channel='release', small=False, icon=True,
platform='all', dom_id=None, locale=None, simple=False,
force_direct=False, force_full_installer=False,
force_funnelcake=False, check_old_fx=False):
""" Output a "download firefox" button.
:param ctx: context from calling template.
:param channel: name of channel: 'release', 'beta' or 'alpha'.
:param small: Display the small button if True.
:param icon: Display the Fx icon on the button if True.
:param platform: Target platform: 'desktop', 'android', 'ios', or 'all'.
:param dom_id: Use this string as the id attr on the element.
:param locale: The locale of the download. Default to locale of request.
:param simple: Display button with text only if True. Will not display
icon or privacy/what's new/systems & languages links. Can be used
in conjunction with 'small'.
:param force_direct: Force the download URL to be direct.
:param force_full_installer: Force the installer download to not be
the stub installer (for aurora).
:param force_funnelcake: Force the download version for en-US Windows to be
'latest', which bouncer will translate to the funnelcake build.
:param check_old_fx: Checks to see if the user is on an old version of
Firefox and, if true, changes the button text from 'Free Download'
to 'Update your Firefox'. Must be used in conjunction with
'simple' param being true.
:return: The button html.
"""
show_desktop = platform in ['all', 'desktop']
show_android = platform in ['all', 'android']
show_ios = platform in ['all', 'ios']
alt_channel = '' if channel == 'release' else channel
locale = locale or get_locale(ctx['request'])
funnelcake_id = ctx.get('funnelcake_id', False)
dom_id = dom_id or 'download-button-%s-%s' % (
'desktop' if platform == 'all' else platform, channel)
l_version = firefox_desktop.latest_builds(locale, channel)
if l_version:
version, platforms = l_version
else:
locale = 'en-US'
version, platforms = firefox_desktop.latest_builds('en-US', channel)
# Gather data about the build for each platform
builds = []
if show_desktop:
for plat_os, plat_os_pretty in firefox_desktop.platform_labels.iteritems():
# Windows 64-bit builds are not available on the ESR channel yet
if plat_os == 'win64' and channel in ['esr', 'esr_next']:
continue
# Fallback to en-US if this plat_os/version isn't available
# for the current locale
_locale = locale if plat_os_pretty in platforms else 'en-US'
# And generate all the info
download_link = firefox_desktop.get_download_url(
channel, version, plat_os, _locale,
force_direct=force_direct,
force_full_installer=force_full_installer,
force_funnelcake=force_funnelcake,
funnelcake_id=funnelcake_id,
)
# If download_link_direct is False the data-direct-link attr
# will not be output, and the JS won't attempt the IE popup.
if force_direct:
# no need to run get_download_url again with the same args
download_link_direct = False
else:
download_link_direct = firefox_desktop.get_download_url(
channel, version, plat_os, _locale,
force_direct=True,
force_full_installer=force_full_installer,
force_funnelcake=force_funnelcake,
funnelcake_id=funnelcake_id,
)
if download_link_direct == download_link:
download_link_direct = False
builds.append({'os': plat_os,
'os_pretty': plat_os_pretty,
'download_link': download_link,
'download_link_direct': download_link_direct})
if show_android:
builds = android_builds(channel, builds)
if show_ios:
builds.append({'os': 'ios',
'os_pretty': 'iOS',
'download_link': firefox_ios.get_download_url()})
# Get the native name for current locale
langs = firefox_desktop.languages
locale_name = langs[locale]['native'] if locale in langs else locale
data = {
'locale_name': locale_name,
'version': version,
'product': 'firefox-%s' % platform,
'builds': builds,
'id': dom_id,
'small': small,
'simple': simple,
'channel': alt_channel,
'show_desktop': show_desktop,
'show_android': show_android,
'show_ios': show_ios,
'icon': icon,
'check_old_fx': check_old_fx and simple,
}
html = jingo.render_to_string(ctx['request'],
'firefox/includes/download-button.html',
data)
return jinja2.Markup(html)
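# Illustrative template usage of the helper above (assumed calls; mirrors the
# style of the firefox_url examples below):
#   {{ download_firefox() }}
#   {{ download_firefox('beta', platform='android', small=True, icon=False) }}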
@jingo.register.function
def firefox_url(platform, page, channel=None):
"""
Return a product-related URL like /firefox/all/ or /mobile/beta/notes/.
Examples
========
In Template
-----------
{{ firefox_url('desktop', 'all', 'organizations') }}
{{ firefox_url('desktop', 'sysreq', channel) }}
{{ firefox_url('android', 'notes') }}
"""
kwargs = {}
# Tweak the channel name for the naming URL pattern in urls.py
if channel == 'release':
channel = None
if channel == 'alpha':
if platform == 'desktop':
channel = 'developer'
if platform == 'android':
channel = 'aurora'
if channel == 'esr':
channel = 'organizations'
if channel:
kwargs['channel'] = channel
if platform != 'desktop':
kwargs['platform'] = platform
# Firefox for Android and iOS have the system requirements page on SUMO
if platform in ['android', 'ios'] and page == 'sysreq':
return settings.FIREFOX_MOBILE_SYSREQ_URL
return reverse('firefox.%s' % page, kwargs=kwargs)
@jingo.register.function
def firefox_os_feed_links(locale, force_cache_refresh=False):
if locale in settings.FIREFOX_OS_FEED_LOCALES:
cache_key = 'firefox-os-feed-links-' + locale
if not force_cache_refresh:
links = cache.get(cache_key)
if links:
return links
links = list(
FirefoxOSFeedLink.objects.filter(locale=locale).order_by(
'-id').values_list('link', 'title')[:10])
cache.set(cache_key, links)
return links
elif '-' in locale:
return firefox_os_feed_links(locale.split('-')[0])
@jingo.register.function
def firefox_os_blog_link(locale):
try:
return settings.FXOS_PRESS_BLOG_LINKS[locale]
except KeyError:
if '-' in locale:
return firefox_os_blog_link(locale.split('-')[0])
else:
return None
| mpl-2.0 | 5,004,060,017,684,913,000 | 35.728033 | 89 | 0.583276 | false | 4.00639 | false | false | false |
satish-avninetworks/murano | murano/dsl/murano_package.py | 1 | 7758 | # Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import weakref
import semantic_version
import six
from yaql.language import specs
from yaql.language import utils
from murano.dsl import constants
from murano.dsl import dsl_types
from murano.dsl import exceptions
from murano.dsl import helpers
from murano.dsl import meta as dslmeta
from murano.dsl import murano_object
from murano.dsl import murano_type
from murano.dsl import namespace_resolver
from murano.dsl import principal_objects
from murano.dsl import yaql_integration
class MuranoPackage(dsl_types.MuranoPackage, dslmeta.MetaProvider):
def __init__(self, package_loader, name, version=None,
runtime_version=None, requirements=None, meta=None):
super(MuranoPackage, self).__init__()
self._package_loader = weakref.proxy(package_loader)
self._name = name
self._meta = None
self._version = helpers.parse_version(version)
self._runtime_version = helpers.parse_version(runtime_version)
self._requirements = {
name: semantic_version.Spec('==' + str(self._version.major))
}
if name != constants.CORE_LIBRARY:
self._requirements[constants.CORE_LIBRARY] = \
semantic_version.Spec('==0')
self._classes = {}
self._imported_types = {object, murano_object.MuranoObject}
for key, value in six.iteritems(requirements or {}):
self._requirements[key] = helpers.parse_version_spec(value)
self._load_queue = {}
self._native_load_queue = {}
if self.name == constants.CORE_LIBRARY:
principal_objects.register(self)
self._package_class = self._create_package_class()
self._meta = dslmeta.MetaData(
meta, dsl_types.MetaTargets.Package, self._package_class)
@property
def package_loader(self):
return self._package_loader
@property
def name(self):
return self._name
@property
def version(self):
return self._version
@property
def runtime_version(self):
return self._runtime_version
@property
def requirements(self):
return self._requirements
@property
def classes(self):
return set(self._classes.keys()).union(
self._load_queue.keys()).union(self._native_load_queue.keys())
def get_resource(self, name):
raise NotImplementedError('resource API is not implemented')
# noinspection PyMethodMayBeStatic
def get_class_config(self, name):
return {}
def _register_mpl_classes(self, data, name=None):
type_obj = self._classes.get(name)
if type_obj is not None:
return type_obj
if callable(data):
data = data()
data = helpers.list_value(data)
unnamed_class = None
last_ns = {}
for cls_data in data:
last_ns = cls_data.setdefault('Namespaces', last_ns.copy())
if len(cls_data) == 1:
continue
cls_name = cls_data.get('Name')
if not cls_name:
if unnamed_class:
raise exceptions.AmbiguousClassName(name)
unnamed_class = cls_data
else:
ns_resolver = namespace_resolver.NamespaceResolver(last_ns)
cls_name = ns_resolver.resolve_name(cls_name)
if cls_name == name:
type_obj = murano_type.create(
cls_data, self, cls_name, ns_resolver)
self._classes[name] = type_obj
else:
self._load_queue.setdefault(cls_name, cls_data)
if type_obj is None and unnamed_class:
unnamed_class['Name'] = name
return self._register_mpl_classes(unnamed_class, name)
return type_obj
def _register_native_class(self, cls, name):
if cls in self._imported_types:
return self._classes[name]
try:
m_class = self.find_class(name, False)
except exceptions.NoClassFound:
m_class = self._register_mpl_classes({'Name': name}, name)
m_class.extension_class = cls
for method_name in dir(cls):
if method_name.startswith('_'):
continue
method = getattr(cls, method_name)
if not any((
helpers.inspect_is_method(cls, method_name),
helpers.inspect_is_static(cls, method_name),
helpers.inspect_is_classmethod(cls, method_name))):
continue
method_name_alias = (getattr(
method, '__murano_name', None) or
specs.convert_function_name(
method_name, yaql_integration.CONVENTION))
m_class.add_method(method_name_alias, method, method_name)
self._imported_types.add(cls)
return m_class
def register_class(self, cls, name=None):
if inspect.isclass(cls):
name = name or getattr(cls, '__murano_name', None) or cls.__name__
if name in self._classes:
self._register_native_class(cls, name)
else:
self._native_load_queue.setdefault(name, cls)
elif isinstance(cls, dsl_types.MuranoType):
self._classes[cls.name] = cls
elif name not in self._classes:
self._load_queue[name] = cls
def find_class(self, name, search_requirements=True):
payload = self._native_load_queue.pop(name, None)
if payload is not None:
return self._register_native_class(payload, name)
payload = self._load_queue.pop(name, None)
if payload is not None:
result = self._register_mpl_classes(payload, name)
if result:
return result
result = self._classes.get(name)
if result:
return result
if search_requirements:
pkgs_for_search = []
for package_name, version_spec in six.iteritems(
self._requirements):
if package_name == self.name:
continue
referenced_package = self._package_loader.load_package(
package_name, version_spec)
try:
return referenced_package.find_class(name, False)
except exceptions.NoClassFound:
pkgs_for_search.append(referenced_package)
continue
raise exceptions.NoClassFound(
name, packages=pkgs_for_search + [self])
raise exceptions.NoClassFound(name, packages=[self])
@property
def context(self):
return None
def _create_package_class(self):
ns_resolver = namespace_resolver.NamespaceResolver(None)
return murano_type.MuranoClass(
ns_resolver, self.name, self, utils.NO_VALUE)
def get_meta(self, context):
if not self._meta:
return []
return self._meta.get_meta(context)
def __repr__(self):
return 'MuranoPackage({name})'.format(name=self.name)
| apache-2.0 | 3,568,733,459,473,349,000 | 35.252336 | 78 | 0.59603 | false | 4.10911 | false | false | false |
DevHugo/zds-site | zds/utils/tutorials.py | 1 | 2669 | # coding: utf-8
import os
# Used for indexing tutorials; we need to parse each manifest to know which content has been published
class GetPublished:
published_part = []
published_chapter = []
published_extract = []
def __init__(self):
pass
@classmethod
def get_published_content(cls):
        # If all arrays are empty, load them
if not len(GetPublished.published_part) and \
not len(GetPublished.published_chapter) and \
not len(GetPublished.published_extract):
# Get all published tutorials
from zds.tutorial.models import Tutorial
tutorials_database = Tutorial.objects.filter(sha_public__isnull=False).all()
for tutorial in tutorials_database:
# Load Manifest
json = tutorial.load_json_for_public()
# Parse it
GetPublished.load_tutorial(json)
return {"parts": GetPublished.published_part,
"chapters": GetPublished.published_chapter,
"extracts": GetPublished.published_extract}
@classmethod
def load_tutorial(cls, json):
# Load parts, chapter and extract
if 'parts' in json:
for part_json in json['parts']:
# If inside of parts we have chapters, load it
GetPublished.load_chapters(part_json)
GetPublished.load_extracts(part_json)
GetPublished.published_part.append(part_json['pk'])
GetPublished.load_chapters(json)
GetPublished.load_extracts(json)
@classmethod
def load_chapters(cls, json):
if 'chapters' in json:
for chapters_json in json['chapters']:
GetPublished.published_chapter.append(chapters_json['pk'])
GetPublished.load_extracts(chapters_json)
return GetPublished.published_chapter
@classmethod
def load_extracts(cls, json):
if 'extracts' in json:
for extract_json in json['extracts']:
GetPublished.published_extract.append(extract_json['pk'])
return GetPublished.published_extract
def get_blob(tree, chemin):
for blob in tree.blobs:
try:
if os.path.abspath(blob.path) == os.path.abspath(chemin):
data = blob.data_stream.read()
return data.decode('utf-8')
except (OSError, IOError):
return ""
if len(tree.trees) > 0:
for atree in tree.trees:
result = get_blob(atree, chemin)
if result is not None:
return result
return None
else:
return None
| gpl-3.0 | -8,591,455,257,756,504,000 | 30.034884 | 103 | 0.59423 | false | 4.284109 | false | false | false |
Hubert51/AutoGrading | learning/number_recognization/test.py | 1 | 1250 | from pytesseract import image_to_string
from PIL import Image
import cv2
import numpy
import sys
if __name__ == '__main__':
f = open("test1.txt")
f = f.read()
for element in f:
str1 = element
position = ((712, 571), (725, 587))
dh = position[1][1] - position[0][1]
upper = position[0][1] - 2 * dh
lower = position[1][1] + int(3.5 * dh)
left = position[1][0]
print(upper,lower, left)
img = cv2.imread('answerSheet_with_name.png')
#image = Image.open('answerSheet_with_name.png')
    img = img[upper:lower, left:img.shape[1]]   # crop: rows upper..lower, columns from left to the right edge
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,6)
cv2.imshow("hello", img)
################# Now finding Contours ###################
img,contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0, 0, 255),1)
im = Image.fromarray(img, 'RGB')
file = open("image_to_string.txt", "w")
# box = image_to_string(image).split('\n')
file.write(image_to_string(im))
#file.write(image_to_string(image))
file.close()
| mit | -715,447,482,893,040,000 | 26.777778 | 98 | 0.6064 | false | 2.82167 | false | false | false |
bblais/Tech-SIE | Estimating_Proportion/Estimating_Proportion.py | 1 | 4755 |
# coding: utf-8
# #Statistical Inference for Everyone: Technical Supplement
#
#
#
# This document is the technical supplement, for instructors, for [Statistical Inference for Everyone], the introductory statistical inference textbook from the perspective of "probability theory as logic".
#
# <img src="http://web.bryant.edu/~bblais/images/Saturn_with_Dice.png" align=center width = 250px />
#
# [Statistical Inference for Everyone]: http://web.bryant.edu/~bblais/statistical-inference-for-everyone-sie.html
#
# ## Estimating a Proportion
#
# $$\newcommand{\twocvec}[2]{\left(\begin{array}{c}
# #1 \\\\ #2
# \end{array}\right)}
# \newcommand{\nchoosek}[2]{\twocvec{#1}{#2}}
# $$
#
# If $\theta$ is the model representing the probability, $\theta$, of the coin
# landing on heads (and $1-\theta$ is the probability of landing on tails), we
# need to make an estimate of probability of model $\theta$ being true given the
# data, which will consist of $N$ flips of which $h$ are heads.
#
# Bayes rule is:
# \begin{eqnarray}
# p(\theta|D,I) &=& \frac{p(D|\theta,I)p(\theta|I)}{p(D|I)} =
# \frac{p(D|\theta,I)p(\theta,I)}{\sum_\theta p(D|\theta,I)p(\theta|I)}
# \end{eqnarray}
#
# Thus, the probability of a particular model $\theta$ being true is the product
# of the probability of the observed data ($h$ heads in $N$ flips) given the
# model $\theta$ and the prior probability of the model $\theta$ being true
# before we even look at the data, divided by the probability of the data itself
# over all models.
#
# The prior probability of model $\theta$ will be assumed to be uniform (from
# maximum entropy considerations). The probability, $\theta$, ranges from 0 to
# 1, to the prior is
# \begin{eqnarray}
# p(\theta|I) = 1
# \end{eqnarray}
#
# The probability of the data given the random model, is just the binomial
# distribution:
#
# \begin{eqnarray}
# p(D|\theta)=\nchoosek{N}{h} \theta^h (1-\theta)^{N-h}
# \end{eqnarray}
#
# The probability of the data, $p(D|I)$, is found by summing (or in this case
# integrating) $p(D|\theta,I)p(\theta|I)$ for all $\theta$:
#
# \begin{eqnarray}
# p(D|I) &=& \int_0^1 \nchoosek{N}{h} \theta^h (1-\theta)^{N-h} \cdot 1 d\theta
# \\\\
# &=&\frac{N!}{h!(N-h)!} \frac{h!(N-h)!}{(N+1)!} = \frac{1}{N+1}
# \end{eqnarray}
#
# Now the probability of model $\theta$ being true, given the data, is just
#
# \begin{eqnarray}
# p(\theta|D,I)&=& (N+1) \cdot \nchoosek{N}{h} \theta^h (1-\theta)^{N-h} \\
# &=& \frac{(N+1)!}{h!(N-h)!} \theta^h (1-\theta)^{N-h}
# \end{eqnarray}
#
#
# ### Max, Mean, Variance
#
# The model with the maximum probability is found by maximizing $p(\theta|D,I)$
# w.r.t. $\theta$:
#
# \begin{eqnarray}
# \frac{dP(\theta|D,I)}{d\theta} &=& 0 = \frac{(N+1)!}{h!(N-h)!} \left(
# -(N-h) \theta^h (1-\theta)^{N-h-1} + h \theta^{h-1} (1-\theta)^{N-h} \right) \\\\
# (N-h) \theta^h (1-\theta)^{N-h-1} &=& h \theta^{h-1} (1-\theta)^{N-h} \\\\
# \theta(N-h) &=& (1-\theta) h = h-\theta h = N\theta-\theta h \\\\
# \theta&=&\frac{h}{N} \;\;\;\;\;\surd
# \end{eqnarray}
#
# The average and the standard deviation is also straightforward.
#
#
# \begin{eqnarray}
# \bar{\theta} &=& \int_0^1 \theta \cdot \frac{(N+1)!}{h!(N-h)!} \theta^h (1-\theta)^{N-h} \\\\
# &=& \frac{(N+1)!}{h!(N-h)!} \int_0^1 \theta^{h+1} (1-\theta)^{N-h} \\\\
# &=&\frac{(N+1)!}{h!(N-h)!} \frac{(h+1)!(N-h)!}{(N+2)!} \\\\
# &=&\frac{h+1}{N+2} \\\\
# \bar{\theta^2} &=& \int_0^1 \theta^2 \cdot \frac{(N+1)!}{h!(N-h)!} \theta^h (1-\theta)^{N-h} \\\\
# &=&\frac{(N+1)!}{h!(N-h)!} \frac{(h+2)!(N-h)!}{(N+3)!} \\\\
# &=&\frac{(h+1)(h+2)}{(N+2)(N+3)} \\\\
# \sigma^2 &=& \bar{\theta^2} - \bar{\theta}^2 = \frac{(h+1)(h+2)}{(N+2)(N+3)} -
# \frac{(h+1)(h+1)}{(N+2)(N+2)} \\\\
# &=&\frac{(h+1)(N-h+1)}{(N+2)^2(N+3)} \\\\
# &=& \frac{(h+1)}{(N+2)}\left( \frac{N+2}{N+2} - \frac{h+1}{N+2}\right)
# \frac{1}{N+3} \\\\
# &=& \bar{\theta}(1-\bar{\theta})\frac{1}{N+3}
# \end{eqnarray}
#
# ### An Approximation for the Variance
#
# If $f=h/N$ is the actual fraction of heads observed, then the variance above
# can be written as
# \begin{eqnarray}
# \sigma^2 &=&\frac{(fN+1)(N-fN+1)}{(N+2)^2(N+3)} \\\\
# \mbox{(for large $N$)}&\approx& \frac{(fN+1)(N-fN)}{N^3}
# =\frac{(fN+1)(1-f)}{N^2} \\\\
# \mbox{(for large $fN$)}&\approx& \frac{(fN)(1-f)}{N^2} = \frac{f(1-f)}{N} \\\\
# \sigma^2&\approx& \frac{f(1-f)}{N}
# \end{eqnarray}
#
# In this limit, the distribution (beta distribution) can be approximated with a
# Gaussian.
#
# In[11]:
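# A quick numerical check of the results above (illustrative sketch; assumes
# scipy is available and uses made-up example data of h heads in N flips):

# In[ ]:

from scipy.stats import beta as beta_dist

N, h = 250, 140                          # assumed example data
posterior = beta_dist(h + 1, N - h + 1)  # p(theta|D,I) is Beta(h+1, N-h+1)

print(posterior.mean())     # exact posterior mean, equals (h+1)/(N+2)
print(posterior.var())      # exact posterior variance
f = h / float(N)
print(f * (1 - f) / N)      # Gaussian approximation f(1-f)/N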
# ---------------------
# In[8]:
from IPython.core.display import HTML
def css_styling():
styles = open("../styles/custom.css", "r").read()
return HTML(styles)
css_styling()
| mit | 8,721,158,606,299,497,000 | 33.708029 | 206 | 0.578549 | false | 2.258907 | false | false | false |
wylee/django-local-settings | src/local_settings/util.py | 1 | 5070 | import importlib
import io
import os
import dotenv
NO_DEFAULT = type(
"NO_DEFAULT",
(),
{
"__nonzero__": (lambda self: False), # Python 2
"__bool__": (lambda self: False), # Python 3
"__str__": (lambda self: self.__class__.__name__),
"__repr__": (lambda self: str(self)),
"__copy__": (lambda self: self),
},
)()
def get_file_name():
"""Get local settings file from environ or discover it.
If the ``LOCAL_SETTINGS_FILE`` environment variable is set, its
value is returned directly.
Otherwise, the current working directory is searched for
`local.{ext}` for each file extension handled by each loading
:mod:`strategy`. Note that the search is done in alphabetical order
so that if ``local.cfg`` and ``local.yaml`` both exist, the former
will be returned.
Returns:
str: File name if set via environ or discovered
None: File name isn't set and wasn't discovered
"""
file_name = os.environ.get("LOCAL_SETTINGS_FILE")
if file_name:
return file_name
cwd = os.getcwd()
default_file_names = get_default_file_names()
for file_name in default_file_names:
file_name = os.path.join(cwd, file_name)
if os.path.exists(file_name):
return file_name
def get_default_file_names():
"""Get default file names for all loading strategies, sorted."""
from .strategy import get_file_type_map # noqa: Avoid circular import
return sorted(f"local.{ext}" for ext in get_file_type_map())
def parse_file_name_and_section(
file_name, section=None, extender=None, extender_section=None
):
"""Parse file name and (maybe) section.
File names can be absolute paths, relative paths, or asset
specs::
/home/user/project/local.cfg
local.cfg
some.package:local.cfg
File names can also include a section::
some.package:local.cfg#dev
If a ``section`` is passed, it will take precedence over a
section parsed out of the file name.
"""
if "#" in file_name:
file_name, parsed_section = file_name.rsplit("#", 1)
else:
parsed_section = None
if ":" in file_name:
file_name = asset_path(file_name)
if extender:
if not file_name:
# Extended another section in the same file
file_name = extender
elif not os.path.isabs(file_name):
# Extended by another file in the same directory
file_name = abs_path(file_name, relative_to=os.path.dirname(extender))
if section:
pass
elif parsed_section:
section = parsed_section
elif extender_section:
section = extender_section
else:
section = None
return file_name, section
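# Illustrative examples (assumed inputs; the asset-spec form resolves a real
# package path at runtime):
#
#   parse_file_name_and_section('local.cfg#dev')             # -> ('local.cfg', 'dev')
#   parse_file_name_and_section('local.cfg', section='prod') # -> ('local.cfg', 'prod')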
# Path utilities
def abs_path(path, relative_to=None):
"""Make path absolute and normalize it."""
if os.path.isabs(path):
path = os.path.normpath(path)
elif ":" in path:
path = asset_path(path)
else:
path = os.path.expanduser(path)
if relative_to:
path = os.path.join(relative_to, path)
path = os.path.abspath(path)
path = os.path.normpath(path)
return path
def asset_path(path):
"""Get absolute path from asset spec and normalize it."""
if ":" in path:
package_name, rel_path = path.split(":", 1)
else:
package_name, rel_path = path, ""
try:
package = importlib.import_module(package_name)
except ImportError:
raise ValueError(
f"Could not get asset path for {path}; could not import "
f"package: {package_name}"
)
if not hasattr(package, "__file__"):
raise ValueError("Can't compute path relative to namespace package")
package_path = os.path.dirname(package.__file__)
if rel_path:
path = os.path.join(package_path, rel_path)
path = os.path.normpath(path)
return path
def dotenv_path(path=None, relative_to=None, file_name=".env"):
"""Get .env path.
If a path is specified, convert it to an absolute path. Otherwise,
use the default, "./.env".
.. note:: By default, the dotenv package discovers the default .env
file relative to the call site, so we have to tell it use CWD.
"""
if path:
path = abs_path(path, relative_to)
else:
path = dotenv.find_dotenv(filename=file_name, usecwd=True)
return path
def load_dotenv(path=None, relative_to=None, file_name=".env"):
"""Load vars from dotenv file into environ."""
path = dotenv_path(path, relative_to, file_name)
dotenv.load_dotenv(path)
# These TTY functions were copied from Invoke
def is_a_tty(stream):
if hasattr(stream, "isatty") and callable(stream.isatty):
return stream.isatty()
elif has_fileno(stream):
return os.isatty(stream.fileno())
return False
def has_fileno(stream):
try:
return isinstance(stream.fileno(), int)
except (AttributeError, io.UnsupportedOperation):
return False
| mit | -6,834,773,356,538,003,000 | 26.258065 | 82 | 0.622091 | false | 3.843821 | false | false | false |
ngageoint/scale | scale/data/models.py | 1 | 24039 | """Defines the database models for datasets"""
from __future__ import absolute_import, unicode_literals
import copy
import logging
from collections import namedtuple
import django.contrib.postgres.fields
from django.db import models, transaction
from django.db.models import Q, Count
from data.data import data_util
from data.data.json.data_v6 import convert_data_to_v6_json, DataV6
from data.data.exceptions import InvalidData
from data.data.value import FileValue
from data.dataset.dataset import DataSetDefinition
from data.dataset.json.dataset_v6 import convert_definition_to_v6_json, DataSetDefinitionV6
from data.exceptions import InvalidDataSetDefinition, InvalidDataSetMember
from data.serializers import DataSetFileSerializerV6, DataSetMemberSerializerV6
from storage.models import ScaleFile
from util import rest as rest_utils
from util.database import alphabetize
logger = logging.getLogger(__name__)
DataSetValidation = namedtuple('DataSetValidation', ['is_valid', 'errors', 'warnings'])
# DataSetKey = namedtuple('DataSetKey', ['name', 'version'])
class DataSetManager(models.Manager):
"""Provides additional methods for handling datasets"""
def create_dataset_v6(self, definition, title=None, description=None):
"""Creates and returns a new dataset for the given name/title/description/definition/version??
:param definition: Parameter definition of the dataset
:type definition: :class:`data.dataset.dataset.DataSetDefinition`
:param title: Optional title of the dataset
:type title: string
:param description: Optional description of the dataset
:type description: string
:returns: The new dataset
:rtype: :class:`data.models.DataSet`
        :raises :class:`data.exceptions.InvalidDataSet`: If a given dataset has an invalid value
"""
if not definition:
definition = DataSetDefinition(definition={})
dataset = DataSet()
dataset.title = title
dataset.description = description
dataset.definition = definition.get_dict()
dataset.save()
return dataset
def get_details_v6(self, dataset_id):
"""Gets additional details for the given dataset id
:returns: The full dataset for the given id
:rtype: :class:`data.models.DataSet`
"""
ds = DataSet.objects.get(pk=dataset_id)
ds.files = DataSetFile.objects.get_dataset_files(ds.id)
return ds
def get_datasets_v6(self, started=None, ended=None, dataset_ids=None, keywords=None, order=None):
"""Handles retrieving datasets - possibly filtered and ordered
:returns: The list of datasets that match the given filters
:rtype: [:class:`data.models.DataSet`]
"""
return self.filter_datasets(started=started, ended=ended, dataset_ids=dataset_ids, keywords=keywords, order=order)
def filter_datasets(self, started=None, ended=None, dataset_ids=None, keywords=None, order=None):
"""Returns a query for dataset models that filters on the given fields
:param started: Query datasets created after this amount of time.
:type started: :class:`datetime.datetime`
:param ended: Query datasets created before this amount of time.
:type ended: :class:`datetime.datetime`
:param dataset_ids: Query datasets assciated with the given id(s)
:type dataset_ids: :func:`list`
:param keywords: Query datasets with title or description matching one of the specified keywords
:type keywords: :func:`list`
:param order: A list of fields to control the sort order.
:type order: :func:`list`
:returns: The dataset query
:rtype: :class:`django.db.models.QuerySet`
"""
# Fetch a list of the datasets
datasets = self.all()
# Apply time range filtering
if started:
datasets = datasets.filter(created__gte=started)
if ended:
datasets = datasets.filter(created__lte=ended)
# Apply additional filters
if dataset_ids:
datasets = datasets.filter(id__in=dataset_ids)
# Execute a sub-query that returns distinct job type names that match the provided filter arguments
if keywords:
key_query = Q()
for keyword in keywords:
key_query |= Q(title__icontains=keyword)
key_query |= Q(description__icontains=keyword)
datasets = datasets.filter(key_query)
# Apply sorting
if order:
ordering = alphabetize(order, DataSet.ALPHABETIZE_FIELDS)
datasets = datasets.order_by(*ordering)
else:
datasets = datasets.order_by('id')
for ds in datasets:
files = DataSetFile.objects.get_file_ids(dataset_ids=[ds.id])
ds.files = len(files)
return datasets
def validate_dataset_v6(self, definition, title=None, description=None):
"""Validates the given dataset definiton
:param definition: The dataset definition
:type definition: dict
:returns: The dataset validation
        :rtype: :class:`data.models.DataSetValidation`
"""
is_valid = True
errors = []
warnings = []
dataset_definition = None
try:
dataset_definition = DataSetDefinitionV6(definition=definition, do_validate=True)
except InvalidDataSetDefinition as ex:
is_valid = False
errors.append(ex.error)
message = 'Dataset definition is invalid: %s' % ex
logger.info(message)
pass
# validate other fields
return DataSetValidation(is_valid, errors, warnings)
def get_dataset_files(self, dataset_id):
"""Returns the files associated with the given dataset
:returns: The list of DataSetFiles matching the file_id
:rtype: [:class:`data.models.DataSetFile`]
"""
files = DataSetFile.objects.get_dataset_files(dataset_id=dataset_id)
return files
def get_dataset_members(self, dataset_id):
"""Returns the members associated with the given dataset_id
:returns: The list of DataSetMembers
:rtype: [:class:`data.models.DataSetMember`]
"""
dataset = self.get(pk=dataset_id)
members = DataSetMember.objects.all().filter(dataset=dataset)
return members
class DataSet(models.Model):
"""
Represents a DataSet object
:keyword name: The identifying name of the dataset used by clients for queries
:type name: :class:`django.db.models.CharField`
:keyword version: The version of the dataset
:type version: :class:`django.db.models.CharField`
:keyword version_array: The version of the dataset split into SemVer integer components (major,minor,patch,prerelease)
:type version_array: :func:`list`
:keyword title: The human-readable title of this dataset (optional)
:type title: :class:`django.db.models.CharField`
:keyword description: The description of the dataset (optional)
:type description: :class:`django.db.models.CharField`
:keyword created: Defines the created time of the dataset
:type created: :class:`django.db.models.DateTimeField`
:keyword definition: Defines the dataset
:type definition: class:`django.contrib.postgres.fields.JSONField`
"""
ALPHABETIZE_FIELDS = ['title', 'description']
title = models.CharField(blank=True, max_length=50, null=True)
description = models.TextField(blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
definition = django.contrib.postgres.fields.JSONField(default=dict)
objects = DataSetManager()
def get_definition(self):
"""Returns the dataset definition
:returns: The DataSet definition
:rtype: :class:`data.dataset.dataset.DataSetDefinition`
"""
if isinstance(self.definition, basestring):
self.definition = {}
return DataSetDefinitionV6(definition=self.definition).get_definition()
def get_v6_definition_json(self):
"""Returns the dataset definition in v6 of the JSON schema
:returns: The dataset definition in v6 of the JSON schema
:rtype: dict
"""
return rest_utils.strip_schema_version(convert_definition_to_v6_json(self.get_definition()).get_dict())
def get_dataset_definition(self):
"""Returns the dataset definition
:returns: The dataset definition json
:rtype: dict
"""
return self.definition
def get_dataset_members_json(self):
"""Returns the JSON for the associated dataset members
:returns: Returns the outgoing primitive representation.
        :rtype: dict
"""
members = DataSet.objects.get_dataset_members(dataset_id=self.id)
serializer = DataSetMemberSerializerV6(members, many=True)
return serializer.data
def get_dataset_files_json(self):
"""Returns the JSON for the associated dataset files
:returns: Returns the outgoing primitive representation.
        :rtype: dict
"""
files = DataSet.objects.get_dataset_files(self.id)
serializer = DataSetFileSerializerV6(files, many=True)
return serializer.data
class Meta(object):
"""meta information for the db"""
db_table = 'data_set'
class DataSetMemberManager(models.Manager):
"""Provides additional methods for handling dataset members"""
def build_data_list(self, template, data_started=None, data_ended=None, created_started=None, created_ended=None,
source_started=None, source_ended=None, source_sensor_classes=None, source_sensors=None,
source_collections=None,source_tasks=None, mod_started=None, mod_ended=None, job_type_ids=None,
job_type_names=None, job_ids=None, is_published=None, is_superseded=None, file_names=None,
job_outputs=None, recipe_ids=None, recipe_type_ids=None, recipe_nodes=None, batch_ids=None, order=None):
"""Builds a list of data dictionaries from a template and file filters
:param template: The template to fill with files found through filters
:type template: dict
:param data_started: Query files where data started after this time.
:type data_started: :class:`datetime.datetime`
:param data_ended: Query files where data ended before this time.
:type data_ended: :class:`datetime.datetime`
:param created_started: Query files created after this time.
:type created_started: :class:`datetime.datetime`
:param created_ended: Query files created before this time.
:type created_ended: :class:`datetime.datetime`
:param source_started: Query files where source collection started after this time.
:type source_started: :class:`datetime.datetime`
:param source_ended: Query files where source collection ended before this time.
:type source_ended: :class:`datetime.datetime`
:param source_sensor_classes: Query files with the given source sensor class.
:type source_sensor_classes: :func:`list`
        :param source_sensors: Query files with the given source sensor.
        :type source_sensors: :func:`list`
        :param source_collections: Query files with the given source collection.
        :type source_collections: :func:`list`
:param source_tasks: Query files with the given source tasks.
:type source_tasks: :func:`list`
:param mod_started: Query files where the last modified date is after this time.
:type mod_started: :class:`datetime.datetime`
:param mod_ended: Query files where the last modified date is before this time.
:type mod_ended: :class:`datetime.datetime`
:param job_type_ids: Query files with jobs with the given type identifier.
:type job_type_ids: :func:`list`
:param job_type_names: Query files with jobs with the given type name.
:type job_type_names: :func:`list`
:keyword job_ids: Query files with a given job id
:type job_ids: :func:`list`
:param is_published: Query files flagged as currently exposed for publication.
:type is_published: bool
:param is_superseded: Query files that have/have not been superseded.
:type is_superseded: bool
:param file_names: Query files with the given file names.
:type file_names: :func:`list`
:keyword job_outputs: Query files with the given job outputs
:type job_outputs: :func:`list`
:keyword recipe_ids: Query files with a given recipe id
:type recipe_ids: :func:`list`
:keyword recipe_nodes: Query files with a given recipe nodes
:type recipe_nodes: :func:`list`
:keyword recipe_type_ids: Query files with the given recipe types
:type recipe_type_ids: :func:`list`
:keyword batch_ids: Query files with batches with the given identifiers.
:type batch_ids: :func:`list`
:param order: A list of fields to control the sort order.
:type order: :func:`list`
"""
files = ScaleFile.objects.filter_files(
data_started=data_started, data_ended=data_ended,
source_started=source_started, source_ended=source_ended,
source_sensor_classes=source_sensor_classes, source_sensors=source_sensors,
source_collections=source_collections, source_tasks=source_tasks,
mod_started=mod_started, mod_ended=mod_ended, job_type_ids=job_type_ids,
job_type_names=job_type_names, job_ids=job_ids,
file_names=file_names, job_outputs=job_outputs, recipe_ids=recipe_ids,
recipe_type_ids=recipe_type_ids, recipe_nodes=recipe_nodes, batch_ids=batch_ids,
order=order)
data_list = []
try:
for f in files:
entry = copy.deepcopy(template)
file_params = entry['files']
for p in file_params:
if file_params[p] == 'FILE_VALUE':
file_params[p] = [f.id]
data_list.append(DataV6(data=entry, do_validate=True).get_data())
except (KeyError, TypeError) as ex:
raise InvalidData('INVALID_TEMPLATE', "Specified template is invalid: %s" % ex)
return data_list
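    # Illustrative template for build_data_list (the parameter name 'INPUT_FILE'
    # is an assumption): every file matched by the filters replaces the
    # 'FILE_VALUE' placeholder, producing one Data object per file.
    #
    #   template = {'files': {'INPUT_FILE': 'FILE_VALUE'}, 'json': {}}
    #   data_list = DataSetMember.objects.build_data_list(template, job_type_names=['my-job'])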
def validate_data_list(self, dataset_def, data_list):
"""Validates a list of data objects against a dataset
:param dataset_def: The dataset definition the member is a part of
:type dataset_def:
:param data_list: Data definitions of the dataset members
:type data_list: [:class:`data.data.data.Data`]
"""
is_valid = True
errors = []
warnings = []
for data in data_list:
try:
dataset_def.validate(data)
except (InvalidData, InvalidDataSetMember) as ex:
is_valid = False
errors.append(ex.error)
message = 'Dataset definition is invalid: %s' % ex
logger.info(message)
pass
# validate other fields
return DataSetValidation(is_valid, errors, warnings)
def create_dataset_members(self, dataset, data_list):
"""Creates a dataset member
:param dataset: The dataset the member is a part of
:type dataset: :class:`data.models.DataSet`
:param data_list: Data definitions of the dataset members
:type data_list: [:class:`data.data.data.Data`]
"""
with transaction.atomic():
dataset_members = []
datasetfiles = []
existing_scale_ids = DataSetFile.objects.get_file_ids(dataset_ids=[dataset.id])
for d in data_list:
dataset_member = DataSetMember()
dataset_member.dataset = dataset
dataset_member.data = convert_data_to_v6_json(d).get_dict()
dataset_member.file_ids = list(data_util.get_file_ids(d))
dataset_members.append(dataset_member)
datasetfiles.extend(DataSetFile.objects.create_dataset_files(dataset, d, existing_scale_ids))
existing_scale_ids.append(dataset_member.file_ids)
DataSetFile.objects.bulk_create(datasetfiles)
return DataSetMember.objects.bulk_create(dataset_members)
def get_dataset_members(self, dataset):
"""Returns dataset members for the given dataset
:returns: members for a given dataset
:rtype: QuerySet<DataSetMember>
"""
return self.all().filter(dataset=dataset).order_by('id')
def get_details_v6(self, dsm_id):
"""Gets additional details for the given dataset member id
:returns: The full dataset member for the given id
:rtype: :class:`data.models.DataSetMember`
"""
dsm = DataSetMember.objects.get(pk=dsm_id)
dsm.files = DataSetFile.objects.filter(dataset=dsm.dataset, scale_file_id__in=list(dsm.file_ids))
return dsm
class DataSetMember(models.Model):
"""
Defines the data of a dataset? contains list/descriptors of DataFiles
:keyword dataset: Refers to dataset member belongs to
:type dataset: :class:`django.db.models.ForeignKey`
:keyword data: JSON description of the data in this DataSetMember.
:type data: :class: `django.contrib.postgres.fields.JSONField(default=dict)`
:keyword created: Created Time
:type created: datetime
"""
dataset = models.ForeignKey('data.DataSet', on_delete=models.PROTECT)
data = django.contrib.postgres.fields.JSONField(default=dict)
file_ids = django.contrib.postgres.fields.ArrayField(models.IntegerField(null=True))
created = models.DateTimeField(auto_now_add=True)
objects = DataSetMemberManager()
def get_dataset_definition(self):
"""Returns the dataset definition
:returns: The dataset definition
:rtype: :class:`data.dataset.dataset.DataSetDefinition`
"""
return self.dataset.get_definition()
def get_data(self):
"""Returns the data for this datasetmember
:returns: The data for this datasetmember
:rtype: :class:`data.data.data.Data`
"""
return DataV6(data=self.data, do_validate=False).get_data()
def get_v6_data_json(self):
"""Returns the data for this datasetmember as v6 json with the version stripped
:returns: The v6 JSON output data dict for this datasetmember
:rtype: dict
"""
return rest_utils.strip_schema_version(convert_data_to_v6_json(self.get_data()).get_dict())
class Meta(object):
"""meta information for the db"""
db_table = 'data_set_member'
class DataSetFileManager(models.Manager):
"""Manages the datasetfile model"""
def create_dataset_files(self, dataset, data, existing_scale_ids):
"""Creates dataset files for the given dataset and data"""
datasetfiles = []
for i in data.values.keys():
v = data.values[i]
if type(v) is FileValue:
for id in v.file_ids:
if id in existing_scale_ids:
continue
file = DataSetFile()
file.dataset = dataset
file.scale_file = ScaleFile.objects.get(pk=id)
file.parameter_name = i
datasetfiles.append(file)
return datasetfiles
def get_file_ids(self, dataset_ids, parameter_names=None):
"""Returns a list of the file IDs for the given datasets, optionally filtered by parameter_name.
:param dataset_ids: The ids of the associated datasets
:type dataset_ids: integer
:param parameter_names: The parameter names to search for in the given datasets
:type parameter_names: string
:returns: The list of scale file IDs
:rtype: :func:`list`
"""
query = self.all().filter(dataset_id__in=list(dataset_ids))
if parameter_names:
query = query.filter(parameter_name__in=list(parameter_names))
return [result.scale_file_id for result in query.only('scale_file_id').distinct()]
def get_dataset_ids(self, file_ids, all_files=False):
"""Returns a list of the dataset IDs that contain the given files
:param file_ids: The ids of the files to look for
:type dataset_id: integer
:param all_files: Whether or not a dataset must contain all files or just some of the files in the list
:type all_files: bool
:returns: The list of dataset IDs
:rtype: :func:`list`
"""
results = []
if not all_files:
query = self.all().filter(scale_file_id__in=list(file_ids)).only('dataset_id').distinct()
results = [result.dataset_id for result in query]
else:
query = self.all().filter(scale_file_id__in=list(file_ids)).values('dataset_id').annotate(total=Count('dataset_id')).order_by('total')
for result in query:
if result['total'] == len(file_ids):
results.append(result['dataset_id'])
return results
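    # Illustrative usage (assumed ids): get_dataset_ids([1, 2], all_files=True)
    # returns only datasets that contain both files 1 and 2, while the default
    # all_files=False returns any dataset containing at least one of them.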
def get_files(self, dataset_ids, parameter_names=None):
"""Returns the dataset files associated with the given dataset_ids
:param dataset_ids: The ids of the associated datasets
:type dataset_ids: integer
:param parameter_names: The parameter names to search for in the given datasets
:type parameter_names: string
:returns: The DataSetFiles associated with that dataset_id
:rtype: [:class:`data.models.DataSetFile`]
"""
files = self.all().filter(dataset_id__in=list(dataset_ids))
if parameter_names:
files = files.filter(parameter_name__in=list(parameter_names))
return files
def get_datasets(self, file_ids, all_files=False):
"""Returns the datasets associated with the given file_id
:param file_id: The id of the associated file
:type file_id: integer
:param all_files: Whether or not a dataset must contain all files or just some of the files in the list
:type all_files: bool
:returns: The DataSets associated with that dataset_id
:rtype: [:class:`data.models.DataSet`]
"""
dataset_ids = self.get_dataset_ids(file_ids=file_ids, all_files=all_files)
datasets = DataSet.objects.filter(id__in=dataset_ids)
return datasets
def get_dataset_files(self, dataset_id):
"""Returns the dataset files associated with the given dataset_id
:param dataset_id: The id of the associated dataset
:type dataset_id: integer
:returns: The DataSetFiles associated with that dataset_id
:rtype: [:class:`data.models.DataSetFile`]
"""
files = DataSetFile.objects.filter(dataset_id=dataset_id)
return files
class DataSetFile(models.Model):
"""
The actual file in a dataset member
:keyword dataset: Refers to the dataset the file is a member of
:type dataset: :class:`django.db.models.ForeignKey`
:keyword scale_file: Refers to the ScaleFile
:type scale_file: :class:`django.db.models.ForeignKey`
:keyword parameter_name: Refers to the File parameter name
:type parameter_name: :class:`django.db.models.CharField`
"""
dataset = models.ForeignKey('data.DataSet', on_delete=models.PROTECT)
scale_file = models.ForeignKey('storage.ScaleFile', on_delete=models.PROTECT)
parameter_name = models.CharField(db_index=True, max_length=50)
objects = DataSetFileManager()
class Meta(object):
"""meta information for the db"""
db_table = 'data_set_file'
unique_together = ("dataset", "scale_file") | apache-2.0 | 391,126,229,335,592,260 | 39.745763 | 146 | 0.650193 | false | 4.198952 | false | false | false |
alphatwirl/alphatwirl | alphatwirl/summary/Scan.py | 1 | 1209 | # Tai Sakuma <[email protected]>
##__________________________________________________________________||
import numpy as np
import copy
##__________________________________________________________________||
class Scan:
def __init__(self, val=None, weight=1, contents=None):
if contents is not None:
self.contents = contents
return
if val is None:
self.contents = [ ]
return
self.contents = [val]
def __add__(self, other):
contents = self.contents + other.contents
return self.__class__(contents=contents)
def __radd__(self, other):
# is called with other = 0 when e.g. sum([obj1, obj2])
if other == 0:
return self.__class__() + self
raise TypeError('unsupported: {!r} + {!r}'.format(other, self))
def __repr__(self):
return '{}(contents={})'.format(self.__class__.__name__, self.contents)
def __eq__(self, other):
return self.contents == other.contents
def __copy__(self):
contents = list(self.contents)
return self.__class__(contents=contents)
##__________________________________________________________________||
| bsd-3-clause | -5,268,649,975,022,638,000 | 28.487805 | 79 | 0.456576 | false | 4.428571 | false | false | false |
absperf/wagtailapproval | wagtailapproval/menu.py | 1 | 3637 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
from django.contrib.auth import get_user
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy as _n
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.menu import MenuItem
from .models import ApprovalStep
def get_user_approval_items(user):
'''Get an iterable of all items pending for a user's approval.
:param User user: A user object whose groups are to be checked for
appropriate steps
:rtype: Iterable[ApprovalItem]
:returns: All the items that this user can approve or reject.
'''
if user.is_superuser:
steps = ApprovalStep.objects.all()
else:
groups = user.groups.all()
steps = ApprovalStep.objects.filter(group__in=groups)
return itertools.chain.from_iterable(
step.get_items(user) for step in steps)
class ApprovalMenuItem(MenuItem):
'''The menu item that shows in the wagtail sidebar'''
def __init__(
self, label=_('Approval'), url=reverse_lazy('wagtailapproval:index'),
classnames='icon icon-tick-inverse', order=201, **kwargs):
super(ApprovalMenuItem, self).__init__(
label,
url,
classnames=classnames,
order=order,
**kwargs)
def is_shown(self, request):
'''Only show the menu if the user is in an owned approval group'''
user = get_user(request)
# If the user is superuser, show the menu if any steps exist at all
if user.is_superuser:
return ApprovalStep.objects.exists()
groups = user.groups.all()
if ApprovalStep.objects.filter(group__in=groups).exists():
# Display the approval notification only outside of the approval
# paths
if not request.path.startswith(reverse('wagtailapproval:index')):
# Get the count of waiting approvals
waiting_approvals = sum(
1 for _ in get_user_approval_items(user))
if waiting_approvals > 0:
messages.info(
request,
_n(
'{num:d} item waiting for approval',
'{num:d} items waiting for approval',
waiting_approvals).format(num=waiting_approvals),
buttons=[
messages.button(
reverse('wagtailapproval:index'),
_('Examine Now'))
]
)
return True
return False
class ApprovalAdminMenuItem(MenuItem):
'''The admin menu item that shows in the wagtail sidebar, for
administrating entire pipelines and manually dropping items into steps.'''
def __init__(
self, label=_('Approval Admin'),
url=reverse_lazy('wagtailapproval:admin_index'),
classnames='icon icon-cog', order=200, **kwargs):
super(ApprovalAdminMenuItem, self).__init__(
label,
url,
classnames=classnames,
order=order,
**kwargs)
def is_shown(self, request):
'''Only show the menu if the user is a superuser and any ApprovalStep
objects exist.'''
user = get_user(request)
if user.is_superuser:
return ApprovalStep.objects.exists()
return False
| bsd-2-clause | -1,745,352,902,436,354,300 | 35.009901 | 78 | 0.587022 | false | 4.65685 | false | false | false |
lulivi/debate_bot | bot.py | 1 | 5398 | #!/usr/bin/python3 -u
# -*- coding: utf-8 -*-
import sys
import time
import telebot # Librería de la API del bot.
from telebot import types # Tipos para la API del bot.
from priv.__init__ import token as tk
bot = telebot.TeleBot(tk()) # Creamos el objeto de nuestro bot.
###############################################################################
# commands
###############################################################################
# start mensaje de bienvenida
@bot.message_handler(commands=['start'])
def command_start(m):
cid = m.chat.id
comando = m.text[7:]
if comando == 'reglas':
command_reglas(m)
else:
bot.send_message(cid,"¡Hola! Soy Debatebot.\nUsa el comando /ayuda para que te muestre mis demás comandos.\n\nEspero ser de utilidad.")
########################################
# muestra los comandos visibles
@bot.message_handler(commands=['ayuda'])
def command_ayuda(m):
bot.reply_to(m,"Guardo y doy información acerca de debates.\n/nuevo establezco el nuevo tema de debate.\n/actual muestro el tema actual de debate.\n/fin termino el debate actual.\n/reglas muestro las reglas actuales del grupo.")
########################################
# nuevo debat
@bot.message_handler(commands=['nuevo'])
def command_nuevo(m):
pos = m.text.find(" ")
cid = m.chat.id
if pos == -1:
bot.send_message(cid,m.from_user.first_name+", escribe:\n/nuevo nuevo_tema_de_debate")
else:
if get_matter(cid) == "":
set_matter(cid, m.text[pos:])
fuid = m.from_user.id
set_matter_id(cid, fuid)
bot.send_message(cid,"El tema actual se ha guardado con éxito, "+m.from_user.first_name+".")
else:
bot.send_message(cid,"Ya se está debatifino un tema, "+m.from_user.first_name+".\n/fin para terminarlo.\n/actual para obtenerlo.")
########################################
# debate actual
@bot.message_handler(commands=['actual'])
def command_actual(m):
cid = m.chat.id
actual = get_matter(cid)
if actual != "":
bot.send_message(cid,"\"* "+actual+" *\" es el tema actual.\n\n/fin para terminarlo.",parse_mode="Markdown")
else:
bot.send_message(cid,"No hay debate actualmente.\n/nuevo para comenzar uno.")
########################################
# terminar el debate
@bot.message_handler(commands=['fin'])
def command_fin(m):
cid = m.chat.id
if get_matter(cid) != "":
uid = get_matter_id(cid)
fuid = m.from_user.id
if uid == fuid:
set_matter(cid)
set_matter_id(cid,uid)
bot.send_message(cid,"Tema cerrado, "+m.from_user.first_name+".\n/nuevo para comenzar uno.")
else:
bot.send_message(cid,"No tiene permiso para terminar el debate, "+m.from_user.first_name+".")
else:
bot.send_message(cid, "No hay debate actualmente, "+m.from_user.first_name+".\n/nuevo para comenzar uno.")
########################################
REGLASID = ""
# reglas
@bot.message_handler(commands=['reglas'])
def command_to_reglas(m):
cid = m.chat.id
if cid < 0:
REGLASID = str(cid)
bot.send_message(cid,"Pulse [aquí](https://telegram.me/debate_bot?start=reglas)",parse_mode="Markdown")
else:
command_reglas(m)
def command_reglas(m):
if REGLASID != "":
reglas = get_reglas(REGLASID)
else:
cid = m.chat.id
reglas = get_reglas(cid)
if reglas != "":
bot.reply_to(m,"Reglas de participación en este grupo:\n\n"+reglas)
else:
bot.reply_to(m,"No hay relgas definidas para este grupo.")
########################################
# definir las reglas
@bot.message_handler(commands=['definereglas'])
def command_definereglas(m):
cid = m.chat.id
text = m.text
pos = text.find(" ")
if pos != -1:
txt = m.text[pos+1:]
set_reglas(cid, txt)
else:
txt = ""
set_reglas(cid, txt)
###############################################################################
# functions
###############################################################################
##### matter #####
def set_matter(chatid,txt=""):
cid = str(chatid)
with open("./matter/"+cid+".mat",'w') as f:
f.write(txt)
def get_matter(chatid):
cid = str(chatid)
with open("./matter/"+cid+".mat",'a') as f:
pass
with open("./matter/"+cid+".mat",'r') as f:
matter = f.read()
return matter
##### reglas #####
def set_reglas(chatid, txt):
cid = str(chatid)
with open("./reglas/"+cid+".rul",'w') as f:
f.write(txt)
def get_reglas(chatid):
cid = str(chatid)
with open("./reglas/"+cid+".rul",'a') as f:
pass
with open("./reglas/"+cid+".rul",'r') as f:
reglas = f.read()
return reglas
##### matter id #####
def set_matter_id(chatid,userid):
cid = str(chatid)
uid = str(userid)
with open("./matter/"+cid+".matid",'w') as f:
f.write(uid)
def get_matter_id(chatid):
cid = str(chatid)
with open("./matter/"+cid+".matid",'a') as f:
pass
with open("./matter/"+cid+".matid",'r') as f:
uid = f.read()
if uid == "":
return -1
else:
return int(uid)
###############################################################################
bot.polling()
| gpl-2.0 | 6,137,335,804,472,736,000 | 31.083333 | 232 | 0.520779 | false | 3.24113 | false | false | false |
chugunovyar/factoryForBuild | neuron/SaveClosedPossition.py | 1 | 31069 | # -*- coding: utf-8 -*-
import logging
from neuron.models import DataSet
import dateutil.parser as DP
loggermsg = logging.getLogger('django')
def saveClosedPossition(jsondata):
#loggermsg.info(len(jsondata))
# Проверяем есть ли такой ордер в БД
ifExistOrdernum = DataSet.objects.filter(open_magicnum=jsondata['magicnum'])
# Если нет такого ордера то записываем его в бд.
if len(ifExistOrdernum) == 0:
if float(jsondata['result']) > 0:
effectivnes = 1
else:
effectivnes = 0
dataToSave = DataSet(
open_magicnum = jsondata['magicnum'],\
open_neuron_name = jsondata['neuron_name'],\
open_period = jsondata['period'],\
orderOpenPrice = jsondata['openprice'],\
open_type = jsondata['open_type'],\
open_time = DP.parse(jsondata['orderopentime']),\
open_close_1 = jsondata['open_close_1'],\
open_open_1 = jsondata['open_open_1'],\
open_high_1 = jsondata['open_high_1'],\
open_low_1 = jsondata['open_low_1'],
open_upband_1 = jsondata['open_upband_1'],
open_lowband_1 = jsondata['open_lowband_1'],
open_midleband_1 = jsondata['open_midleband_1'],
open_jaw_1 = jsondata['open_jaw_1'],
open_lips_1 = jsondata['open_lips_1'],
open_teeth_1 = jsondata['open_teeth_1'],
open_volume_1 = jsondata['open_volume_1'],
open_close_2 = jsondata['open_close_2'],
open_open_2 = jsondata['open_open_2'],
open_high_2 = jsondata['open_high_2'],
open_low_2 = jsondata['open_low_2'],
open_upband_2 = jsondata['open_upband_2'],
open_lowband_2 = jsondata['open_lowband_2'],
open_midleband_2 = jsondata['open_midleband_2'],
open_jaw_2 = jsondata['open_jaw_2'],
open_lips_2 = jsondata['open_lips_2'],
open_teeth_2 = jsondata['open_teeth_2'],
open_volume_2 = jsondata['open_volume_2'],
open_close_3 = jsondata['open_close_3'],
open_open_3 = jsondata['open_open_3'],
open_high_3 = jsondata['open_high_3'],
open_low_3 = jsondata['open_low_3'],
open_upband_3 = jsondata['open_upband_3'],
open_lowband_3 = jsondata['open_lowband_3'],
open_midleband_3 = jsondata['open_midleband_3'],
open_jaw_3 = jsondata['open_jaw_3'],
open_lips_3 = jsondata['open_lips_3'],
open_teeth_3 = jsondata['open_teeth_3'],
open_volume_3 = jsondata['open_volume_3'],
open_close_4 = jsondata['open_close_4'],
open_open_4 = jsondata['open_open_4'],
open_high_4 = jsondata['open_high_4'],
open_low_4 = jsondata['open_low_4'],
open_upband_4 = jsondata['open_upband_4'],
open_lowband_4 = jsondata['open_lowband_4'],
open_midleband_4 = jsondata['open_midleband_4'],
open_jaw_4 = jsondata['open_jaw_4'],
open_lips_4 = jsondata['open_lips_4'],
open_teeth_4 = jsondata['open_teeth_4'],
open_volume_4 = jsondata['open_volume_4'],
open_close_5 = jsondata['open_close_5'],
open_open_5 = jsondata['open_open_5'],
open_high_5 = jsondata['open_high_5'],
open_low_5 = jsondata['open_low_5'],
open_upband_5 = jsondata['open_upband_5'],
open_lowband_5 = jsondata['open_lowband_5'],
open_midleband_5 = jsondata['open_midleband_5'],
open_jaw_5 = jsondata['open_jaw_5'],
open_lips_5 = jsondata['open_lips_5'],
open_teeth_5 = jsondata['open_teeth_5'],
open_volume_5 = jsondata['open_volume_5'],
open_close_6 = jsondata['open_close_6'],
open_open_6 = jsondata['open_open_6'],
open_high_6 = jsondata['open_high_6'],
open_low_6 = jsondata['open_low_6'],
open_upband_6 = jsondata['open_upband_6'],
open_lowband_6 = jsondata['open_lowband_6'],
open_midleband_6 = jsondata['open_midleband_6'],
open_jaw_6 = jsondata['open_jaw_6'],
open_lips_6 = jsondata['open_lips_6'],
open_teeth_6 = jsondata['open_teeth_6'],
open_volume_6 = jsondata['open_volume_6'],
open_close_7 = jsondata['open_close_7'],
open_open_7 = jsondata['open_open_7'],
open_high_7 = jsondata['open_high_7'],
open_low_7 = jsondata['open_low_7'],
open_upband_7 = jsondata['open_upband_7'],
open_lowband_7 = jsondata['open_lowband_7'],
open_midleband_7 = jsondata['open_midleband_7'],
open_jaw_7 = jsondata['open_jaw_7'],
open_lips_7 = jsondata['open_lips_7'],
open_teeth_7 = jsondata['open_teeth_7'],
open_volume_7 = jsondata['open_volume_7'],
open_close_8 = jsondata['open_close_8'],
open_open_8 = jsondata['open_open_8'],
open_high_8 = jsondata['open_high_8'],
open_low_8 = jsondata['open_low_8'],
open_upband_8 = jsondata['open_upband_8'],
open_lowband_8 = jsondata['open_lowband_8'],
open_midleband_8 = jsondata['open_midleband_8'],
open_jaw_8 = jsondata['open_jaw_8'],
open_lips_8 = jsondata['open_lips_8'],
open_teeth_8 = jsondata['open_teeth_8'],
open_volume_8 = jsondata['open_volume_8'],
open_close_9 = jsondata['open_close_9'],
open_open_9 = jsondata['open_open_9'],
open_high_9 = jsondata['open_high_9'],
open_low_9 = jsondata['open_low_9'],
open_upband_9 = jsondata['open_upband_9'],
open_lowband_9 = jsondata['open_lowband_9'],
open_midleband_9 = jsondata['open_midleband_9'],
open_jaw_9 = jsondata['open_jaw_9'],
open_lips_9 = jsondata['open_lips_9'],
open_teeth_9 = jsondata['open_teeth_9'],
open_volume_9 = jsondata['open_volume_9'],
open_close_10 = jsondata['open_close_10'],
open_open_10 = jsondata['open_open_10'],
open_high_10 = jsondata['open_high_10'],
open_low_10 = jsondata['open_low_10'],
open_upband_10 = jsondata['open_upband_10'],
open_lowband_10 = jsondata['open_lowband_10'],
open_midleband_10 = jsondata['open_midleband_10'],
open_jaw_10 = jsondata['open_jaw_10'],
open_lips_10 = jsondata['open_lips_10'],
open_teeth_10 = jsondata['open_teeth_10'],
open_volume_10 = jsondata['open_volume_10'],
)
dataToSave.save()
DataSet.objects.filter(open_magicnum=jsondata['magicnum']).update(
open_close_11 = jsondata['open_close_11'],
open_open_11 = jsondata['open_open_11'],
open_high_11 = jsondata['open_high_11'],
open_low_11 = jsondata['open_low_11'],
open_upband_11 = jsondata['open_upband_11'],
open_lowband_11 = jsondata['open_lowband_11'],
open_midleband_11 = jsondata['open_midleband_11'],
open_jaw_11 = jsondata['open_jaw_11'],
open_lips_11 = jsondata['open_lips_11'],
open_teeth_11 = jsondata['open_teeth_11'],
open_volume_11 = jsondata['open_volume_11'],
open_close_12 = jsondata['open_close_12'],
open_open_12 = jsondata['open_open_12'],
open_high_12 = jsondata['open_high_12'],
open_low_12 = jsondata['open_low_12'],
open_upband_12 = jsondata['open_upband_12'],
open_lowband_12 = jsondata['open_lowband_12'],
open_midleband_12 = jsondata['open_midleband_12'],
open_jaw_12 = jsondata['open_jaw_12'],
open_lips_12 = jsondata['open_lips_12'],
open_teeth_12 = jsondata['open_teeth_12'],
open_volume_12 = jsondata['open_volume_12'],
open_close_13 = jsondata['open_close_13'],
open_open_13 = jsondata['open_open_13'],
open_high_13 = jsondata['open_high_13'],
open_low_13 = jsondata['open_low_13'],
open_upband_13 = jsondata['open_upband_13'],
open_lowband_13 = jsondata['open_lowband_13'],
open_midleband_13 = jsondata['open_midleband_13'],
open_jaw_13 = jsondata['open_jaw_13'],
open_lips_13 = jsondata['open_lips_13'],
open_teeth_13 = jsondata['open_teeth_13'],
open_volume_13 = jsondata['open_volume_13'],
open_close_14 = jsondata['open_close_14'],
open_open_14 = jsondata['open_open_14'],
open_high_14 = jsondata['open_high_14'],
open_low_14 = jsondata['open_low_14'],
open_upband_14 = jsondata['open_upband_14'],
open_lowband_14 = jsondata['open_lowband_14'],
open_midleband_14 = jsondata['open_midleband_14'],
open_jaw_14 = jsondata['open_jaw_14'],
open_lips_14 = jsondata['open_lips_14'],
open_teeth_14 = jsondata['open_teeth_14'],
open_volume_14 = jsondata['open_volume_14'],
open_close_15 = jsondata['open_close_15'],
open_open_15 = jsondata['open_open_15'],
open_high_15 = jsondata['open_high_15'],
open_low_15 = jsondata['open_low_15'],
open_upband_15 = jsondata['open_upband_15'],
open_lowband_15 = jsondata['open_lowband_15'],
open_midleband_15 = jsondata['open_midleband_15'],
open_jaw_15 = jsondata['open_jaw_15'],
open_lips_15 = jsondata['open_lips_15'],
open_teeth_15 = jsondata['open_teeth_15'],
open_volume_15 = jsondata['open_volume_15'],
open_close_16 = jsondata['open_close_16'],
open_open_16 = jsondata['open_open_16'],
open_high_16 = jsondata['open_high_16'],
open_low_16 = jsondata['open_low_16'],
open_upband_16 = jsondata['open_upband_16'],
open_lowband_16 = jsondata['open_lowband_16'],
open_midleband_16 = jsondata['open_midleband_16'],
open_jaw_16 = jsondata['open_jaw_16'],
open_lips_16 = jsondata['open_lips_16'],
open_teeth_16 = jsondata['open_teeth_16'],
open_volume_16 = jsondata['open_volume_16'],
open_close_17 = jsondata['open_close_17'],
open_open_17 = jsondata['open_open_17'],
open_high_17 = jsondata['open_high_17'],
open_low_17 = jsondata['open_low_17'],
open_upband_17 = jsondata['open_upband_17'],
open_lowband_17 = jsondata['open_lowband_17'],
open_midleband_17 = jsondata['open_midleband_17'],
open_jaw_17 = jsondata['open_jaw_17'],
open_lips_17 = jsondata['open_lips_17'],
open_teeth_17 = jsondata['open_teeth_17'],
open_volume_17 = jsondata['open_volume_17'],
open_close_18 = jsondata['open_close_18'],
open_open_18 = jsondata['open_open_18'],
open_high_18 = jsondata['open_high_18'],
open_low_18 = jsondata['open_low_18'],
open_upband_18 = jsondata['open_upband_18'],
open_lowband_18 = jsondata['open_lowband_18'],
open_midleband_18 = jsondata['open_midleband_18'],
open_jaw_18 = jsondata['open_jaw_18'],
open_lips_18 = jsondata['open_lips_18'],
open_teeth_18 = jsondata['open_teeth_18'],
open_volume_18 = jsondata['open_volume_18'],
open_close_19 = jsondata['open_close_19'],
open_open_19 = jsondata['open_open_19'],
open_high_19 = jsondata['open_high_19'],
open_low_19 = jsondata['open_low_19'],
open_upband_19 = jsondata['open_upband_19'],
open_lowband_19 = jsondata['open_lowband_19'],
open_midleband_19 = jsondata['open_midleband_19'],
open_jaw_19 = jsondata['open_jaw_19'],
open_lips_19 = jsondata['open_lips_19'],
open_teeth_19 = jsondata['open_teeth_19'],
open_volume_19 = jsondata['open_volume_19'],
open_close_20 = jsondata['open_close_20'],
open_open_20 = jsondata['open_open_20'],
open_high_20 = jsondata['open_high_20'],
open_low_20 = jsondata['open_low_20'],
open_upband_20 = jsondata['open_upband_20'],
open_lowband_20 = jsondata['open_lowband_20'],
open_midleband_20 = jsondata['open_midleband_20'],
open_jaw_20 = jsondata['open_jaw_20'],
open_lips_20 = jsondata['open_lips_20'],
open_teeth_20 = jsondata['open_teeth_20'],
open_volume_20 = jsondata['open_volume_20'],
open_close_21 = jsondata['open_close_21'],
open_open_21 = jsondata['open_open_21'],
open_high_21 = jsondata['open_high_21'],
open_low_21 = jsondata['open_low_21'],
open_upband_21 = jsondata['open_upband_21'],
open_lowband_21 = jsondata['open_lowband_21'],
open_midleband_21 = jsondata['open_midleband_21'],
open_jaw_21 = jsondata['open_jaw_21'],
open_lips_21 = jsondata['open_lips_21'],
open_teeth_21 = jsondata['open_teeth_21'],
open_volume_21 = jsondata['open_volume_21'],
open_close_22 = jsondata['open_close_22'],
open_open_22 = jsondata['open_open_22'],
open_high_22 = jsondata['open_high_22'],
open_low_22 = jsondata['open_low_22'],
open_upband_22 = jsondata['open_upband_22'],
open_lowband_22 = jsondata['open_lowband_22'],
open_midleband_22 = jsondata['open_midleband_22'],
open_jaw_22 = jsondata['open_jaw_22'],
open_lips_22 = jsondata['open_lips_22'],
open_teeth_22 = jsondata['open_teeth_22'],
open_volume_22 = jsondata['open_volume_22'],
open_close_23 = jsondata['open_close_23'],
open_open_23 = jsondata['open_open_23'],
open_high_23 = jsondata['open_high_23'],
open_low_23 = jsondata['open_low_23'],
open_upband_23 = jsondata['open_upband_23'],
open_lowband_23 = jsondata['open_lowband_23'],
open_midleband_23 = jsondata['open_midleband_23'],
open_jaw_23 = jsondata['open_jaw_23'],
open_lips_23 = jsondata['open_lips_23'],
open_teeth_23 = jsondata['open_teeth_23'],
open_volume_23 = jsondata['open_volume_23'],
open_close_24 = jsondata['open_close_24'],
open_open_24 = jsondata['open_open_24'],
open_high_24 = jsondata['open_high_24'],
open_low_24 = jsondata['open_low_24'],
open_upband_24 = jsondata['open_upband_24'],
open_lowband_24 = jsondata['open_lowband_24'],
open_midleband_24 = jsondata['open_midleband_24'],
open_jaw_24 = jsondata['open_jaw_24'],
open_lips_24 = jsondata['open_lips_24'],
open_teeth_24 = jsondata['open_teeth_24'],
open_volume_24 = jsondata['open_volume_24']
)
DataSet.objects.filter(open_magicnum=jsondata['magicnum']).update(
close_close_1 = jsondata['close_close_1'],
close_open_1 = jsondata['close_open_1'],
close_high_1 = jsondata['close_high_1'],
close_low_1 = jsondata['close_low_1'],
close_upband_1 = jsondata['close_upband_1'],
close_lowband_1 = jsondata['close_lowband_1'],
close_midleband_1 = jsondata['close_midleband_1'],
close_jaw_1 = jsondata['close_jaw_1'],
close_lips_1 = jsondata['close_lips_1'],
close_teeth_1 = jsondata['close_teeth_1'],
close_volume_1 = jsondata['close_volume_1'],
close_close_2 = jsondata['close_close_2'],
close_open_2 = jsondata['close_open_2'],
close_high_2 = jsondata['close_high_2'],
close_low_2 = jsondata['close_low_2'],
close_upband_2 = jsondata['close_upband_2'],
close_lowband_2 = jsondata['close_lowband_2'],
close_midleband_2 = jsondata['close_midleband_2'],
close_jaw_2 = jsondata['close_jaw_2'],
close_lips_2 = jsondata['close_lips_2'],
close_teeth_2 = jsondata['close_teeth_2'],
close_volume_2 = jsondata['close_volume_2'],
close_close_3 = jsondata['close_close_3'],
close_open_3 = jsondata['close_open_3'],
close_high_3 = jsondata['close_high_3'],
close_low_3 = jsondata['close_low_3'],
close_upband_3 = jsondata['close_upband_3'],
close_lowband_3 = jsondata['close_lowband_3'],
close_midleband_3 = jsondata['close_midleband_3'],
close_jaw_3 = jsondata['close_jaw_3'],
close_lips_3 = jsondata['close_lips_3'],
close_teeth_3 = jsondata['close_teeth_3'],
close_volume_3 = jsondata['close_volume_3'],
close_close_4 = jsondata['close_close_4'],
close_open_4 = jsondata['close_open_4'],
close_high_4 = jsondata['close_high_4'],
close_low_4 = jsondata['close_low_4'],
close_upband_4 = jsondata['close_upband_4'],
close_lowband_4 = jsondata['close_lowband_4'],
close_midleband_4 = jsondata['close_midleband_4'],
close_jaw_4 = jsondata['close_jaw_4'],
close_lips_4 = jsondata['close_lips_4'],
close_teeth_4 = jsondata['close_teeth_4'],
close_volume_4 = jsondata['close_volume_4'],
close_close_5 = jsondata['close_close_5'],
close_open_5 = jsondata['close_open_5'],
close_high_5 = jsondata['close_high_5'],
close_low_5 = jsondata['close_low_5'],
close_upband_5 = jsondata['close_upband_5'],
close_lowband_5 = jsondata['close_lowband_5'],
close_midleband_5 = jsondata['close_midleband_5'],
close_jaw_5 = jsondata['close_jaw_5'],
close_lips_5 = jsondata['close_lips_5'],
close_teeth_5 = jsondata['close_teeth_5'],
close_volume_5 = jsondata['close_volume_5'],
close_close_6 = jsondata['close_close_6'],
close_open_6 = jsondata['close_open_6'],
close_high_6 = jsondata['close_high_6'],
close_low_6 = jsondata['close_low_6'],
close_upband_6 = jsondata['close_upband_6'],
close_lowband_6 = jsondata['close_lowband_6'],
close_midleband_6 = jsondata['close_midleband_6'],
close_jaw_6 = jsondata['close_jaw_6'],
close_lips_6 = jsondata['close_lips_6'],
close_teeth_6 = jsondata['close_teeth_6'],
close_volume_6 = jsondata['close_volume_6'],
close_close_7 = jsondata['close_close_7'],
close_open_7 = jsondata['close_open_7'],
close_high_7 = jsondata['close_high_7'],
close_low_7 = jsondata['close_low_7'],
close_upband_7 = jsondata['close_upband_7'],
close_lowband_7 = jsondata['close_lowband_7'],
close_midleband_7 = jsondata['close_midleband_7'],
close_jaw_7 = jsondata['close_jaw_7'],
close_lips_7 = jsondata['close_lips_7'],
close_teeth_7 = jsondata['close_teeth_7'],
close_volume_7 = jsondata['close_volume_7'],
close_close_8 = jsondata['close_close_8'],
close_open_8 = jsondata['close_open_8'],
close_high_8 = jsondata['close_high_8'],
close_low_8 = jsondata['close_low_8'],
close_upband_8 = jsondata['close_upband_8'],
close_lowband_8 = jsondata['close_lowband_8'],
close_midleband_8 = jsondata['close_midleband_8'],
close_jaw_8 = jsondata['close_jaw_8'],
close_lips_8 = jsondata['close_lips_8'],
close_teeth_8 = jsondata['close_teeth_8'],
close_volume_8 = jsondata['close_volume_8'],
close_close_9 = jsondata['close_close_9'],
close_open_9 = jsondata['close_open_9'],
close_high_9 = jsondata['close_high_9'],
close_low_9 = jsondata['close_low_9'],
close_upband_9 = jsondata['close_upband_9'],
close_lowband_9 = jsondata['close_lowband_9'],
close_midleband_9 = jsondata['close_midleband_9'],
close_jaw_9 = jsondata['close_jaw_9'],
close_lips_9 = jsondata['close_lips_9'],
close_teeth_9 = jsondata['close_teeth_9'],
close_volume_9 = jsondata['close_volume_9'],
close_close_10 = jsondata['close_close_10'],
close_open_10 = jsondata['close_open_10'],
close_high_10 = jsondata['close_high_10'],
close_low_10 = jsondata['close_low_10'],
close_upband_10 = jsondata['close_upband_10'],
close_lowband_10 = jsondata['close_lowband_10'],
close_midleband_10 = jsondata['close_midleband_10'],
close_jaw_10 = jsondata['close_jaw_10'],
close_lips_10 = jsondata['close_lips_10'],
close_teeth_10 = jsondata['close_teeth_10'],
close_volume_10 = jsondata['close_volume_10'],
close_close_11 = jsondata['close_close_11'],
close_open_11 = jsondata['close_open_11'],
close_high_11 = jsondata['close_high_11'],
close_low_11 = jsondata['close_low_11'],
close_upband_11 = jsondata['close_upband_11'],
close_lowband_11 = jsondata['close_lowband_11'],
close_midleband_11 = jsondata['close_midleband_11'],
close_jaw_11 = jsondata['close_jaw_11'],
close_lips_11 = jsondata['close_lips_11'],
close_teeth_11 = jsondata['close_teeth_11'],
close_volume_11 = jsondata['close_volume_11'],
close_close_12 = jsondata['close_close_12'],
close_open_12 = jsondata['close_open_12'],
close_high_12 = jsondata['close_high_12'],
close_low_12 = jsondata['close_low_12'],
close_upband_12 = jsondata['close_upband_12'],
close_lowband_12 = jsondata['close_lowband_12'],
close_midleband_12 = jsondata['close_midleband_12'],
close_jaw_12 = jsondata['close_jaw_12'],
close_lips_12 = jsondata['close_lips_12'],
close_teeth_12 = jsondata['close_teeth_12'],
close_volume_12 = jsondata['close_volume_12'],
)
DataSet.objects.filter(open_magicnum=jsondata['magicnum']).update(
close_close_13 = jsondata['close_close_13'],
close_open_13 = jsondata['close_open_13'],
close_high_13 = jsondata['close_high_13'],
close_low_13 = jsondata['close_low_13'],
close_upband_13 = jsondata['close_upband_13'],
close_lowband_13 = jsondata['close_lowband_13'],
close_midleband_13 = jsondata['close_midleband_13'],
close_jaw_13 = jsondata['close_jaw_13'],
close_lips_13 = jsondata['close_lips_13'],
close_teeth_13 = jsondata['close_teeth_13'],
close_volume_13 = jsondata['close_volume_13'],
close_close_14 = jsondata['close_close_14'],
close_open_14 = jsondata['close_open_14'],
close_high_14 = jsondata['close_high_14'],
close_low_14 = jsondata['close_low_14'],
close_upband_14 = jsondata['close_upband_14'],
close_lowband_14 = jsondata['close_lowband_14'],
close_midleband_14 = jsondata['close_midleband_14'],
close_jaw_14 = jsondata['close_jaw_14'],
close_lips_14 = jsondata['close_lips_14'],
close_teeth_14 = jsondata['close_teeth_14'],
close_volume_14 = jsondata['close_volume_14'],
close_close_15 = jsondata['close_close_15'],
close_open_15 = jsondata['close_open_15'],
close_high_15 = jsondata['close_high_15'],
close_low_15 = jsondata['close_low_15'],
close_upband_15 = jsondata['close_upband_15'],
close_lowband_15 = jsondata['close_lowband_15'],
close_midleband_15 = jsondata['close_midleband_15'],
close_jaw_15 = jsondata['close_jaw_15'],
close_lips_15 = jsondata['close_lips_15'],
close_teeth_15 = jsondata['close_teeth_15'],
close_volume_15 = jsondata['close_volume_15'],
close_close_16 = jsondata['close_close_16'],
close_open_16 = jsondata['close_open_16'],
close_high_16 = jsondata['close_high_16'],
close_low_16 = jsondata['close_low_16'],
close_upband_16 = jsondata['close_upband_16'],
close_lowband_16 = jsondata['close_lowband_16'],
close_midleband_16 = jsondata['close_midleband_16'],
close_jaw_16 = jsondata['close_jaw_16'],
close_lips_16 = jsondata['close_lips_16'],
close_teeth_16 = jsondata['close_teeth_16'],
close_volume_16 = jsondata['close_volume_16'],
close_close_17 = jsondata['close_close_17'],
close_open_17 = jsondata['close_open_17'],
close_high_17 = jsondata['close_high_17'],
close_low_17 = jsondata['close_low_17'],
close_upband_17 = jsondata['close_upband_17'],
close_lowband_17 = jsondata['close_lowband_17'],
close_midleband_17 = jsondata['close_midleband_17'],
close_jaw_17 = jsondata['close_jaw_17'],
close_lips_17 = jsondata['close_lips_17'],
close_teeth_17 = jsondata['close_teeth_17'],
close_volume_17 = jsondata['close_volume_17'],
close_close_18 = jsondata['close_close_18'],
close_open_18 = jsondata['close_open_18'],
close_high_18 = jsondata['close_high_18'],
close_low_18 = jsondata['close_low_18'],
close_upband_18 = jsondata['close_upband_18'],
close_lowband_18 = jsondata['close_lowband_18'],
close_midleband_18 = jsondata['close_midleband_18'],
close_jaw_18 = jsondata['close_jaw_18'],
close_lips_18 = jsondata['close_lips_18'],
close_teeth_18 = jsondata['close_teeth_18'],
close_volume_18 = jsondata['close_volume_18'],
close_close_19 = jsondata['close_close_19'],
close_open_19 = jsondata['close_open_19'],
close_high_19 = jsondata['close_high_19'],
close_low_19 = jsondata['close_low_19'],
close_upband_19 = jsondata['close_upband_19'],
close_lowband_19 = jsondata['close_lowband_19'],
close_midleband_19 = jsondata['close_midleband_19'],
close_jaw_19 = jsondata['close_jaw_19'],
close_lips_19 = jsondata['close_lips_19'],
close_teeth_19 = jsondata['close_teeth_19'],
close_volume_19 = jsondata['close_volume_19'],
close_close_20 = jsondata['close_close_20'],
close_open_20 = jsondata['close_open_20'],
close_high_20 = jsondata['close_high_20'],
close_low_20 = jsondata['close_low_20'],
close_upband_20 = jsondata['close_upband_20'],
close_lowband_20 = jsondata['close_lowband_20'],
close_midleband_20 = jsondata['close_midleband_20'],
close_jaw_20 = jsondata['close_jaw_20'],
close_lips_20 = jsondata['close_lips_20'],
close_teeth_20 = jsondata['close_teeth_20'],
close_volume_20 = jsondata['close_volume_20'],
close_close_21 = jsondata['close_close_21'],
close_open_21 = jsondata['close_open_21'],
close_high_21 = jsondata['close_high_21'],
close_low_21 = jsondata['close_low_21'],
close_upband_21 = jsondata['close_upband_21'],
close_lowband_21 = jsondata['close_lowband_21'],
close_midleband_21 = jsondata['close_midleband_21'],
close_jaw_21 = jsondata['close_jaw_21'],
close_lips_21 = jsondata['close_lips_21'],
close_teeth_21 = jsondata['close_teeth_21'],
close_volume_21 = jsondata['close_volume_21'],
close_close_22 = jsondata['close_close_22'],
close_open_22 = jsondata['close_open_22'],
close_high_22 = jsondata['close_high_22'],
close_low_22 = jsondata['close_low_22'],
close_upband_22 = jsondata['close_upband_22'],
close_lowband_22 = jsondata['close_lowband_22'],
close_midleband_22 = jsondata['close_midleband_22'],
close_jaw_22 = jsondata['close_jaw_22'],
close_lips_22 = jsondata['close_lips_22'],
close_teeth_22 = jsondata['close_teeth_22'],
close_volume_22 = jsondata['close_volume_22'],
close_close_23 = jsondata['close_close_23'],
close_open_23 = jsondata['close_open_23'],
close_high_23 = jsondata['close_high_23'],
close_low_23 = jsondata['close_low_23'],
close_upband_23 = jsondata['close_upband_23'],
close_lowband_23 = jsondata['close_lowband_23'],
close_midleband_23 = jsondata['close_midleband_23'],
close_jaw_23 = jsondata['close_jaw_23'],
close_lips_23 = jsondata['close_lips_23'],
close_teeth_23 = jsondata['close_teeth_23'],
close_volume_23 = jsondata['close_volume_23'],
close_close_24 = jsondata['close_close_24'],
close_open_24 = jsondata['close_open_24'],
close_high_24 = jsondata['close_high_24'],
close_low_24 = jsondata['close_low_24'],
close_upband_24 = jsondata['close_upband_24'],
close_lowband_24 = jsondata['close_lowband_24'],
close_midleband_24 = jsondata['close_midleband_24'],
close_jaw_24 = jsondata['close_jaw_24'],
close_lips_24 = jsondata['close_lips_24'],
close_teeth_24 = jsondata['close_teeth_24'],
close_volume_24 = jsondata['close_volume_24'],
close_result = jsondata['result'],
close_effectivnes = effectivnes,
close_neuron_name = jsondata['neuron_name'],
close_closeprice = jsondata['closeprice'],
close_time = DP.parse(jsondata['orderclosetime'])
)
| gpl-3.0 | -1,601,113,315,527,009,800 | 49.413008 | 135 | 0.546768 | false | 3.238692 | false | false | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/core/checks/model_checks.py | 1 | 2454 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import inspect
import types
from django.apps import apps
from django.core.checks import Error, Tags, register
@register(Tags.models)
def check_all_models(app_configs=None, **kwargs):
errors = []
for model in apps.get_models():
if app_configs is None or model._meta.app_config in app_configs:
if not inspect.ismethod(model.check):
errors.append(
Error(
"The '%s.check()' class method is "
"currently overridden by %r." % (
model.__name__, model.check),
hint=None,
obj=model,
id='models.E020'
)
)
else:
errors.extend(model.check(**kwargs))
return errors
@register(Tags.models, Tags.signals)
def check_model_signals(app_configs=None, **kwargs):
"""
Ensure lazily referenced model signals senders are installed.
"""
# Avoid circular import
from django.db import models
errors = []
for name in dir(models.signals):
obj = getattr(models.signals, name)
if isinstance(obj, models.signals.ModelSignal):
for reference, receivers in obj.unresolved_references.items():
for receiver, _, _ in receivers:
# The receiver is either a function or an instance of class
# defining a `__call__` method.
if isinstance(receiver, types.FunctionType):
description = "The '%s' function" % receiver.__name__
else:
description = "An instance of the '%s' class" % receiver.__class__.__name__
errors.append(
Error(
"%s was connected to the '%s' signal "
"with a lazy reference to the '%s' sender, "
"which has not been installed." % (
description, name, '.'.join(reference)
),
obj=receiver.__module__,
hint=None,
id='signals.E001'
)
)
return errors
| mit | 6,105,422,011,354,093,000 | 36.34375 | 99 | 0.467808 | false | 5.155462 | false | false | false |
ahmetcemturan/SFACT | skeinforge_application/skeinforge_plugins/craft_plugins/limit.py | 1 | 8282 | #! /usr/bin/env python
"""
This page is in the table of contents.
This plugin limits the feed rate of the tool head, so that the stepper motors are not driven too fast and skip steps.
The limit manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Limit
The maximum z feed rate is defined in speed.
==Operation==
The default 'Activate Limit' checkbox is on. When it is on, the functions described below will work, when it is off, nothing will be done.
==Settings==
===Maximum Initial Feed Rate===
Default is one millimeter per second.
Defines the maximum speed of the inital tool head move.
==Examples==
The following examples limit the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and limit.py.
> python limit.py
This brings up the limit dialog.
> python limit.py Screw Holder Bottom.stl
The limit tool is parsing the file:
Screw Holder Bottom.stl
..
The limit tool has created the file:
.. Screw Holder Bottom_limit.gcode
"""
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from datetime import date
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import os
import sys
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/28/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText(fileName, gcodeText='', repository=None):
'Limit a gcode file or text.'
return getCraftedTextFromText( archive.getTextIfEmpty(fileName, gcodeText), repository )
def getCraftedTextFromText(gcodeText, repository=None):
'Limit a gcode text.'
if gcodec.isProcedureDoneOrFileIsEmpty(gcodeText, 'limit'):
return gcodeText
if repository == None:
repository = settings.getReadRepository(LimitRepository())
if not repository.activateLimit.value:
return gcodeText
return LimitSkein().getCraftedGcode(gcodeText, repository)
def getNewRepository():
'Get new repository.'
return LimitRepository()
def writeOutput(fileName, shouldAnalyze=True):
'Limit a gcode file.'
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'limit', shouldAnalyze)
class LimitRepository:
'A class to handle the limit settings.'
def __init__(self):
'Set the default settings, execute title & settings fileName.'
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.limit.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Limit', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Limit')
self.activateLimit = settings.BooleanSetting().getFromValue('Activate Limit', self, False)
self.maximumInitialFeedRate = settings.FloatSpin().getFromValue(0.5, 'Maximum Initial Feed Rate (mm/s):', self, 10.0, 1.0)
self.executeTitle = 'Limit'
def execute(self):
'Limit button has been clicked.'
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class LimitSkein:
'A class to limit a skein of extrusions.'
def __init__(self):
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.feedRateMinute = None
self.lineIndex = 0
self.maximumZDrillFeedRatePerSecond = 987654321.0
self.maximumZFeedRatePerSecond = 2.0
self.oldLocation = None
def getCraftedGcode(self, gcodeText, repository):
'Parse gcode text and store the limit gcode.'
self.repository = repository
self.lines = archive.getTextLines(gcodeText)
self.parseInitialization()
self.maximumZDrillFeedRatePerSecond = min(self.maximumZDrillFeedRatePerSecond, self.maximumZFeedRatePerSecond)
self.maximumZCurrentFeedRatePerSecond = self.maximumZFeedRatePerSecond
for lineIndex in xrange(self.lineIndex, len(self.lines)):
self.parseLine( lineIndex )
return self.distanceFeedRate.output.getvalue()
def getLimitedInitialMovement(self, line, splitLine):
'Get a limited linear movement.'
if self.oldLocation == None:
line = self.distanceFeedRate.getLineWithFeedRate(60.0 * self.repository.maximumInitialFeedRate.value, line, splitLine)
return line
def getZLimitedLine(self, deltaZ, distance, line, splitLine):
'Get a replaced z limited gcode movement line.'
zFeedRateSecond = self.feedRateMinute * deltaZ / distance / 60.0
if zFeedRateSecond <= self.maximumZCurrentFeedRatePerSecond:
return line
limitedFeedRateMinute = self.feedRateMinute * self.maximumZCurrentFeedRatePerSecond / zFeedRateSecond
return self.distanceFeedRate.getLineWithFeedRate(limitedFeedRateMinute, line, splitLine)
def getZLimitedLineArc(self, line, splitLine):
'Get a replaced z limited gcode arc movement line.'
self.feedRateMinute = gcodec.getFeedRateMinute(self.feedRateMinute, splitLine)
if self.feedRateMinute == None or self.oldLocation == None:
return line
relativeLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.oldLocation += relativeLocation
deltaZ = abs(relativeLocation.z)
distance = gcodec.getArcDistance(relativeLocation, splitLine)
return self.getZLimitedLine(deltaZ, distance, line, splitLine)
def getZLimitedLineLinear(self, line, location, splitLine):
'Get a replaced z limited gcode linear movement line.'
self.feedRateMinute = gcodec.getFeedRateMinute(self.feedRateMinute, splitLine)
if location == self.oldLocation:
return ''
if self.feedRateMinute == None or self.oldLocation == None:
return line
deltaZ = abs(location.z - self.oldLocation.z)
distance = abs(location - self.oldLocation)
return self.getZLimitedLine(deltaZ, distance, line, splitLine)
def parseInitialization(self):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('limit')
return
elif firstWord == '(<maximumZDrillFeedRatePerSecond>':
self.maximumZDrillFeedRatePerSecond = float(splitLine[1])
elif firstWord == '(<maximumZFeedRatePerSecond>':
self.maximumZFeedRatePerSecond = float(splitLine[1])
self.distanceFeedRate.addLine(line)
def parseLine( self, lineIndex ):
'Parse a gcode line and add it to the limit skein.'
line = self.lines[lineIndex].lstrip()
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'G1':
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
line = self.getLimitedInitialMovement(line, splitLine)
line = self.getZLimitedLineLinear(line, location, splitLine)
self.oldLocation = location
elif firstWord == 'G2' or firstWord == 'G3':
line = self.getZLimitedLineArc(line, splitLine)
elif firstWord == 'M101':
self.maximumZCurrentFeedRatePerSecond = self.maximumZDrillFeedRatePerSecond
elif firstWord == 'M103':
self.maximumZCurrentFeedRatePerSecond = self.maximumZFeedRatePerSecond
self.distanceFeedRate.addLine(line)
def main():
'Display the limit dialog.'
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == '__main__':
main()
| agpl-3.0 | 246,727,834,341,910,940 | 40 | 180 | 0.781574 | false | 3.328778 | false | false | false |
mfnch/pyrtist | old/web/in/examples/create_example.py | 1 | 2754 | import sys, os, os.path, commands, re
usage = "USAGE: python create_example.py box.example"
if len(sys.argv) != 2:
raise "Expected one argument.\n" + usage
example_file = sys.argv[1]
print "Working on '%s'..." % example_file
# Default values for variables which may be changed inside example_file
in_directory = ".."
box = "box -l g"
convert = "convert"
convert_opts = ""
highlight = "%s/../katehighlight/bin/highlight" % in_directory
rst_skeleton = "skeleton"
rst_out = None
title = None
description = None
figure_caption = None
box_source = None
out_eps = None
out_png = None
_f = open(example_file)
exec(_f)
_f.close()
if title == None:
title = "Box example: %s" % crumb
print "Removing old figure if present..."
if out_eps and os.access(out_eps, os.W_OK):
try:
os.remove(out_eps)
except:
print "Failed to remove the figure: continuing anyway..."
print "Executing the Box program..."
print commands.getoutput("%s %s" % (box, box_source))
have_figure = False
if out_eps and os.access(out_eps, os.R_OK):
print "Adjusting eps figure..."
out_png = os.path.splitext(out_eps)[0] + ".png"
print commands.getoutput("%s %s %s %s" %
(convert, convert_opts, out_eps, out_png))
print out_png
have_figure = os.access(out_png, os.R_OK)
if not have_figure:
raise "The figure '%s' has not been produced: stopping here!" % out_png
print "Highlighting the Box source..."
highlighted_source = "/tmp/h.html"
print commands.getoutput("%s Box %s %s" % (highlight, box_source, highlighted_source))
f = open(highlighted_source, "r")
htmlized_box_program = f.read()
f.close()
print "Opening the skeleton..."
f = open(rst_skeleton, "r")
data_skeleton = f.read()
f.close()
vars_dict = {
'title': title,
'description': description,
'crumb': crumb,
'box_file':box_source,
'figure_caption':figure_caption,
'image': out_png,
'htmlized_box_program': htmlized_box_program
}
r = re.compile("[$][^$]*[$]")
def substitutor(var):
try:
var_name = var.group(0)[1:-1]
except:
raise "Error when substituting variable."
if vars_dict.has_key(var_name):
return str(vars_dict[var_name])
print "WARNING: Variable '%s' not found!" % var_name
return var.group(0)
print "Filling the skeleton..."
out = re.sub(r, substitutor, data_skeleton)
f = open(rst_out, "w")
f.write(out)
f.close()
print "Output produced (%s)" % rst_out
print "Generating thumbnail..."
html_out = os.path.splitext(out_png)[0] + ".html"
out_thumb_png = "small_" + out_png
scale_opts = "-scale 100"
print commands.getoutput("%s %s %s %s"
% (convert, scale_opts, out_png, out_thumb_png))
f = open("thumbnails.dat", "a")
f.write("%s, %s\n" % (html_out, out_thumb_png))
f.close()
| lgpl-2.1 | 1,265,988,056,238,007,300 | 24.738318 | 86 | 0.649601 | false | 2.932907 | false | false | false |
tkwon/dj-stripe | djstripe/migrations/0025_auto_20170322_0428.py | 1 | 3906 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-22 04:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djstripe', '0024_auto_20170308_0757'),
]
operations = [
migrations.AlterField(
model_name='account',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='account',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='charge',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='charge',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='customer',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='customer',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='event',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='event',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='eventprocessingexception',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='eventprocessingexception',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='invoice',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='invoice',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='invoiceitem',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='invoiceitem',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='plan',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='plan',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='stripesource',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='stripesource',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='subscription',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='subscription',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='transfer',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='transfer',
name='modified',
field=models.DateTimeField(auto_now=True),
),
]
| mit | 8,493,379,797,407,598,000 | 30.248 | 58 | 0.536354 | false | 4.746051 | false | false | false |
eqcorrscan/ci.testing | eqcorrscan/utils/stacking.py | 1 | 6254 | """
Utility module of the EQcorrscan package to allow for different methods of \
stacking of seismic signal in one place.
:copyright:
EQcorrscan developers.
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy.signal import hilbert
from copy import deepcopy
from eqcorrscan.core.match_filter import normxcorr2
def linstack(streams, normalize=True):
"""
Compute the linear stack of a series of seismic streams of \
multiplexed data.
:type streams: list
:param streams: List of streams to stack
:type normalize: bool
:param normalize: Normalize traces before stacking, normalizes by the RMS \
amplitude.
:returns: stacked data
:rtype: :class:`obspy.core.stream.Stream`
"""
stack = streams[np.argmax([len(stream) for stream in streams])].copy()
if normalize:
for tr in stack:
tr.data = tr.data / np.sqrt(np.mean(np.square(tr.data)))
tr.data = np.nan_to_num(tr.data)
for i in range(1, len(streams)):
for tr in stack:
matchtr = streams[i].select(station=tr.stats.station,
channel=tr.stats.channel)
if matchtr:
# Normalize the data before stacking
if normalize:
norm = matchtr[0].data /\
np.sqrt(np.mean(np.square(matchtr[0].data)))
norm = np.nan_to_num(norm)
else:
norm = matchtr[0].data
tr.data = np.sum((norm, tr.data), axis=0)
return stack
def PWS_stack(streams, weight=2, normalize=True):
"""
Compute the phase weighted stack of a series of streams.
.. note:: It is recommended to align the traces before stacking.
:type streams: list
:param streams: List of :class:`obspy.core.stream.Stream` to stack.
:type weight: float
:param weight: Exponent to the phase stack used for weighting.
:type normalize: bool
:param normalize: Normalize traces before stacking.
:return: Stacked stream.
:rtype: :class:`obspy.core.stream.Stream`
"""
# First get the linear stack which we will weight by the phase stack
Linstack = linstack(streams)
# Compute the instantaneous phase
instaphases = []
print("Computing instantaneous phase")
for stream in streams:
instaphase = stream.copy()
for tr in instaphase:
analytic = hilbert(tr.data)
envelope = np.sqrt(np.sum((np.square(analytic),
np.square(tr.data)), axis=0))
tr.data = analytic / envelope
instaphases.append(instaphase)
# Compute the phase stack
print("Computing the phase stack")
Phasestack = linstack(instaphases, normalize=normalize)
# Compute the phase-weighted stack
for tr in Phasestack:
tr.data = Linstack.select(station=tr.stats.station)[0].data *\
np.abs(tr.data ** weight)
return Phasestack
def align_traces(trace_list, shift_len, master=False, positive=False,
plot=False):
"""
Align traces relative to each other based on their cross-correlation value.
Uses the :func:`obspy.signal.cross_correlation.xcorr` function to find the
optimum shift to align traces relative to a master event. Either uses a
given master to align traces, or uses the first trace in the list.
.. Note::
The cross-correlation function may yield an error/warning
about shift_len being too large: this is raised by the
:func:`obspy.signal.cross_correlation.xcorr` routine when the shift_len
is greater than half the length of either master or a trace, then
the correlation will not be robust. We may switch to a different
correlation routine later.
:type trace_list: list
:param trace_list: List of traces to align
:type shift_len: int
:param shift_len: Length to allow shifting within in samples
:type master: obspy.core.trace.Trace
:param master: Master trace to align to, if set to False will align to \
the largest amplitude trace (default)
:type positive: bool
:param positive: Return the maximum positive cross-correlation, or the \
absolute maximum, defaults to False (absolute maximum).
:type plot: bool
:param plot: If true, will plot each trace aligned with the master.
:returns: list of shifts and correlations for best alignment in seconds.
:rtype: list
"""
from eqcorrscan.utils.plotting import xcorr_plot
traces = deepcopy(trace_list)
if not master:
# Use trace with largest MAD amplitude as master
master = traces[0]
MAD_master = np.median(np.abs(master.data))
for i in range(1, len(traces)):
if np.median(np.abs(traces[i])) > MAD_master:
master = traces[i]
MAD_master = np.median(np.abs(master.data))
else:
print('Using master given by user')
shifts = []
ccs = []
for i in range(len(traces)):
if not master.stats.sampling_rate == traces[i].stats.sampling_rate:
raise ValueError('Sampling rates not the same')
cc_vec = normxcorr2(template=traces[i].data.
astype(np.float32)[shift_len:-shift_len],
image=master.data.astype(np.float32))
cc_vec = cc_vec[0]
shift = np.abs(cc_vec).argmax()
cc = cc_vec[shift]
if plot:
xcorr_plot(template=traces[i].data.
astype(np.float32)[shift_len:-shift_len],
image=master.data.astype(np.float32), shift=shift,
cc=cc)
shift -= shift_len
if cc < 0 and positive:
cc = cc_vec.max()
shift = cc_vec.argmax() - shift_len
shifts.append(shift / master.stats.sampling_rate)
ccs.append(cc)
return shifts, ccs
if __name__ == "__main__":
import doctest
doctest.testmod()
| lgpl-3.0 | 6,233,989,075,923,252,000 | 35.573099 | 79 | 0.624081 | false | 3.948232 | false | false | false |
gspilio/nova | nova/network/quantumv2/api.py | 1 | 41934 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved
# Copyright (c) 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import time
from oslo.config import cfg
from nova.compute import instance_types
from nova import conductor
from nova import context
from nova.db import base
from nova import exception
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network import quantumv2
from nova.network.security_group import openstack_driver
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
quantum_opts = [
cfg.StrOpt('quantum_url',
default='http://127.0.0.1:9696',
help='URL for connecting to quantum'),
cfg.IntOpt('quantum_url_timeout',
default=30,
help='timeout value for connecting to quantum in seconds'),
cfg.StrOpt('quantum_admin_username',
help='username for connecting to quantum in admin context'),
cfg.StrOpt('quantum_admin_password',
help='password for connecting to quantum in admin context',
secret=True),
cfg.StrOpt('quantum_admin_tenant_name',
help='tenant name for connecting to quantum in admin context'),
cfg.StrOpt('quantum_region_name',
help='region name for connecting to quantum in admin context'),
cfg.StrOpt('quantum_admin_auth_url',
default='http://localhost:5000/v2.0',
help='auth url for connecting to quantum in admin context'),
cfg.BoolOpt('quantum_api_insecure',
default=False,
help='if set, ignore any SSL validation issues'),
cfg.StrOpt('quantum_auth_strategy',
default='keystone',
help='auth strategy for connecting to '
'quantum in admin context'),
# TODO(berrange) temporary hack until Quantum can pass over the
# name of the OVS bridge it is configured with
cfg.StrOpt('quantum_ovs_bridge',
default='br-int',
help='Name of Integration Bridge used by Open vSwitch'),
cfg.IntOpt('quantum_extension_sync_interval',
default=600,
help='Number of seconds before querying quantum for'
' extensions'),
]
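# A hedged illustration of how these options might appear in nova.conf;
# the values below are placeholders, not recommendations:
#
#     [DEFAULT]
#     quantum_url = http://192.0.2.10:9696
#     quantum_url_timeout = 30
#     quantum_admin_username = quantum
#     quantum_admin_password = secret
#     quantum_admin_tenant_name = service
#     quantum_admin_auth_url = http://192.0.2.10:5000/v2.0
#     quantum_auth_strategy = keystone
#     quantum_ovs_bridge = br-int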
CONF = cfg.CONF
CONF.register_opts(quantum_opts)
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('flat_injected', 'nova.network.manager')
LOG = logging.getLogger(__name__)
NET_EXTERNAL = 'router:external'
refresh_cache = network_api.refresh_cache
update_instance_info_cache = network_api.update_instance_cache_with_nw_info
class API(base.Base):
"""API for interacting with the quantum 2.x API."""
conductor_api = conductor.API()
security_group_api = openstack_driver.get_openstack_security_group_driver()
def __init__(self):
super(API, self).__init__()
self.last_quantum_extension_sync = None
self.extensions = {}
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures."""
def _get_available_networks(self, context, project_id,
net_ids=None):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
If net_ids specified, it searches networks with requested IDs only.
"""
quantum = quantumv2.get_client(context)
# If user has specified to attach instance only to specific
# networks, add them to **search_opts
# (1) Retrieve non-public network list owned by the tenant.
search_opts = {"tenant_id": project_id, 'shared': False}
if net_ids:
search_opts['id'] = net_ids
nets = quantum.list_networks(**search_opts).get('networks', [])
# (2) Retrieve public network list.
search_opts = {'shared': True}
if net_ids:
search_opts['id'] = net_ids
nets += quantum.list_networks(**search_opts).get('networks', [])
_ensure_requested_network_ordering(
lambda x: x['id'],
nets,
net_ids)
return nets
@refresh_cache
def allocate_for_instance(self, context, instance, **kwargs):
"""Allocate network resources for the instance.
TODO(someone): document the rest of these parameters.
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
NB: QuantumV2 currently assigns hypervisor supplied MAC addresses
to arbitrary networks, which requires openflow switches to
function correctly if more than one network is being used with
the bare metal hypervisor (which is the only one known to limit
MAC addresses).
"""
hypervisor_macs = kwargs.get('macs', None)
available_macs = None
if hypervisor_macs is not None:
# Make a copy we can mutate: records macs that have not been used
# to create a port on a network. If we find a mac with a
# pre-allocated port we also remove it from this set.
available_macs = set(hypervisor_macs)
quantum = quantumv2.get_client(context)
LOG.debug(_('allocate_for_instance() for %s'),
instance['display_name'])
if not instance['project_id']:
msg = _('empty project id for instance %s')
raise exception.InvalidInput(
reason=msg % instance['display_name'])
requested_networks = kwargs.get('requested_networks')
ports = {}
fixed_ips = {}
net_ids = []
if requested_networks:
for network_id, fixed_ip, port_id in requested_networks:
if port_id:
port = quantum.show_port(port_id)['port']
if hypervisor_macs is not None:
if port['mac_address'] not in hypervisor_macs:
raise exception.PortNotUsable(port_id=port_id,
instance=instance['display_name'])
else:
# Don't try to use this MAC if we need to create a
# port on the fly later. Identical MACs may be
# configured by users into multiple ports so we
# discard rather than popping.
available_macs.discard(port['mac_address'])
network_id = port['network_id']
ports[network_id] = port
elif fixed_ip and network_id:
fixed_ips[network_id] = fixed_ip
if network_id:
net_ids.append(network_id)
nets = self._get_available_networks(context, instance['project_id'],
net_ids)
security_groups = kwargs.get('security_groups', [])
security_group_ids = []
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
search_opts = {'tenant_id': instance['project_id']}
user_security_groups = quantum.list_security_groups(
**search_opts).get('security_groups')
for security_group in security_groups:
name_match = None
uuid_match = None
for user_security_group in user_security_groups:
if user_security_group['name'] == security_group:
if name_match:
msg = (_("Multiple security groups found matching"
" '%s'. Use an ID to be more specific."),
security_group)
raise exception.NoUniqueMatch(msg)
name_match = user_security_group['id']
if user_security_group['id'] == security_group:
uuid_match = user_security_group['id']
# If a user names the security group the same as
# another's security groups uuid, the name takes priority.
if not name_match and not uuid_match:
raise exception.SecurityGroupNotFound(
security_group_id=security_group)
security_group_ids.append(name_match)
elif name_match:
security_group_ids.append(name_match)
elif uuid_match:
security_group_ids.append(uuid_match)
touched_port_ids = []
created_port_ids = []
for network in nets:
# If security groups are requested on an instance then the
            # network must have a subnet associated with it. Some plugins
# implement the port-security extension which requires
# 'port_security_enabled' to be True for security groups.
# That is why True is returned if 'port_security_enabled'
# is not found.
if (security_groups and not (
network['subnets']
and network.get('port_security_enabled', True))):
raise exception.SecurityGroupCannotBeApplied()
network_id = network['id']
zone = 'compute:%s' % instance['availability_zone']
port_req_body = {'port': {'device_id': instance['uuid'],
'device_owner': zone}}
try:
port = ports.get(network_id)
if port:
quantum.update_port(port['id'], port_req_body)
touched_port_ids.append(port['id'])
else:
fixed_ip = fixed_ips.get(network_id)
if fixed_ip:
port_req_body['port']['fixed_ips'] = [{'ip_address':
fixed_ip}]
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance['project_id']
if security_group_ids:
port_req_body['port']['security_groups'] = (
security_group_ids)
if available_macs is not None:
if not available_macs:
raise exception.PortNotFree(
instance=instance['display_name'])
mac_address = available_macs.pop()
port_req_body['port']['mac_address'] = mac_address
self._populate_quantum_extension_values(instance,
port_req_body)
created_port_ids.append(
quantum.create_port(port_req_body)['port']['id'])
except Exception:
with excutils.save_and_reraise_exception():
for port_id in touched_port_ids:
port_in_server = quantum.show_port(port_id).get('port')
if not port_in_server:
raise Exception(_('Port not found'))
port_req_body = {'port': {'device_id': None}}
quantum.update_port(port_id, port_req_body)
for port_id in created_port_ids:
try:
quantum.delete_port(port_id)
except Exception as ex:
msg = _("Fail to delete port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': port_id,
'exception': ex})
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_add_security_group_refresh(context, instance)
nw_info = self._get_instance_nw_info(context, instance, networks=nets)
# NOTE(danms): Only return info about ports we created in this run.
# In the initial allocation case, this will be everything we created,
# and in later runs will only be what was created that time. Thus,
# this only affects the attach case, not the original use for this
# method.
return network_model.NetworkInfo([port for port in nw_info
if port['id'] in created_port_ids +
touched_port_ids])
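    # Illustrative call (hypothetical values) as the compute layer might make
    # it; `instance` is assumed to carry 'uuid', 'project_id', 'display_name'
    # and 'availability_zone':
    #
    #     nw_info = api.allocate_for_instance(
    #         ctx, instance,
    #         requested_networks=[(net_id, None, None)],
    #         macs=None, security_groups=['default'])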
def _refresh_quantum_extensions_cache(self):
if (not self.last_quantum_extension_sync or
((time.time() - self.last_quantum_extension_sync)
>= CONF.quantum_extension_sync_interval)):
quantum = quantumv2.get_client(context.get_admin_context())
extensions_list = quantum.list_extensions()['extensions']
self.last_quantum_extension_sync = time.time()
self.extensions.clear()
self.extensions = dict((ext['name'], ext)
for ext in extensions_list)
def _populate_quantum_extension_values(self, instance, port_req_body):
self._refresh_quantum_extensions_cache()
if 'nvp-qos' in self.extensions:
instance_type = instance_types.extract_instance_type(instance)
rxtx_factor = instance_type.get('rxtx_factor')
port_req_body['port']['rxtx_factor'] = rxtx_factor
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
LOG.debug(_('deallocate_for_instance() for %s'),
instance['display_name'])
search_opts = {'device_id': instance['uuid']}
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
for port in ports:
try:
quantumv2.get_client(context).delete_port(port['id'])
except Exception as ex:
LOG.exception(_("Failed to delete quantum port %(portid)s ")
% {'portid': port['id']})
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_remove_security_group_refresh(context, instance)
@refresh_cache
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None,
conductor_api=None):
return self.allocate_for_instance(context, instance,
requested_networks=[(network_id, requested_ip, port_id)],
conductor_api=conductor_api)
@refresh_cache
def deallocate_port_for_instance(self, context, instance, port_id,
conductor_api=None):
try:
quantumv2.get_client(context).delete_port(port_id)
except Exception as ex:
LOG.exception(_("Failed to delete quantum port %(port_id)s ") %
locals())
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_remove_security_group_refresh(context, instance)
return self._get_instance_nw_info(context, instance)
def list_ports(self, context, **search_opts):
return quantumv2.get_client(context).list_ports(**search_opts)
def show_port(self, context, port_id):
return quantumv2.get_client(context).show_port(port_id)
def get_instance_nw_info(self, context, instance, conductor_api=None,
networks=None):
result = self._get_instance_nw_info(context, instance, networks)
update_instance_info_cache(self, context, instance, result,
conductor_api)
return result
def _get_instance_nw_info(self, context, instance, networks=None):
LOG.debug(_('get_instance_nw_info() for %s'),
instance['display_name'])
nw_info = self._build_network_info_model(context, instance, networks)
return network_model.NetworkInfo.hydrate(nw_info)
@refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id,
conductor_api=None):
"""Add a fixed ip to the instance from specified network."""
search_opts = {'network_id': network_id}
data = quantumv2.get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
if not ipam_subnets:
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'network_id': network_id}
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
for subnet in ipam_subnets:
fixed_ips = p['fixed_ips']
fixed_ips.append({'subnet_id': subnet['id']})
port_req_body = {'port': {'fixed_ips': fixed_ips}}
try:
quantumv2.get_client(context).update_port(p['id'],
port_req_body)
return
except Exception as ex:
msg = _("Unable to update port %(portid)s on subnet "
"%(subnet_id)s with failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'],
'subnet_id': subnet['id'],
'exception': ex})
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
@refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address,
conductor_api=None):
"""Remove a fixed ip from the instance."""
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
fixed_ips = p['fixed_ips']
new_fixed_ips = []
for fixed_ip in fixed_ips:
if fixed_ip['ip_address'] != address:
new_fixed_ips.append(fixed_ip)
port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
try:
quantumv2.get_client(context).update_port(p['id'],
port_req_body)
except Exception as ex:
msg = _("Unable to update port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'], 'exception': ex})
return
raise exception.FixedIpNotFoundForSpecificInstance(
instance_uuid=instance['uuid'], ip=address)
def validate_networks(self, context, requested_networks):
"""Validate that the tenant can use the requested networks."""
LOG.debug(_('validate_networks() for %s'),
requested_networks)
if not requested_networks:
return
net_ids = []
for (net_id, _i, port_id) in requested_networks:
if not port_id:
net_ids.append(net_id)
continue
port = quantumv2.get_client(context).show_port(port_id).get('port')
if not port:
raise exception.PortNotFound(port_id=port_id)
if port.get('device_id', None):
raise exception.PortInUse(port_id=port_id)
net_id = port['network_id']
if net_id in net_ids:
raise exception.NetworkDuplicated(network_id=net_id)
net_ids.append(net_id)
nets = self._get_available_networks(context, context.project_id,
net_ids)
if len(nets) != len(net_ids):
            requested_netid_set = set(net_ids)
            returned_netid_set = set([net['id'] for net in nets])
            lostid_set = requested_netid_set - returned_netid_set
id_str = ''
for _id in lostid_set:
id_str = id_str and id_str + ', ' + _id or _id
raise exception.NetworkNotFound(network_id=id_str)
def _get_instance_uuids_by_ip(self, context, address):
"""Retrieve instance uuids associated with the given ip address.
:returns: A list of dicts containing the uuids keyed by 'instance_uuid'
e.g. [{'instance_uuid': uuid}, ...]
"""
search_opts = {"fixed_ips": 'ip_address=%s' % address}
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
return [{'instance_uuid': port['device_id']} for port in ports
if port['device_id']]
def get_instance_uuids_by_ip_filter(self, context, filters):
"""Return a list of dicts in the form of
[{'instance_uuid': uuid}] that matched the ip filter.
"""
# filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.')
ip = filters.get('ip')
# we remove ^$\ in the ip filer
if ip[0] == '^':
ip = ip[1:]
if ip[-1] == '$':
ip = ip[:-1]
ip = ip.replace('\\.', '.')
return self._get_instance_uuids_by_ip(context, ip)
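    # Example (hypothetical values): the caller builds the filter as
    # '^%s$' % fixed_ip.replace('.', '\\.'), so a lookup could look like:
    #
    #     filters = {'ip': '^10\\.0\\.0\\.2$'}
    #     api.get_instance_uuids_by_ip_filter(ctx, filters)
    #     # -> [{'instance_uuid': '...'}]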
def trigger_instance_add_security_group_refresh(self, context,
instance_ref):
admin_context = context.elevated()
for group in instance_ref['security_groups']:
self.conductor_api.security_groups_trigger_handler(context,
'instance_add_security_group', instance_ref, group['name'])
def trigger_instance_remove_security_group_refresh(self, context,
instance_ref):
admin_context = context.elevated()
for group in instance_ref['security_groups']:
self.conductor_api.security_groups_trigger_handler(context,
'instance_remove_security_group', instance_ref, group['name'])
def trigger_security_group_members_refresh(self, context, instance_ref):
admin_context = context.elevated()
group_ids = [group['id'] for group in instance_ref['security_groups']]
self.conductor_api.security_groups_trigger_members_refresh(
admin_context, group_ids)
self.conductor_api.security_groups_trigger_handler(admin_context,
'security_group_members', group_ids)
def _get_port_id_by_fixed_address(self, client,
instance, address):
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone}
data = client.list_ports(**search_opts)
ports = data['ports']
port_id = None
for p in ports:
for ip in p['fixed_ips']:
if ip['ip_address'] == address:
port_id = p['id']
break
if not port_id:
raise exception.FixedIpNotFoundForAddress(address=address)
return port_id
@refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associate a floating ip with a fixed ip."""
# Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
client = quantumv2.get_client(context)
port_id = self._get_port_id_by_fixed_address(client, instance,
fixed_address)
fip = self._get_floating_ip_by_address(client, floating_address)
param = {'port_id': port_id,
'fixed_ip_address': fixed_address}
client.update_floatingip(fip['id'], {'floatingip': param})
def get_all(self, context):
client = quantumv2.get_client(context)
networks = client.list_networks().get('networks') or {}
for network in networks:
network['label'] = network['name']
return networks
def get(self, context, network_uuid):
client = quantumv2.get_client(context)
network = client.show_network(network_uuid).get('network') or {}
network['label'] = network['name']
return network
def delete(self, context, network_uuid):
raise NotImplementedError()
def disassociate(self, context, network_uuid):
raise NotImplementedError()
def get_fixed_ip(self, context, id):
raise NotImplementedError()
def get_fixed_ip_by_address(self, context, address):
uuid_maps = self._get_instance_uuids_by_ip(context, address)
if len(uuid_maps) == 1:
return uuid_maps[0]
elif not uuid_maps:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
raise exception.FixedIpAssociatedWithMultipleInstances(
address=address)
def _setup_net_dict(self, client, network_id):
if not network_id:
return {}
pool = client.show_network(network_id)['network']
return {pool['id']: pool}
def _setup_port_dict(self, client, port_id):
if not port_id:
return {}
port = client.show_port(port_id)['port']
return {port['id']: port}
def _setup_pools_dict(self, client):
pools = self._get_floating_ip_pools(client)
return dict([(i['id'], i) for i in pools])
def _setup_ports_dict(self, client, project_id=None):
search_opts = {'tenant_id': project_id} if project_id else {}
ports = client.list_ports(**search_opts)['ports']
return dict([(p['id'], p) for p in ports])
def get_floating_ip(self, context, id):
client = quantumv2.get_client(context)
fip = client.show_floatingip(id)['floatingip']
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def _get_floating_ip_pools(self, client, project_id=None):
search_opts = {NET_EXTERNAL: True}
if project_id:
search_opts.update({'tenant_id': project_id})
data = client.list_networks(**search_opts)
return data['networks']
def get_floating_ip_pools(self, context):
client = quantumv2.get_client(context)
pools = self._get_floating_ip_pools(client)
return [{'name': n['name'] or n['id']} for n in pools]
def _format_floating_ip_model(self, fip, pool_dict, port_dict):
pool = pool_dict[fip['floating_network_id']]
result = {'id': fip['id'],
'address': fip['floating_ip_address'],
'pool': pool['name'] or pool['id'],
'project_id': fip['tenant_id'],
# In Quantum v2, an exact fixed_ip_id does not exist.
'fixed_ip_id': fip['port_id'],
}
# In Quantum v2 API fixed_ip_address and instance uuid
# (= device_id) are known here, so pass it as a result.
result['fixed_ip'] = {'address': fip['fixed_ip_address']}
if fip['port_id']:
instance_uuid = port_dict[fip['port_id']]['device_id']
result['instance'] = {'uuid': instance_uuid}
else:
result['instance'] = None
return result
def get_floating_ip_by_address(self, context, address):
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def get_floating_ips_by_project(self, context):
client = quantumv2.get_client(context)
project_id = context.project_id
fips = client.list_floatingips(tenant_id=project_id)['floatingips']
pool_dict = self._setup_pools_dict(client)
port_dict = self._setup_ports_dict(client, project_id)
return [self._format_floating_ip_model(fip, pool_dict, port_dict)
for fip in fips]
def get_floating_ips_by_fixed_address(self, context, fixed_address):
return []
def get_instance_id_by_floating_address(self, context, address):
"""Returns the instance id a floating ip's fixed ip is allocated to."""
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
return None
port = client.show_port(fip['port_id'])['port']
return port['device_id']
def get_vifs_by_instance(self, context, instance):
raise NotImplementedError()
def get_vif_by_mac_address(self, context, mac_address):
raise NotImplementedError()
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
def allocate_floating_ip(self, context, pool=None):
"""Add a floating ip to a project from a pool."""
client = quantumv2.get_client(context)
pool = pool or CONF.default_floating_pool
pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)
# TODO(amotoki): handle exception during create_floatingip()
# At this timing it is ensured that a network for pool exists.
# quota error may be returned.
param = {'floatingip': {'floating_network_id': pool_id}}
fip = client.create_floatingip(param)
return fip['floatingip']['floating_ip_address']
def _get_floating_ip_by_address(self, client, address):
"""Get floatingip from floating ip address."""
data = client.list_floatingips(floating_ip_address=address)
fips = data['floatingips']
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
"""Get floatingips from fixed ip and port."""
data = client.list_floatingips(fixed_ip_address=fixed_ip, port_id=port)
return data['floatingips']
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Remove a floating ip with the given address from a project."""
# Note(amotoki): We cannot handle a case where multiple pools
# have overlapping IP address range. In this case we cannot use
# 'address' as a unique key.
# This is a limitation of the current nova.
# Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if fip['port_id']:
raise exception.FloatingIpAssociated(address=address)
client.delete_floatingip(fip['id'])
@refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociate a floating ip from the instance."""
# Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance."""
        # NOTE(wenjianhn): just pass so that instance migration doesn't
        # raise for now.
pass
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
        # NOTE(wenjianhn): just pass so that instance migration doesn't
        # raise for now.
pass
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force add a network to the project."""
raise NotImplementedError()
def _build_network_info_model(self, context, instance, networks=None):
search_opts = {'tenant_id': instance['project_id'],
'device_id': instance['uuid'], }
client = quantumv2.get_client(context, admin=True)
data = client.list_ports(**search_opts)
ports = data.get('ports', [])
if networks is None:
networks = self._get_available_networks(context,
instance['project_id'])
else:
# ensure ports are in preferred network order
_ensure_requested_network_ordering(
lambda x: x['network_id'],
ports,
[n['id'] for n in networks])
nw_info = network_model.NetworkInfo()
for port in ports:
network_name = None
for net in networks:
if port['network_id'] == net['id']:
network_name = net['name']
break
if network_name is None:
raise exception.NotFound(_('Network %(net)s for '
'port %(port_id)s not found!') %
{'net': port['network_id'],
'port': port['id']})
network_IPs = []
for fixed_ip in port['fixed_ips']:
fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
floats = self._get_floating_ips_by_fixed_and_port(
client, fixed_ip['ip_address'], port['id'])
for ip in floats:
fip = network_model.IP(address=ip['floating_ip_address'],
type='floating')
fixed.add_floating_ip(fip)
network_IPs.append(fixed)
subnets = self._get_subnets_from_port(context, port)
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
bridge = None
ovs_interfaceid = None
# Network model metadata
should_create_bridge = None
vif_type = port.get('binding:vif_type')
# TODO(berrange) Quantum should pass the bridge name
# in another binding metadata field
if vif_type == network_model.VIF_TYPE_OVS:
bridge = CONF.quantum_ovs_bridge
ovs_interfaceid = port['id']
elif vif_type == network_model.VIF_TYPE_BRIDGE:
bridge = "brq" + port['network_id']
should_create_bridge = True
if bridge is not None:
bridge = bridge[:network_model.NIC_NAME_LEN]
devname = "tap" + port['id']
devname = devname[:network_model.NIC_NAME_LEN]
network = network_model.Network(
id=port['network_id'],
bridge=bridge,
injected=CONF.flat_injected,
label=network_name,
tenant_id=net['tenant_id']
)
network['subnets'] = subnets
if should_create_bridge is not None:
network['should_create_bridge'] = should_create_bridge
nw_info.append(network_model.VIF(
id=port['id'],
address=port['mac_address'],
network=network,
type=port.get('binding:vif_type'),
ovs_interfaceid=ovs_interfaceid,
devname=devname))
return nw_info
def _get_subnets_from_port(self, context, port):
"""Return the subnets for a given port."""
fixed_ips = port['fixed_ips']
# No fixed_ips for the port means there is no subnet associated
# with the network the port is created on.
# Since list_subnets(id=[]) returns all subnets visible for the
# current tenant, returned subnets may contain subnets which is not
# related to the port. To avoid this, the method returns here.
if not fixed_ips:
return []
search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
data = quantumv2.get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
subnets = []
for subnet in ipam_subnets:
subnet_dict = {'cidr': subnet['cidr'],
'gateway': network_model.IP(
address=subnet['gateway_ip'],
type='gateway'),
}
# attempt to populate DHCP server field
search_opts = {'network_id': subnet['network_id'],
'device_owner': 'network:dhcp'}
data = quantumv2.get_client(context).list_ports(**search_opts)
dhcp_ports = data.get('ports', [])
for p in dhcp_ports:
for ip_pair in p['fixed_ips']:
if ip_pair['subnet_id'] == subnet['id']:
subnet_dict['dhcp_server'] = ip_pair['ip_address']
break
subnet_object = network_model.Subnet(**subnet_dict)
for dns in subnet.get('dns_nameservers', []):
subnet_object.add_dns(
network_model.IP(address=dns, type='dns'))
# TODO(gongysh) get the routes for this subnet
subnets.append(subnet_object)
return subnets
def get_dns_domains(self, context):
"""Return a list of available dns domains.
These can be used to create DNS entries for floating ips.
"""
raise NotImplementedError()
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
raise NotImplementedError()
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
raise NotImplementedError()
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
raise NotImplementedError()
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
raise NotImplementedError()
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
raise NotImplementedError()
def create_public_dns_domain(self, context, domain, project=None):
"""Create a private DNS domain with optional nova project."""
raise NotImplementedError()
def _ensure_requested_network_ordering(accessor, unordered, preferred):
"""Sort a list with respect to the preferred network ordering."""
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
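# Illustrative behaviour (hypothetical ids): with nets = [{'id': 'a'},
# {'id': 'b'}] and a preferred order of ['b', 'a'], the in-place sort
# reorders nets to [{'id': 'b'}, {'id': 'a'}]:
#
#     _ensure_requested_network_ordering(lambda x: x['id'], nets, ['b', 'a'])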
# license: apache-2.0
# Chealion/yycbike | archive/weatherLoad.py
#! /usr/bin/python
# :set tabstop=4 shiftwidth=4 expandtab
# Downoads Environment Canada data and sends the data to Graphite. Additionally logs the data to a file we can use to import later
import csv
import time
import graphitesend
import urllib2
from datetime import date, timedelta
import datetime
graphitesend.init(graphite_server='localhost',prefix='yycbike',system_name='')
metriclog = open('/home/ubuntu/devmetriclog.log', 'a')
# Watch out for timezones - this script fails to function past 5 PM MST.
yesterday = date.today() - timedelta(1)
year = yesterday.strftime('%Y')
month = yesterday.strftime('%m')
day = yesterday.strftime('%d')
#Installations
# URLs per ftp://ftp.tor.ec.gc.ca/Pub/Get_More_Data_Plus_de_donnees/Readme.txt
HOURLY_URL='http://climate.weather.gc.ca/climate_data/bulk_data_e.html?format=csv&stationID=50430&Year=' + year + '&Month=' + month + '&Day=' + day + '&submit=Download+Data&timeframe=1'
DAILY_URL= 'http://climate.weather.gc.ca/climate_data/bulk_data_e.html?format=csv&stationID=50430&Year=' + year + '&Month=' + month + '&Day=' + day + '&submit=Download+Data&timeframe=2'
## HOURLY
url = HOURLY_URL
print 'Loading Hourly Weather Data...'
response = urllib2.urlopen(url)
csv_data = response.read()
# Delete first 17 lines - up to and inlcuding header line
cleaned_data = '\n'.join(csv_data.split('\n')[17:])
# split into list, and use non unicode field names
csv_reader = csv.DictReader(cleaned_data.split('\n'), fieldnames=['Date', 'Year', 'Month', 'Day', 'Time', 'Quality', 'Temp', 'TempFlag', 'DewPoint', 'DewPointFlag', 'Humidity', 'HumFlag', 'WindDir', 'WindFlag', 'WindSpd', 'WindFlg', 'Visbility', 'VisFlag', 'Pressure', 'PressFlag', 'Humidex', 'HmdxFlag', 'WindChill', 'WindChillFlag', 'Weather'])
for row in csv_reader:
#Create timestamp
timestamp = time.mktime(datetime.datetime.strptime(row['Date'], "%Y-%m-%d %H:%M").timetuple())
yesterday_timestamp = float(yesterday.strftime('%s'))
#Ignore any data "newer" than yesterday. Data that doesn't exist yet.
if timestamp > yesterday_timestamp:
break
else:
timestamp = str(int(timestamp))
#print row
# Data Cleaning - Wind Chill or Humidex - merge
if row['Temp'] is None or row['Temp'] == '':
continue
if row['Humidex'] == '' and row['WindChill'] == '':
feelslike = row['Temp']
elif row['Humidex'] == '':
feelslike = row['WindChill']
else:
feelslike = row['Humidex']
if row['WindSpd'] == '':
row['WindSpd'] = 0
if row['WindDir'] == '':
row['WindDir'] = 0
metric_string = 'weather.hourly.temp ' + str(row['Temp']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.hourly.temp', str(row['Temp']), timestamp)
metric_string = 'weather.hourly.windspeed ' + str(row['WindSpd']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.hourly.windspeed', str(row['WindSpd']), timestamp)
metric_string = 'weather.hourly.winddir ' + str(row['WindDir']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.hourly.winddir', str(row['WindDir']), timestamp)
metric_string = 'weather.hourly.humidity ' + str(row['Humidity']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.hourly.humidity', str(row['Humidity']), timestamp)
metric_string = 'weather.hourly.feelslike ' + str(feelslike) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.hourly.feelslike', str(feelslike), timestamp)
## DAILY
url = DAILY_URL
print 'Loading Daily Weather Data...'
response = urllib2.urlopen(url)
csv_data = response.read()
# Delete first 26 lines - up to and including header line
cleaned_data = '\n'.join(csv_data.split('\n')[26:])
# split into list, and use non unicode field names
csv_reader = csv.DictReader(cleaned_data.split('\n'), fieldnames=['Date', 'Year', 'Month', 'Day', 'Quality', 'Max', 'MaxFlag', 'Min', 'MinFlag', 'Mean', 'MeanFlag', 'Heat1', 'Heat2', 'Heat3', 'Heat4', 'Rain', 'RainFlag', 'Snow', 'SnowFlag', 'TotalPrecip', 'PrecipFlag', 'SnowonGround', 'SnowOnGroundFlag', 'Wind1', 'Wind2', 'Wind3', 'Wind4'])
for row in csv_reader:
#Create timestamp
timestamp = time.mktime(datetime.datetime.strptime(row['Date'], "%Y-%m-%d").timetuple())
yesterday_timestamp = float(yesterday.strftime('%s'))
#Ignore any data "newer" than yesterday. Data that doesn't exist yet.
if timestamp > yesterday_timestamp:
break
else:
timestamp = str(int(timestamp))
#print row
if row['Max'] is None or row['Max'] == '' or row['Min'] == '':
continue
metric_string = 'weather.daily.high ' + str(row['Max']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.daily.high', str(row['Max']), timestamp)
metric_string = 'weather.daily.low ' + str(row['Min']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.daily.low', str(row['Min']), timestamp)
metric_string = 'weather.daily.mean ' + str(row['Mean']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.daily.mean', str(row['Mean']), timestamp)
# Data Cleaning
if row['TotalPrecip'] == '':
row['TotalPrecip'] = 0
metric_string = 'weather.daily.precip ' + str(row['TotalPrecip']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.daily.precip', str(row['TotalPrecip']), timestamp)
# Data Cleaning
if row['SnowonGround'] == '':
row['SnowonGround'] = 0
metric_string = 'weather.daily.snowamt ' + str(row['SnowonGround']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.daily.snowamt', str(row['SnowonGround']), timestamp)
# OUTPUT FORMAT:
# <metric path> <metric value> <metric timestamp>
# yycbike.peacebridge.north.trips 5 123456789
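# A hedged sketch of pushing such a line over Carbon's plaintext protocol
# directly (graphitesend already does this for us; the host and port 2003
# are assumptions about the local Carbon setup):
#
#   import socket
#   sock = socket.create_connection(('localhost', 2003))
#   sock.sendall('yycbike.weather.daily.mean 1.5 1434153600\n')
#   sock.close()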
metriclog.close()
print 'Done.'
# license: mit
# Ziqi-Li/bknqgis | bokeh/bokeh/server/server.py
''' Provides a Server which instantiates Application instances as clients connect
'''
from __future__ import absolute_import, print_function
import atexit
import logging
log = logging.getLogger(__name__)
import signal
import tornado
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado import netutil
from .tornado import BokehTornado
from bokeh import __version__
from bokeh.application import Application
from bokeh.resources import DEFAULT_SERVER_PORT
def _create_hosts_whitelist(host_list, port):
if not host_list:
return ['localhost:' + str(port)]
hosts = []
for host in host_list:
if '*' in host:
log.warning(
"Host wildcard %r will allow websocket connections originating "
"from multiple (or possibly all) hostnames or IPs. Use non-wildcard "
"values to restrict access explicitly", host)
if host == '*':
# do not append the :80 port suffix in that case: any port is
# accepted
hosts.append(host)
continue
parts = host.split(':')
if len(parts) == 1:
if parts[0] == "":
raise ValueError("Empty host value")
hosts.append(host+":80")
elif len(parts) == 2:
try:
int(parts[1])
except ValueError:
raise ValueError("Invalid port in host value: %s" % host)
if parts[0] == "":
raise ValueError("Empty host value")
hosts.append(host)
else:
raise ValueError("Invalid host value: %s" % host)
return hosts
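# Illustrative inputs and outputs (hypothetical hosts):
#
#   _create_hosts_whitelist(None, 5006)              -> ['localhost:5006']
#   _create_hosts_whitelist(['example.com'], 5006)   -> ['example.com:80']
#   _create_hosts_whitelist(['example.com:8080', '*'], 5006)
#                                                    -> ['example.com:8080', '*']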
def _bind_sockets(address, port):
'''Like tornado.netutil.bind_sockets(), but also returns the
assigned port number.
'''
ss = netutil.bind_sockets(port=port or 0, address=address)
assert len(ss)
ports = {s.getsockname()[1] for s in ss}
assert len(ports) == 1, "Multiple ports assigned??"
actual_port = ports.pop()
if port:
assert actual_port == port
return ss, actual_port
class Server(object):
''' A Server which creates a new Session for each connection, using an Application to initialize each Session.
Args:
applications (dict of str: bokeh.application.Application) or bokeh.application.Application:
mapping from URL paths to Application instances, or a single Application to put at the root URL
The Application is a factory for Document, with a new Document initialized for each Session.
Each application should be identified by a path meant to go in a URL, like "/" or "/foo"
Kwargs:
        num_procs (int):
            Number of worker processes for an app. Defaults to 1. Using 0 will autodetect the number of cores.
tornado_server_kwargs (dict):
Additional arguments passed to tornado.httpserver.HTTPServer. E.g. max_buffer_size to
specify the maximum upload size. More details can be found at:
http://www.tornadoweb.org/en/stable/httpserver.html#http-server
'''
def __init__(self, applications, io_loop=None, tornado_server_kwargs=None, **kwargs):
log.info("Starting Bokeh server version %s (running on Tornado %s)" % (__version__, tornado.version))
if isinstance(applications, Application):
self._applications = { '/' : applications }
else:
self._applications = applications
tornado_kwargs = { key: kwargs[key] for key in ['extra_patterns',
'secret_key',
'sign_sessions',
'generate_session_ids',
'keep_alive_milliseconds',
'check_unused_sessions_milliseconds',
'unused_session_lifetime_milliseconds',
'stats_log_frequency_milliseconds',
]
if key in kwargs }
prefix = kwargs.get('prefix')
if prefix is None:
prefix = ""
prefix = prefix.strip("/")
if prefix:
prefix = "/" + prefix
self._prefix = prefix
self._started = False
self._stopped = False
port = kwargs.get('port', DEFAULT_SERVER_PORT)
self._address = kwargs.get('address') or None
if tornado_server_kwargs is None:
tornado_server_kwargs = {}
tornado_server_kwargs.setdefault('xheaders', kwargs.get('use_xheaders', False))
self._num_procs = kwargs.get('num_procs', 1)
if self._num_procs != 1:
assert all(app.safe_to_fork for app in self._applications.values()), (
                'User code has run before attempting to run multiple '
'processes. This is considered an unsafe operation.')
sockets, self._port = _bind_sockets(self._address, port)
try:
tornado_kwargs['extra_websocket_origins'] = _create_hosts_whitelist(kwargs.get('allow_websocket_origin'), self._port)
tornado_kwargs['use_index'] = kwargs.get('use_index', True)
tornado_kwargs['redirect_root'] = kwargs.get('redirect_root', True)
self._tornado = BokehTornado(self._applications, self.prefix, **tornado_kwargs)
self._http = HTTPServer(self._tornado, **tornado_server_kwargs)
self._http.start(self._num_procs)
self._http.add_sockets(sockets)
except Exception:
for s in sockets:
s.close()
raise
# Can only instantiate the IO loop after HTTPServer.start() was
# called because of `num_procs`, see issue #5524
if io_loop is None:
io_loop = IOLoop.current()
self._loop = io_loop
self._tornado.initialize(io_loop=io_loop, **tornado_kwargs)
@property
def port(self):
'''The actual port number the server is listening on for HTTP
requests.
'''
return self._port
@property
def address(self):
'''The address the server is listening on for HTTP requests
(may be empty or None).
'''
return self._address
@property
def prefix(self):
return self._prefix
@property
def io_loop(self):
return self._loop
def start(self):
''' Start the Bokeh Server and its background tasks.
Notes:
This method does not block and does not affect the state of
the Tornado I/O loop. You must start and stop the loop yourself.
'''
assert not self._started, "Already started"
self._started = True
self._tornado.start()
def stop(self, wait=True):
''' Stop the Bokeh Server.
Args:
            wait (boolean): whether to wait for orderly cleanup (default: True)
Returns:
None
'''
assert not self._stopped, "Already stopped"
self._stopped = True
self._tornado.stop(wait)
self._http.stop()
def run_until_shutdown(self):
''' Run the Bokeh Server until shutdown is requested by the user,
either via a Keyboard interrupt (Ctrl-C) or SIGTERM.
'''
if not self._started:
self.start()
# Install shutdown hooks
atexit.register(self._atexit)
signal.signal(signal.SIGTERM, self._sigterm)
try:
self._loop.start()
except KeyboardInterrupt:
print("\nInterrupted, shutting down")
self.stop()
_atexit_ran = False
def _atexit(self):
if self._atexit_ran:
return
self._atexit_ran = True
log.debug("Shutdown: cleaning up")
if not self._stopped:
self.stop(wait=False)
def _sigterm(self, signum, frame):
print("Received signal %d, shutting down" % (signum,))
# Tell self._loop.start() to return.
self._loop.add_callback_from_signal(self._loop.stop)
def unlisten(self):
'''Stop listening on ports (Server will no longer be usable after calling this)
Returns:
None
'''
self._http.close_all_connections()
self._http.stop()
def get_session(self, app_path, session_id):
'''Gets a session by name (session must already exist)'''
return self._tornado.get_session(app_path, session_id)
def get_sessions(self, app_path=None):
'''Gets all live sessions for an application.'''
if app_path is not None:
return self._tornado.get_sessions(app_path)
all_sessions = []
for path in self._tornado.app_paths:
all_sessions += self._tornado.get_sessions(path)
return all_sessions
def show(self, app_path, browser=None, new='tab'):
''' Opens an app in a browser window or tab.
Useful for testing server applications on your local desktop but
        should not be called when running bokeh-server on an actual server.
Args:
app_path (str) : the app path to open
The part of the URL after the hostname:port, with leading slash.
browser (str, optional) : browser to show with (default: None)
For systems that support it, the **browser** argument allows
specifying which browser to display in, e.g. "safari", "firefox",
"opera", "windows-default" (see the ``webbrowser`` module
documentation in the standard lib for more details).
new (str, optional) : window or tab (default: "tab")
If ``new`` is 'tab', then opens a new tab.
If ``new`` is 'window', then opens a new window.
Returns:
None
'''
if not app_path.startswith("/"):
raise ValueError("app_path must start with a /")
address_string = 'localhost'
if self.address is not None and self.address != '':
address_string = self.address
url = "http://%s:%d%s%s" % (address_string, self.port, self.prefix, app_path)
from bokeh.util.browser import view
view(url, browser=browser, new=new)
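# A minimal usage sketch (assumes `make_doc` is your own function that fills
# in a bokeh Document):
#
#     from bokeh.application import Application
#     from bokeh.application.handlers import FunctionHandler
#
#     app = Application(FunctionHandler(make_doc))
#     server = Server({'/': app}, port=5006)
#     server.start()
#     server.io_loop.start()   # or: server.run_until_shutdown()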
# license: gpl-2.0
# jelly/calibre | src/calibre/db/cli/cmd_catalog.py
#!/usr/bin/env python2
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from calibre.customize.ui import available_catalog_formats, plugin_for_catalog_format
from calibre.db.cli import integers_from_string
readonly = True
version = 0 # change this if you change signature of implementation()
needs_srv_ctx = True
no_remote = True
def implementation(db, notify_changes, ctx):
raise NotImplementedError()
def option_parser(get_parser, args): # {{{
def add_plugin_parser_options(fmt, parser):
# Fetch the extension-specific CLI options from the plugin
# library.catalogs.<format>.py
plugin = plugin_for_catalog_format(fmt)
p = parser.add_option_group(_('{} OPTIONS').format(fmt.upper()))
for option in plugin.cli_options:
if option.action:
p.add_option(
option.option,
default=option.default,
dest=option.dest,
action=option.action,
help=option.help
)
else:
p.add_option(
option.option,
default=option.default,
dest=option.dest,
help=option.help
)
# Entry point
parser = get_parser(
_(
'''\
%prog catalog /path/to/destination.(csv|epub|mobi|xml...) [options]
Export a catalog in format specified by path/to/destination extension.
Options control how entries are displayed in the generated catalog output.
Note that different catalog formats support different sets of options.
'''
)
)
# Add options common to all catalog plugins
parser.add_option(
'-i',
'--ids',
default=None,
dest='ids',
help=_(
"Comma-separated list of database IDs to catalog.\n"
"If declared, --search is ignored.\n"
"Default: all"
)
)
parser.add_option(
'-s',
'--search',
default=None,
dest='search_text',
help=_(
"Filter the results by the search query. "
"For the format of the search query, please see "
"the search-related documentation in the User Manual.\n"
"Default: no filtering"
)
)
parser.add_option(
'-v',
'--verbose',
default=False,
action='store_true',
dest='verbose',
help=_('Show detailed output information. Useful for debugging')
)
fmt = 'epub'
if args and '.' in args[0]:
fmt = args[0].rpartition('.')[-1].lower()
if fmt not in available_catalog_formats():
fmt = 'epub'
# Add options specific to fmt plugin
add_plugin_parser_options(fmt, parser)
return parser
# }}}
def main(opts, args, dbctx):
if len(args) < 1:
raise SystemExit(_('You must specify a catalog output file'))
if opts.ids:
opts.ids = list(integers_from_string(opts.ids))
fmt = args[0].rpartition('.')[-1]
if fmt not in available_catalog_formats():
raise SystemExit(
_('Cannot generate a catalog in the {} format').format(fmt.upper())
)
# No support for connected device in CLI environment
# Parallel initialization in calibre.gui2.tools:generate_catalog()
opts.connected_device = {
'is_device_connected': False,
'kind': None,
'name': None,
'save_template': None,
'serial': None,
'storage': None,
}
dest = os.path.abspath(os.path.expanduser(args[0]))
plugin = plugin_for_catalog_format(fmt)
with plugin:
plugin.run(dest, opts, dbctx.db)
return 0
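# Typical invocations (hypothetical paths and queries) through the
# `calibredb catalog` sub-command that this module backs:
#
#   calibredb catalog /tmp/catalog.epub --search "tags:fiction" --verbose
#   calibredb catalog /tmp/catalog.csv --ids 1,2,3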
# license: gpl-3.0
# geotagx/geotagx-pybossa-archive | pybossa/auth/task.py
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from flask.ext.login import current_user
import pybossa.model as model
from pybossa.core import db
def create(task=None):
if not current_user.is_anonymous():
app = db.session.query(model.App).filter_by(id=task.app_id).one()
if app.owner_id == current_user.id or current_user.admin is True:
return True
else:
return False
else:
return False
def read(task=None):
return True
def update(task):
if not current_user.is_anonymous():
app = db.session.query(model.App).filter_by(id=task.app_id).one()
if app.owner_id == current_user.id or current_user.admin is True:
return True
else:
return False
else:
return False
def delete(task):
return update(task)
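# Illustrative check (hypothetical task object and view code): PyBossa's
# authorization layer is expected to call these helpers before acting on a
# task, e.g. a view might do something like:
#
#     if not update(task):
#         abort(403)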
# license: agpl-3.0
# MasterGowen/moonrain | moonrain/accounts/models.py
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from ..projects.models import Project
class UserManager(BaseUserManager):
def create_user(self, email, username, password=None):
if not email:
raise ValueError('Необходимо ввести электронный адрес')
user = self.model(
email=UserManager.normalize_email(email),
username=username,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, username, password):
user = self.create_user(email,
password=password,
username=username)
user.is_admin = True
user.save(using=self._db)
return user
class User(AbstractBaseUser):
'''
    User (custom Django user model).
'''
email = models.EmailField(
verbose_name='Электронная почта',
max_length=32,
unique=True,
db_index=True,
)
username = models.CharField(
verbose_name='Имя пользователя',
blank=False,
max_length=32,
unique=True,
)
avatar = models.ImageField(
verbose_name='Аватар',
upload_to='images/%Y/%m',
blank=True,
)
first_name = models.CharField(
verbose_name='Имя',
max_length=16,
blank=True,
)
last_name = models.CharField(
verbose_name='Фамилия',
max_length=32,
blank=True,
)
department = models.CharField(
verbose_name='Подразделение',
max_length=255,
blank=True,
)
is_admin = models.BooleanField(
verbose_name='Является администратором?',
default=False,
)
is_superuser = models.BooleanField(
verbose_name='Является суперпользователем?',
default=False,
)
projects = models.ManyToManyField(Project, verbose_name='Проекты',
blank=True,
help_text='Проекты, в которых участвует пользователь',)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
objects = UserManager()
def get_full_name(self):
return '%s %s' % (self.last_name,
self.first_name,)
def get_short_name(self):
return self.username
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
@property
def is_staff(self):
return self.is_admin
class Meta:
verbose_name = ('Пользователь')
verbose_name_plural = ('Пользователи') | gpl-2.0 | 7,740,447,189,795,986,000 | 23.070796 | 93 | 0.573005 | false | 3.433081 | false | false | false |
# agaveapi/SC17-container-tutorial | content/images/jupyter/examples/setvars.py
# Here we define some utility commands to simplify interaction with the shell.
# You don't need to read or understand this, but it's here in case you want to.
import re
import os
def repvar(v):
"""
repvar() is short for "Replace Variables." The idea is that this
function looks for strings of the form $VAR or ${VAR} or even
$(CMD) in the input string and replaces them, either with
the contents of os.environ[VAR] or os.pipe(CMD), mimicking the
behavior of bash. If a backslace precedes the $, then the backslash
will be removed but the string will not be evaluated. Thus:
${HOME} becomes "/home/user"
$HOME becomes "/home/usr"
$(echo Hello) becomes "Hello"
\$HOME becomes $HOME
"""
epos = 0
buf = ''
for g in re.finditer(r'\$((\w+)|\{([^}]*)\}|\(([^())]*)\))|(\\+\$)',v):
if g:
i = 2
while g.group(i) == None:
i += 1
p = g.start(0)
buf += v[epos:p]
epos = p + len(g.group(0))
if i == 4:
fh = os.popen(g.group(i),"r")
c = repvar(fh.read())
fh.close()
elif i == 5:
c = '$'
else:
if not g.group(i) in os.environ:
raise Exception("no such environment variable: "+g.group(i))
c = repvar(os.environ[g.group(i)])
buf += c
else:
break
buf += v[epos:]
return buf.strip()
def setvar(e):
"""
setvar() emulates the ability of BASH to set environment variables.
Thus, NAME=VALUE will set os.environ["NAME"]="VALUE". Bash-style
comments will be stripped, and bash-line continuations will be processed.
"""
e = re.sub(r'#[^\r\n]*','',e)
e = re.sub(r'\\\n\s*','',e)
for m in re.finditer(r'(?m)(\w+)=(.*)',e):
k = m.group(1)
v = repvar(m.group(2))
print(k+"="+v)
os.environ[k]=v
def readfile(f):
"""
Reads in a file. repvar() will be applied to the file name.
"""
n = repvar(f)
print("Reading file `"+n+"'")
fh = open(n)
c = fh.read()
fh.close()
return c
def writefile(f,c):
"""
Writes out a file. repvar() will be applied both to the file name
and the file contents.
"""
n = repvar(f)
print("Writing file `"+n+"'")
fh = open(n,"w")
fh.write(repvar(c))
fh.close()
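# A small usage sketch (assumes HOME is set and the `hostname` command
# exists; the file paths are hypothetical):
#
#     setvar("INSTALL_DIR=$HOME/install   # trailing comment is stripped")
#     print(repvar("installing into ${INSTALL_DIR}"))
#     writefile("$INSTALL_DIR/greeting.txt", "Hello from $(hostname)\n")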
# license: bsd-3-clause