repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
diana-hep/carl | tests/distributions/test_join.py | 1 | 1471 | # Carl is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
import numpy as np
from numpy.testing import assert_array_almost_equal
from carl.distributions import Join
from carl.distributions import Normal
from carl.distributions import Histogram
def test_join():
p = Join(components=[Normal(mu=0), Normal(mu=1), Normal(mu=2)])
assert p.ndim == 3
assert len(p.parameters_) == 6
X = p.rvs(10000, random_state=1)
assert X.shape == (10000, 3)
assert np.abs(np.mean(X[:, 0]) - 0.) < 0.05
assert np.abs(np.mean(X[:, 1]) - 1.) < 0.05
assert np.abs(np.mean(X[:, 2]) - 2.) < 0.05
assert_array_almost_equal(-np.log(p.pdf(X)), p.nll(X))
def test_join_non_theano():
h0 = Histogram(interpolation="linear", bins=30)
h1 = Histogram(interpolation="linear", bins=30)
h2 = Histogram(interpolation="linear", bins=30)
h0.fit(Normal(mu=0).rvs(10000, random_state=0))
h1.fit(Normal(mu=1).rvs(10000, random_state=1))
h2.fit(Normal(mu=2).rvs(10000, random_state=2))
p = Join(components=[h0, h1, h2])
assert p.ndim == 3
assert len(p.parameters_) == 0
X = p.rvs(10000, random_state=1)
assert X.shape == (10000, 3)
assert np.abs(np.mean(X[:, 0]) - 0.) < 0.05
assert np.abs(np.mean(X[:, 1]) - 1.) < 0.05
assert np.abs(np.mean(X[:, 2]) - 2.) < 0.05
assert_array_almost_equal(-np.log(p.pdf(X)), p.nll(X))
| bsd-3-clause | 550,063,724,693,675,600 | 31.688889 | 67 | 0.635622 | false |
Azure/azure-sdk-for-python | sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/models/data_lake_analytics_account_basic_py3.py | 1 | 3694 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class DataLakeAnalyticsAccountBasic(Resource):
"""A Data Lake Analytics account object, containing all information associated
with the named Data Lake Analytics account.
Variables are only populated by the server, and will be ignored when
sending a request.
    :ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: The resource location.
:vartype location: str
:ivar tags: The resource tags.
:vartype tags: dict[str, str]
:ivar account_id: The unique identifier associated with this Data Lake
Analytics account.
:vartype account_id: str
:ivar provisioning_state: The provisioning status of the Data Lake
Analytics account. Possible values include: 'Failed', 'Creating',
'Running', 'Succeeded', 'Patching', 'Suspending', 'Resuming', 'Deleting',
'Deleted', 'Undeleting', 'Canceled'
:vartype provisioning_state: str or
~azure.mgmt.datalake.analytics.account.models.DataLakeAnalyticsAccountStatus
:ivar state: The state of the Data Lake Analytics account. Possible values
include: 'Active', 'Suspended'
:vartype state: str or
~azure.mgmt.datalake.analytics.account.models.DataLakeAnalyticsAccountState
:ivar creation_time: The account creation time.
:vartype creation_time: datetime
:ivar last_modified_time: The account last modified time.
:vartype last_modified_time: datetime
:ivar endpoint: The full CName endpoint for this account.
:vartype endpoint: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'readonly': True},
'tags': {'readonly': True},
'account_id': {'readonly': True},
'provisioning_state': {'readonly': True},
'state': {'readonly': True},
'creation_time': {'readonly': True},
'last_modified_time': {'readonly': True},
'endpoint': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'account_id': {'key': 'properties.accountId', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'DataLakeAnalyticsAccountStatus'},
'state': {'key': 'properties.state', 'type': 'DataLakeAnalyticsAccountState'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'last_modified_time': {'key': 'properties.lastModifiedTime', 'type': 'iso-8601'},
'endpoint': {'key': 'properties.endpoint', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(DataLakeAnalyticsAccountBasic, self).__init__(**kwargs)
self.account_id = None
self.provisioning_state = None
self.state = None
self.creation_time = None
self.last_modified_time = None
self.endpoint = None
| mit | 5,611,381,449,626,307,000 | 40.977273 | 112 | 0.615864 | false |
jpirko/lnst | lnst/Recipes/ENRT/ConfigMixins/OffloadSubConfigMixin.py | 1 | 4345 | import copy
from lnst.Common.Parameters import Param
from lnst.Controller.RecipeResults import ResultLevel
from lnst.Recipes.ENRT.ConfigMixins.BaseSubConfigMixin import BaseSubConfigMixin
class OffloadSubConfigMixin(BaseSubConfigMixin):
"""
This class is an extension to the :any:`BaseEnrtRecipe` class that enables
offload configuration on the devices defined by :attr:`offload_nics`.
:param offload_combinations:
(optional test parameter) defines the offload features to be enabled
or disabled on the devices
"""
offload_combinations = Param(
default=(dict(gro="on", gso="on", tso="on", tx="on", rx="on"),)
)
@property
def offload_nics(self):
"""
The value of this property is a list of devices for which the offload
settings should be configured. It has to be defined by a derived class.
"""
raise NotImplementedError("Subclass must implement this property")
def generate_sub_configurations(self, config):
for parent_config in super().generate_sub_configurations(config):
for offload_settings in self.params.offload_combinations:
new_config = copy.copy(config)
new_config.offload_settings = offload_settings
yield new_config
def apply_sub_configuration(self, config):
super().apply_sub_configuration(config)
offload_settings = getattr(config, "offload_settings", None)
if offload_settings:
ethtool_offload_string = ""
for name, value in list(offload_settings.items()):
ethtool_offload_string += " %s %s" % (name, value)
for nic in self.offload_nics:
if "sctp_stream" in self.params.perf_tests:
nic.netns.run(
"iptables -I OUTPUT ! -o %s -p sctp -j DROP" % nic.name,
job_level=ResultLevel.NORMAL,
)
nic.netns.run(
"ethtool -K {} {}".format(nic.name, ethtool_offload_string),
job_level=ResultLevel.NORMAL,
)
def generate_sub_configuration_description(self, config):
description = super().generate_sub_configuration_description(config)
description.append(
"Currently configured offload combination: {}".format(
" ".join(
[
"{}={}".format(k, v)
for k, v in config.offload_settings.items()
]
)
)
)
return description
def remove_sub_configuration(self, config):
offload_settings = getattr(config, "offload_settings", None)
if offload_settings:
ethtool_offload_string = ""
for name, value in list(offload_settings.items()):
ethtool_offload_string += " %s %s" % (name, "on")
for nic in self.offload_nics:
if "sctp_stream" in self.params.perf_tests:
nic.netns.run(
"iptables -D OUTPUT ! -o %s -p sctp -j DROP" % nic.name,
job_level=ResultLevel.NORMAL,
)
# set all the offloads back to 'on' state
nic.netns.run(
"ethtool -K {} {}".format(nic.name, ethtool_offload_string),
job_level=ResultLevel.NORMAL,
)
return super().remove_sub_configuration(config)
def generate_flow_combinations(self, config):
for flows in super().generate_flow_combinations(config):
if self._check_test_offload_conflicts(config, flows):
# TODO log skip
continue
else:
yield flows
def _check_test_offload_conflicts(self, config, flows):
for flow in flows:
if (
flow.type == "udp_stream"
and config.offload_settings.get("gro", "on") == "off"
):
return True
elif (
flow.type == "sctp_stream"
and "off" in config.offload_settings.values()
and config.offload_settings.get("gso", "on") == "on"
):
return True
return False
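# Illustrative sketch (not part of the original module): a recipe class that
# mixes this in only has to expose the tuned devices through ``offload_nics``.
# ``self.matched.host1.eth0`` and ``self.matched.host2.eth0`` are hypothetical
# device references used purely for the example.
class HypotheticalOffloadRecipe(OffloadSubConfigMixin):
    @property
    def offload_nics(self):
        # devices whose offload features get toggled via "ethtool -K"
        return [self.matched.host1.eth0, self.matched.host2.eth0]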
| gpl-2.0 | 4,285,374,703,023,726,600 | 36.456897 | 80 | 0.551208 | false |
Mugginz/Thermofun | thermostat/subroutine.py | 1 | 2625 | # Helper functions for controller
import time, os, sys, threading, socket
THERMOMETER_URI = '/sys/bus/w1/devices/28-0000054b97a5/w1_slave'
LOGFILE = 'incidents.log'
# Change argument to 'w' to clear logs on startup.
with open(LOGFILE, 'a') as f:
pass
def eventLog(message):
try:
# Limit file size.
with open(LOGFILE, 'r+') as f:
line_count = sum(1 for line in f)
if line_count > 1023:
f.seek(0)
for i in range(line_count - 1023):
f.readline()
remainder = f.read()
f.seek(0)
f.write(remainder)
f.truncate()
entry = message + " @ " + time.strftime("%Y-%m-%d, %H:%M:%S") + "\n"
with open(LOGFILE, 'a+') as f:
f.write(entry)
except EnvironmentError:
return 1
return 0
# Use sysfs to read thermometer.
def getTemperature(dbg):
temperature = None
try:
with open(THERMOMETER_URI, 'r') as poll:
measure = poll.readline()
if measure.split()[11] == "YES":
measure = poll.readline()
temperature = (float(measure.split("t=")[1])) / 1000
if temperature > 80:
if dbg:
print("Thermometer error value " + str(temperature) + " reported.")
temperature = None
except EnvironmentError as e:
if dbg:
print(str(e))
except Exception as e:
if dbg:
print("Thermometer event, check logs.")
eventLog(str(e))
return temperature
# For loading thermal profile settings:
from flask_sqlalchemy import SQLAlchemy
# Share db with flask app.
# TODO:
# make absolute path
sys.path.append(os.path.dirname(os.getcwd()))
from control_panel import db, models
# Maybe not the best way to do this.
def getSchedules(dbg):
    timetable = []
    last_error = None
    for i in range(3):
        try:
            profile_active = models.Profile.query.filter_by(active=True).first()
            schedules = profile_active.schedules.all()
        # except SQLAlchemy.SQLAlchemyError as e:
        #     time.sleep(3)
        except Exception as e:
            # remember the failure; 'e' is cleared after the except block in Python 3
            last_error = e
            time.sleep(3)
        else:
            for s in schedules:
                timetable.append((s.time, s.temperature))
            break
    else:
        eventLog(str(last_error))
        if dbg:
            print("Database event, check logs.")
    return timetable
# Listen for changes to settings.
# msg should be an empty dictionary
def getNotification(soc, msg, lck, dbg):
while 1:
conn, addr = soc.accept()
if dbg:
print("Connected to " + str(addr[0]) + ":" + str(addr[1]))
try:
data = conn.recv(256)
except Exception as e:
if dbg:
print("Network event, check logs.")
eventLog(str(e))
else:
clean = data.strip()
settings = clean.split(' ')
lck.acquire(True)
msg[settings[0]] = settings[1]
lck.release()
if dbg:
print("Successfully received data.\n")
conn.shutdown(socket.SHUT_RD)
conn.close()
return 0
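# Illustrative client sketch (not part of the original module): another process
# can push a settings change to the listener in getNotification() by sending a
# plain "<key> <value>" string, e.g. sendSetting('target_temp', 21.5). The host,
# port and the "target_temp" key are assumptions made for the example.
def sendSetting(key, value, host='127.0.0.1', port=5005):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((host, port))
        s.sendall(('%s %s' % (key, value)).encode())
        s.shutdown(socket.SHUT_WR)
    finally:
        s.close()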
| gpl-2.0 | -4,010,749,887,447,146,000 | 24.240385 | 72 | 0.663238 | false |
glabilloy/fabrydb | fabrydb/conf/settings.py | 1 | 1697 | import os
from global_settings import *
try:
from local_settings import *
from local_settings_secret import *
except ImportError:
import warnings
warnings.warn('Local settings have not been found (src.conf.local_settings). Trying to import Heroku config...')
try:
from local_settings_heroku import *
from local_settings_heroku_secret import *
warnings.warn('Local Heroku config loaded')
except ImportError:
warnings.warn('Heroku local settings not found neither (src.conf.local_settings_heroku)')
# FORCE_SCRIPT_NAME overrides the interpreted 'SCRIPT_NAME' provided by the
# web server. since the URLs below are used for various purposes outside of
# the WSGI application (static and media files), these need to be updated to
# reflect this alteration
if FORCE_SCRIPT_NAME:
ADMIN_MEDIA_PREFIX = os.path.join(FORCE_SCRIPT_NAME, ADMIN_MEDIA_PREFIX[1:])
STATIC_URL = os.path.join(FORCE_SCRIPT_NAME, STATIC_URL[1:])
MEDIA_URL = os.path.join(FORCE_SCRIPT_NAME, MEDIA_URL[1:])
LOGIN_URL = os.path.join(FORCE_SCRIPT_NAME, LOGIN_URL[1:])
LOGOUT_URL = os.path.join(FORCE_SCRIPT_NAME, LOGOUT_URL[1:])
LOGIN_REDIRECT_URL = os.path.join(FORCE_SCRIPT_NAME, LOGIN_REDIRECT_URL[1:])
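# Example of the effect (illustrative values): with FORCE_SCRIPT_NAME = '/fabry'
# and a default STATIC_URL of '/static/', the STATIC_URL served to clients
# becomes '/fabry/static/'; the other URLs above are prefixed the same way.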
# This is used as a "seed" for various hashing algorithms. This must be set to
# a very long random string (40+ characters)
SECRET_KEY = 'read from secret settings'
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_PATH, '_site/static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
| bsd-2-clause | -1,389,080,983,223,519,000 | 33.632653 | 116 | 0.713612 | false |
dibaunaumh/tikal-corp-website | cms/menu.py | 1 | 8499 | from menus.menu_pool import menu_pool
from menus.base import Menu, NavigationNode, Modifier
from cms.utils import get_language_from_request
from cms.utils.moderator import get_page_queryset, get_title_queryset
from django.conf import settings
from django.contrib.sites.models import Site
from cms.utils.i18n import get_fallback_languages
from cms.exceptions import NoHomeFound
from cms.apphook_pool import apphook_pool
from cms.models.titlemodels import Title
def page_to_node(page, home, cut):
parent_id = page.parent_id
if home and page.parent_id == home.pk and cut:
parent_id = None
# possible fix for a possible problem
#if parent_id and not page.parent.get_calculated_status():
# parent_id = None # ????
attr = {'soft_root':page.soft_root,
'auth_required':page.login_required,
'reverse_id':page.reverse_id,}
if page.limit_visibility_in_menu == None:
attr['visible_for_authenticated'] = True
attr['visible_for_anonymous'] = True
else:
attr['visible_for_authenticated'] = page.limit_visibility_in_menu == 1
attr['visible_for_anonymous'] = page.limit_visibility_in_menu == 2
if page.pk == home.pk:
attr['is_home'] = True
extenders = []
if page.navigation_extenders:
extenders.append(page.navigation_extenders)
try:
app_name = page.get_application_urls(fallback=False)
except Title.DoesNotExist:
app_name = None
if app_name:
app = apphook_pool.get_apphook(app_name)
for menu in app.menus:
extenders.append(menu.__name__)
attr['redirect_url'] = page.get_redirect() # save redirect URL is any
if extenders:
attr['navigation_extenders'] = extenders
n = NavigationNode(
page.get_menu_title(),
page.get_absolute_url(),
page.pk,
parent_id,
attr=attr,
visible=page.in_navigation,
)
return n
class CMSMenu(Menu):
def get_nodes(self, request):
page_queryset = get_page_queryset(request)
site = Site.objects.get_current()
lang = get_language_from_request(request)
filters = {
'site':site,
}
if settings.CMS_HIDE_UNTRANSLATED:
filters['title_set__language'] = lang
pages = page_queryset.published().filter(**filters).order_by("tree_id", "lft")
ids = []
nodes = []
first = True
home_cut = False
home_children = []
home = None
for page in pages:
if not home:
home = page
page.home_pk_cache = home.pk
if first and page.pk != home.pk:
home_cut = True
if (page.parent_id == home.pk or page.parent_id in home_children) and home_cut:
page.home_cut_cache = True
home_children.append(page.pk)
if (page.pk == home.pk and home.in_navigation) or page.pk != home.pk:
first = False
ids.append(page.id)
titles = list(get_title_queryset(request).filter(page__in=ids, language=lang))
for page in pages:# add the title and slugs and some meta data
for title in titles:
if title.page_id == page.pk:
if not hasattr(page, "title_cache"):
page.title_cache = {}
page.title_cache[title.language] = title
nodes.append(page_to_node(page, home, home_cut))
ids.remove(page.pk)
if ids: # get fallback languages
fallbacks = get_fallback_languages(lang)
for l in fallbacks:
titles = list(get_title_queryset(request).filter(page__in=ids, language=l))
for title in titles:
for page in pages:# add the title and slugs and some meta data
if title.page_id == page.pk:
if not hasattr(page, "title_cache"):
page.title_cache = {}
page.title_cache[title.language] = title
nodes.append(page_to_node(page, home, home_cut))
ids.remove(page.pk)
break
if not ids:
break
return nodes
menu_pool.register_menu(CMSMenu)
class NavExtender(Modifier):
def modify(self, request, nodes, namespace, id, post_cut, breadcrumb):
if post_cut:
return nodes
exts = []
# rearrange the parent relations
home = None
for node in nodes:
if node.attr.get("is_home", False):
home = node
extenders = node.attr.get("navigation_extenders", None)
if extenders:
for ext in extenders:
if not ext in exts:
exts.append(ext)
for n in nodes:
if n.namespace == ext and not n.parent_id:# if home has nav extenders but home is not visible
if node.attr.get("is_home", False) and not node.visible:
n.parent_id = None
n.parent_namespace = None
n.parent = None
else:
n.parent_id = node.id
n.parent_namespace = node.namespace
n.parent = node
node.children.append(n)
removed = []
# find all not assigned nodes
for menu in menu_pool.menus.items():
if hasattr(menu[1], 'cms_enabled') and menu[1].cms_enabled and not menu[0] in exts:
for node in nodes:
if node.namespace == menu[0]:
removed.append(node)
if breadcrumb:
# if breadcrumb and home not in navigation add node
if breadcrumb and home and not home.visible:
home.visible = True
if request.path == home.get_absolute_url():
home.selected = True
else:
home.selected = False
# remove all nodes that are nav_extenders and not assigned
for node in removed:
nodes.remove(node)
return nodes
menu_pool.register_modifier(NavExtender)
class SoftRootCutter(Modifier):
def modify(self, request, nodes, namespace, id, post_cut, breadcrumb):
if post_cut or not settings.CMS_SOFTROOT:
return nodes
selected = None
root_nodes = []
for node in nodes:
if node.selected:
selected = node
if not node.parent:
root_nodes.append(node)
if selected:
if selected.attr.get("soft_root", False):
nodes = selected.get_descendants()
selected.parent = None
nodes = [selected] + nodes
else:
nodes = self.find_ancestors(selected, nodes)
nodes = self.find_children(selected, nodes)
else:
for node in root_nodes:
self.find_children(node, nodes)
return nodes
def find_children(self, node, nodes):
for n in node.children:
if n.attr.get("soft_root", False):
self.remove_children(n, nodes)
return nodes
def remove_children(self, node, nodes):
for n in node.children:
nodes.remove(n)
self.remove_children(n, nodes)
node.children = []
def find_ancestors(self, node, nodes):
is_root = False
if node.parent:
if node.parent.attr.get("soft_root", False):
is_root = True
nodes = node.parent.get_descendants()
node.parent.parent = None
nodes = [node.parent] + nodes
else:
nodes = self.find_ancestors(node.parent, nodes)
else:
for n in nodes:
if n != node and not n.parent:
self.find_children(n, nodes)
for n in node.children:
if n != node:
self.find_children(n, nodes)
if is_root:
n.parent = None
return nodes
menu_pool.register_modifier(SoftRootCutter)
| bsd-3-clause | -5,967,053,210,868,146,000 | 37.986239 | 117 | 0.53218 | false |
AdirShemesh/LibraryWiki | app/node_entities.py | 1 | 3155 | import json
from app.authorities import CODES
from requests import get
import xmltodict
class Entity:
def __init__(self, data):
self.data = data
self.properties = self._build_properties()
self.labels = self._build_labels()
def _build_properties(self):
        raise NotImplementedError
def _build_labels(self):
        raise NotImplementedError
class Authority(Entity):
def _build_properties(self):
# assign id + dumps the entire xml record in 'data' field
properties = {'data': json.dumps(self.data), 'id': self.data['001'][0]['#text']}
# assigns type
if '100' in self.data:
properties['type'] = 'Person'
elif '151' in self.data:
properties['type'] = 'Location'
elif '150' in self.data:
properties['type'] = 'Topic'
else:
properties['type'] = None
for tag, subfields in self.data.items():
for subfield in subfields:
CODES.get(tag) and properties.update(CODES[tag](subfield))
return properties
def _build_labels(self):
authority_type = self.properties['type']
if authority_type:
return 'Authority', authority_type
return 'Authority',
class Record(Entity):
def _build_properties(self):
return {'id': self.data['control']['recordid'], 'data': str(self.data),
'title': self.data['display']['title']}
def _build_labels(self):
return 'Record', self.data['display']['type']
class Photo(Record):
def __init__(self, data):
self.data = data
self._fl_url = "http://aleph.nli.org.il/X?op=find-doc&doc_num={}&base={}"
self._fl_url = self._build_fl_url()
super().__init__(data)
@property
def _fl_base(self):
return 'nnl03'
def _build_fl_url(self):
return self._fl_url.format(self.data['control']['sourcerecordid'], self._fl_base)
def _build_properties(self):
properties = super()._build_properties()
fl = self._get_fl()
if fl:
properties["fl"] = fl
return properties
def _build_labels(self):
return super()._build_labels() + ('Photo',)
def _get_fl(self):
fl = None
fields = xmltodict.parse(get(self._fl_url).content)['find-doc'].get('record')
if not fields:
return None
fields = fields['metadata']['oai_marc']['varfield']
for field in fields:
if not isinstance(field, dict) or not field.get('@id'):
continue
if field['@id'] == 'ROS':
fl = [sub['#text'] for sub in field['subfield'] if sub.get('@label') == 'd'] or None
break
return fl and fl[0]
class Portrait(Photo):
def _build_properties(self):
properties = super()._build_properties()
topic = self.data['facets'].get('topic')
if topic:
properties['topic'] = topic
return properties
@property
def _fl_base(self):
return 'nnl01'
def _build_labels(self):
return super()._build_labels() + ('Portrait',)
| gpl-2.0 | 9,033,349,775,582,352,000 | 28.485981 | 100 | 0.562282 | false |
cloudify-cosmo/cloudify-plugins-common | cloudify/utils.py | 1 | 15485 | ########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from contextlib import contextmanager
import logging
import os
import random
import shlex
import ssl
import string
import subprocess
import sys
import tempfile
import traceback
import StringIO
from distutils.version import LooseVersion
from cloudify import cluster, constants
from cloudify.state import workflow_ctx, ctx
from cloudify.exceptions import CommandExecutionException
CFY_EXEC_TEMPDIR_ENVVAR = 'CFY_EXEC_TEMP'
class ManagerVersion(object):
"""Cloudify manager version helper class."""
def __init__(self, raw_version):
"""Raw version, for example: 3.4.0-m1, 3.3, 3.2.1, 3.3-rc1."""
components = []
for x in raw_version.split('-')[0].split('.'):
try:
components += [int(x)]
except ValueError:
pass
if len(components) == 2:
components.append(0)
self.major = components[0]
self.minor = components[1]
self.service = components[2]
def greater_than(self, other):
"""Returns true if this version is greater than the provided one."""
if self.major > other.major:
return True
if self.major == other.major:
if self.minor > other.minor:
return True
if self.minor == other.minor and self.service > other.service:
return True
return False
def equals(self, other):
"""Returns true if this version equals the provided version."""
return self.major == other.major and self.minor == other.minor and \
self.service == other.service
def __str__(self):
return '{0}.{1}.{2}'.format(self.major, self.minor, self.service)
def __eq__(self, other):
return self.equals(other)
def __gt__(self, other):
return self.greater_than(other)
def __lt__(self, other):
return other > self
def __ge__(self, other):
return self > other or self == other
def __le__(self, other):
return self < other or self == other
def __ne__(self, other):
return self > other or self < other
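# Illustrative usage (not part of the original module): pre-release suffixes are
# ignored and a missing service component defaults to 0, so for example
# ManagerVersion('3.4.0-m1') > ManagerVersion('3.3') evaluates to True and
# ManagerVersion('3.3') == ManagerVersion('3.3.0') evaluates to True.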
def setup_logger(logger_name,
logger_level=logging.INFO,
handlers=None,
remove_existing_handlers=True,
logger_format=None,
propagate=True):
"""
:param logger_name: Name of the logger.
:param logger_level: Level for the logger (not for specific handler).
:param handlers: An optional list of handlers (formatter will be
overridden); If None, only a StreamHandler for
sys.stdout will be used.
:param remove_existing_handlers: Determines whether to remove existing
handlers before adding new ones
:param logger_format: the format this logger will have.
:param propagate: propagate the message the parent logger.
:return: A logger instance.
:rtype: logging.Logger
"""
if logger_format is None:
logger_format = '%(asctime)s [%(levelname)s] [%(name)s] %(message)s'
logger = logging.getLogger(logger_name)
if remove_existing_handlers:
for handler in logger.handlers:
logger.removeHandler(handler)
if not handlers:
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
handlers = [handler]
formatter = logging.Formatter(fmt=logger_format,
datefmt='%H:%M:%S')
for handler in handlers:
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logger_level)
if not propagate:
logger.propagate = False
return logger
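# Illustrative usage (not part of the original module): a DEBUG-level logger
# writing to stdout with the default format, then used like any stdlib logger:
#     log = setup_logger('my_plugin', logger_level=logging.DEBUG)
#     log.debug('connected to manager')
# 'my_plugin' is a hypothetical logger name chosen for the example.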
def format_exception(e):
"""Human-readable representation of an exception, as a bytestring.
The canonical way to print an exception, str(e), also made to handle
unicode exception messages in python 2.
Additionally, if the exception message is incompatible with utf-8,
(which should only happen in extreme cases, such as NUL bytes),
fallback to repr().
"""
try:
return str(e)
except UnicodeEncodeError:
try:
return unicode(e).encode('utf-8')
except UnicodeEncodeError:
return repr(e)
def get_manager_file_server_url():
"""
Returns the manager file server base url.
"""
if cluster.is_cluster_configured():
active_node_ip = cluster.get_cluster_active()
port = get_manager_rest_service_port()
if active_node_ip:
return 'https://{0}:{1}/resources'.format(active_node_ip, port)
return os.environ[constants.MANAGER_FILE_SERVER_URL_KEY]
def get_manager_file_server_root():
"""
Returns the host the manager REST service is running on.
"""
return os.environ[constants.MANAGER_FILE_SERVER_ROOT_KEY]
def get_manager_rest_service_host():
"""
Returns the host the manager REST service is running on.
"""
return os.environ[constants.REST_HOST_KEY]
def get_broker_ssl_cert_path():
"""
Returns location of the broker certificate on the agent
"""
if cluster.is_cluster_configured():
active_node = cluster.get_cluster_active() or {}
broker_ssl_cert_path = active_node.get('internal_cert_path')
if broker_ssl_cert_path:
return broker_ssl_cert_path
return os.environ[constants.BROKER_SSL_CERT_PATH]
# maintained for backwards compatibility
get_manager_ip = get_manager_rest_service_host
def get_manager_rest_service_port():
"""
Returns the port the manager REST service is running on.
"""
return int(os.environ[constants.REST_PORT_KEY])
def get_local_rest_certificate():
"""
Returns the path to the local copy of the server's public certificate
"""
return os.environ[constants.LOCAL_REST_CERT_FILE_KEY]
def _get_current_context():
for context in [ctx, workflow_ctx]:
try:
return context._get_current_object()
except RuntimeError:
continue
raise RuntimeError('Context required, but no operation or workflow '
'context available.')
def get_rest_token():
"""
Returns the auth token to use when calling the REST service
"""
return _get_current_context().rest_token
def get_tenant():
"""
Returns a dict with the details of the current tenant
"""
return _get_current_context().tenant
def get_tenant_name():
"""
Returns the tenant name to use when calling the REST service
"""
return _get_current_context().tenant_name
def get_is_bypass_maintenance():
"""
Returns true if workflow should run in maintenance mode.
"""
return os.environ.get(constants.BYPASS_MAINTENANCE, '').lower() == 'true'
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
"""
Generate and return a random string using upper case letters and digits.
"""
return ''.join(random.choice(chars) for _ in range(size))
def get_exec_tempdir():
"""
Returns the directory to use for temporary files, when the intention
is to place an executable file there.
This is needed because some production systems disallow executions from
the default temporary directory.
"""
return os.environ.get(CFY_EXEC_TEMPDIR_ENVVAR) or tempfile.gettempdir()
def create_temp_folder():
"""
Create a temporary folder.
"""
path_join = os.path.join(get_exec_tempdir(), id_generator(5))
os.makedirs(path_join)
return path_join
def exception_to_error_cause(exception, tb):
error = StringIO.StringIO()
etype = type(exception)
traceback.print_exception(etype, exception, tb, file=error)
return {
'message': str(exception),
'traceback': error.getvalue(),
'type': etype.__name__
}
class LocalCommandRunner(object):
def __init__(self, logger=None, host='localhost'):
"""
:param logger: This logger will be used for
printing the output and the command.
"""
logger = logger or setup_logger('LocalCommandRunner')
self.logger = logger
self.host = host
def run(self, command,
exit_on_failure=True,
stdout_pipe=True,
stderr_pipe=True,
cwd=None,
execution_env=None):
"""
Runs local commands.
:param command: The command to execute.
:param exit_on_failure: False to ignore failures.
:param stdout_pipe: False to not pipe the standard output.
:param stderr_pipe: False to not pipe the standard error.
:param cwd: the working directory the command will run from.
:param execution_env: dictionary of environment variables that will
be present in the command scope.
:return: A wrapper object for all valuable info from the execution.
:rtype: cloudify.utils.CommandExecutionResponse
"""
if isinstance(command, list):
popen_args = command
else:
popen_args = _shlex_split(command)
self.logger.debug('[{0}] run: {1}'.format(self.host, popen_args))
stdout = subprocess.PIPE if stdout_pipe else None
stderr = subprocess.PIPE if stderr_pipe else None
command_env = os.environ.copy()
command_env.update(execution_env or {})
p = subprocess.Popen(args=popen_args, stdout=stdout,
stderr=stderr, cwd=cwd, env=command_env)
out, err = p.communicate()
if out:
out = out.rstrip()
if err:
err = err.rstrip()
if p.returncode != 0:
error = CommandExecutionException(
command=command,
error=err,
output=out,
code=p.returncode)
if exit_on_failure:
raise error
else:
self.logger.error(error)
return CommandExecutionResponse(
command=command,
std_out=out,
std_err=err,
return_code=p.returncode)
class CommandExecutionResponse(object):
"""
Wrapper object for info returned when running commands.
:param command: The command that was executed.
:param std_out: The output from the execution.
:param std_err: The error message from the execution.
:param return_code: The return code from the execution.
"""
def __init__(self, command, std_out, std_err, return_code):
self.command = command
self.std_out = std_out
self.std_err = std_err
self.return_code = return_code
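# Illustrative helper (not part of the original module) showing typical use of
# LocalCommandRunner: run a command, let a non-zero exit raise
# CommandExecutionException, and hand back the captured stdout.
def _example_run_hostname():
    runner = LocalCommandRunner()
    response = runner.run('hostname')
    return response.std_out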
setup_default_logger = setup_logger # deprecated; for backwards compatibility
def _shlex_split(command):
lex = shlex.shlex(command, posix=True)
lex.whitespace_split = True
lex.escape = ''
return list(lex)
class Internal(object):
@staticmethod
def get_install_method(properties):
install_agent = properties.get('install_agent')
if install_agent is False:
return 'none'
elif install_agent is True:
return 'remote'
else:
return properties.get('agent_config', {}).get('install_method')
@staticmethod
def get_broker_ssl_options(ssl_enabled, cert_path):
if ssl_enabled:
ssl_options = {
'ca_certs': cert_path,
'cert_reqs': ssl.CERT_REQUIRED,
}
else:
ssl_options = {}
return ssl_options
@staticmethod
def get_broker_credentials(cloudify_agent):
"""Get broker credentials or their defaults if not set."""
default_user = 'guest'
default_pass = 'guest'
default_vhost = '/'
try:
broker_user = cloudify_agent.broker_user or default_user
broker_pass = cloudify_agent.broker_pass or default_pass
broker_vhost = cloudify_agent.broker_vhost or default_vhost
except AttributeError:
# Handle non-agent from non-manager (e.g. for manual tests)
broker_user = default_user
broker_pass = default_pass
broker_vhost = default_vhost
return broker_user, broker_pass, broker_vhost
@staticmethod
def _get_package_version(plugins_dir, package_name):
# get all plugin dirs
subdirs = next(os.walk(plugins_dir))[1]
# filter by package name
package_dirs = [dir for dir in subdirs if dir.startswith(package_name)]
# cut package name prefix
versions = [dir[len(package_name) + 1:] for dir in package_dirs]
# sort versions from new to old
versions.sort(key=lambda version: LooseVersion(version), reverse=True)
# return the latest
return versions[0]
@staticmethod
def plugin_prefix(package_name=None, package_version=None,
deployment_id=None, plugin_name=None, tenant_name=None,
sys_prefix_fallback=True):
tenant_name = tenant_name or ''
plugins_dir = os.path.join(sys.prefix, 'plugins', tenant_name)
prefix = None
if package_name:
package_version = package_version or Internal._get_package_version(
plugins_dir, package_name)
wagon_dir = os.path.join(
plugins_dir, '{0}-{1}'.format(package_name, package_version))
if os.path.isdir(wagon_dir):
prefix = wagon_dir
if prefix is None and deployment_id and plugin_name:
source_dir = os.path.join(
plugins_dir, '{0}-{1}'.format(deployment_id, plugin_name))
if os.path.isdir(source_dir):
prefix = source_dir
if prefix is None and sys_prefix_fallback:
prefix = sys.prefix
return prefix
@staticmethod
@contextmanager
def _change_tenant(ctx, tenant):
"""
Temporarily change the tenant the context is pretending to be.
This is not supported for anything other than snapshot restores.
If you are thinking of using this for something, it would be
better not to.
"""
if 'original_name' in ctx._context['tenant']:
raise RuntimeError(
'Overriding tenant name cannot happen while tenant name is '
'already being overridden.'
)
try:
ctx._context['tenant']['original_name'] = ctx.tenant_name
ctx._context['tenant']['name'] = tenant
yield
finally:
ctx._context['tenant']['name'] = (
ctx._context['tenant']['original_name']
)
ctx._context['tenant'].pop('original_name')
internal = Internal()
| apache-2.0 | 3,845,092,131,709,111,300 | 30.473577 | 79 | 0.613174 | false |
bodylabs/blmath | blmath/geometry/transform/rigid_transform.py | 1 | 2624 | def find_rigid_transform(a, b, visualize=False):
"""
Args:
a: a 3xN array of vertex locations
b: a 3xN array of vertex locations
Returns: (R,T) such that R.dot(a)+T ~= b
Based on Arun et al, "Least-squares fitting of two 3-D point sets," 1987.
See also Eggert et al, "Estimating 3-D rigid body transformations: a
comparison of four major algorithms," 1997.
"""
import numpy as np
import scipy.linalg
from blmath.numerics.matlab import col
if a.shape[0] != 3:
if a.shape[1] == 3:
a = a.T
if b.shape[0] != 3:
if b.shape[1] == 3:
b = b.T
assert a.shape[0] == 3
assert b.shape[0] == 3
a_mean = np.mean(a, axis=1)
b_mean = np.mean(b, axis=1)
a_centered = a - col(a_mean)
b_centered = b - col(b_mean)
c = a_centered.dot(b_centered.T)
u, s, v = np.linalg.svd(c, full_matrices=False)
v = v.T
R = v.dot(u.T)
if scipy.linalg.det(R) < 0:
if np.any(s == 0): # This is only valid in the noiseless case; see the paper
v[:, 2] = -v[:, 2]
R = v.dot(u.T)
else:
raise ValueError("find_rigid_transform found a reflection that it cannot recover from. Try RANSAC or something...")
T = col(b_mean - R.dot(a_mean))
if visualize != False:
from lace.mesh import Mesh
from lace.meshviewer import MeshViewer
mv = MeshViewer() if visualize is True else visualize
a_T = R.dot(a) + T
mv.set_dynamic_meshes([
Mesh(v=a.T, f=[]).set_vertex_colors('red'),
Mesh(v=b.T, f=[]).set_vertex_colors('green'),
Mesh(v=a_T.T, f=[]).set_vertex_colors('orange'),
])
return R, T
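def _example_recover_transform():
    """Illustrative check (not part of the original module): apply a known
    rotation about z plus a translation to random points and confirm that
    find_rigid_transform recovers both. All values here are made up for the
    example."""
    import numpy as np
    a = np.random.rand(3, 50)
    angle = 0.3
    R_true = np.array([
        [np.cos(angle), -np.sin(angle), 0.],
        [np.sin(angle), np.cos(angle), 0.],
        [0., 0., 1.],
    ])
    T_true = np.array([[1.], [2.], [3.]])
    b = R_true.dot(a) + T_true
    R, T = find_rigid_transform(a, b)
    assert np.allclose(R, R_true) and np.allclose(T, T_true)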
def find_rigid_rotation(a, b, allow_scaling=False):
"""
Args:
a: a 3xN array of vertex locations
b: a 3xN array of vertex locations
Returns: R such that R.dot(a) ~= b
See link: http://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem
"""
import numpy as np
import scipy.linalg
from blmath.numerics.matlab import col
assert a.shape[0] == 3
assert b.shape[0] == 3
if a.size == 3:
cx = np.cross(a.ravel(), b.ravel())
a = np.hstack((col(a), col(cx)))
b = np.hstack((col(b), col(cx)))
c = a.dot(b.T)
u, _, v = np.linalg.svd(c, full_matrices=False)
v = v.T
R = v.dot(u.T)
if scipy.linalg.det(R) < 0:
v[:, 2] = -v[:, 2]
R = v.dot(u.T)
if allow_scaling:
scalefactor = scipy.linalg.norm(b) / scipy.linalg.norm(a)
R = R * scalefactor
return R
| bsd-2-clause | 5,908,274,546,677,623,000 | 27.521739 | 127 | 0.552973 | false |
Exgibichi/statusquo | test/functional/httpbasics.py | 1 | 4850 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import StatusquoTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (StatusquoTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
self.setup_clean_chain = False
def setup_network(self):
self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock==None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #connection must be closed because statusquod should use keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| mit | -2,739,805,825,693,611,000 | 43.090909 | 110 | 0.624536 | false |
Colin-b/pyconfigparser | setup.py | 1 | 1185 | import os
from setuptools import setup, find_packages
this_dir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_dir, 'README.md'), 'r') as f:
long_description = f.read()
setup(name='pyconfigparser',
version='0.1',
author='Bounouar Colin',
maintainer='Bounouar Colin',
url='https://github.com/Colin-b/pyconfigparser',
description='Helper to parse configuration files.',
long_description=long_description,
download_url='https://github.com/Colin-b/pyconfigparser',
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers"
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Operating System :: Microsoft :: Windows :: Windows 7"
],
keywords=[
'configuration'
],
packages=find_packages(),
install_requires=[
],
platforms=[
'Windows'
]
)
| mit | 2,933,620,478,361,313,300 | 31.916667 | 65 | 0.587342 | false |
redhat-performance/tuned | tuned/plugins/plugin_sysctl.py | 2 | 5586 | import re
from . import base
from .decorators import *
import tuned.logs
from subprocess import *
from tuned.utils.commands import commands
import tuned.consts as consts
import errno
import os
log = tuned.logs.get()
DEPRECATED_SYSCTL_OPTIONS = [ "base_reachable_time", "retrans_time" ]
SYSCTL_CONFIG_DIRS = [ "/run/sysctl.d",
"/etc/sysctl.d" ]
class SysctlPlugin(base.Plugin):
"""
Plugin for applying custom sysctl options.
"""
def __init__(self, *args, **kwargs):
super(SysctlPlugin, self).__init__(*args, **kwargs)
self._has_dynamic_options = True
self._cmd = commands()
def _instance_init(self, instance):
instance._has_dynamic_tuning = False
instance._has_static_tuning = True
# FIXME: do we want to do this here?
# recover original values in case of crash
storage_key = self._storage_key(instance.name)
instance._sysctl_original = self._storage.get(storage_key, {})
if len(instance._sysctl_original) > 0:
log.info("recovering old sysctl settings from previous run")
self._instance_unapply_static(instance)
instance._sysctl_original = {}
self._storage.unset(storage_key)
instance._sysctl = instance.options
def _instance_cleanup(self, instance):
storage_key = self._storage_key(instance.name)
self._storage.unset(storage_key)
def _instance_apply_static(self, instance):
for option, value in list(instance._sysctl.items()):
original_value = _read_sysctl(option)
if original_value is None:
log.error("sysctl option %s will not be set, failed to read the original value."
% option)
else:
new_value = self._variables.expand(
self._cmd.unquote(value))
new_value = self._process_assignment_modifiers(
new_value, original_value)
if new_value is not None:
instance._sysctl_original[option] = original_value
_write_sysctl(option, new_value)
storage_key = self._storage_key(instance.name)
self._storage.set(storage_key, instance._sysctl_original)
if self._global_cfg.get_bool(consts.CFG_REAPPLY_SYSCTL, consts.CFG_DEF_REAPPLY_SYSCTL):
log.info("reapplying system sysctl")
_apply_system_sysctl()
def _instance_verify_static(self, instance, ignore_missing, devices):
ret = True
# override, so always skip missing
ignore_missing = True
for option, value in list(instance._sysctl.items()):
curr_val = _read_sysctl(option)
value = self._process_assignment_modifiers(self._variables.expand(value), curr_val)
if value is not None:
if self._verify_value(option, self._cmd.remove_ws(value), self._cmd.remove_ws(curr_val), ignore_missing) == False:
ret = False
return ret
def _instance_unapply_static(self, instance, full_rollback = False):
for option, value in list(instance._sysctl_original.items()):
_write_sysctl(option, value)
def _apply_system_sysctl():
files = {}
for d in SYSCTL_CONFIG_DIRS:
try:
flist = os.listdir(d)
except OSError:
continue
for fname in flist:
if not fname.endswith(".conf"):
continue
if fname not in files:
files[fname] = d
for fname in sorted(files.keys()):
d = files[fname]
path = "%s/%s" % (d, fname)
_apply_sysctl_config_file(path)
_apply_sysctl_config_file("/etc/sysctl.conf")
def _apply_sysctl_config_file(path):
log.debug("Applying sysctl settings from file %s" % path)
try:
with open(path, "r") as f:
for lineno, line in enumerate(f, 1):
_apply_sysctl_config_line(path, lineno, line)
log.debug("Finished applying sysctl settings from file %s"
% path)
except (OSError, IOError) as e:
if e.errno != errno.ENOENT:
log.error("Error reading sysctl settings from file %s: %s"
% (path, str(e)))
def _apply_sysctl_config_line(path, lineno, line):
line = line.strip()
if len(line) == 0 or line[0] == "#" or line[0] == ";":
return
tmp = line.split("=", 1)
if len(tmp) != 2:
log.error("Syntax error in file %s, line %d"
% (path, lineno))
return
option, value = tmp
option = option.strip()
if len(option) == 0:
log.error("Syntax error in file %s, line %d"
% (path, lineno))
return
value = value.strip()
_write_sysctl(option, value, ignore_missing = True)
def _get_sysctl_path(option):
return "/proc/sys/%s" % option.replace(".", "/")
def _read_sysctl(option):
path = _get_sysctl_path(option)
try:
with open(path, "r") as f:
line = ""
for i, line in enumerate(f):
if i > 0:
log.error("Failed to read sysctl parameter '%s', multi-line values are unsupported"
% option)
return None
value = line.strip()
log.debug("Value of sysctl parameter '%s' is '%s'"
% (option, value))
return value
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
log.error("Failed to read sysctl parameter '%s', the parameter does not exist"
% option)
else:
log.error("Failed to read sysctl parameter '%s': %s"
% (option, str(e)))
return None
def _write_sysctl(option, value, ignore_missing = False):
path = _get_sysctl_path(option)
if os.path.basename(path) in DEPRECATED_SYSCTL_OPTIONS:
log.error("Refusing to set deprecated sysctl option %s"
% option)
return False
try:
log.debug("Setting sysctl parameter '%s' to '%s'"
% (option, value))
with open(path, "w") as f:
f.write(value)
return True
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
log_func = log.debug if ignore_missing else log.error
log_func("Failed to set sysctl parameter '%s' to '%s', the parameter does not exist"
% (option, value))
else:
log.error("Failed to set sysctl parameter '%s' to '%s': %s"
% (option, value, str(e)))
return False
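# Illustrative usage (not part of the original module): the helpers above map a
# dotted sysctl name onto /proc/sys, so _read_sysctl('vm.swappiness') returns the
# current value as a string and _write_sysctl('vm.swappiness', '10') returns True
# on success (writing requires sufficient privileges).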
| gpl-2.0 | -8,132,232,217,691,364,000 | 29.692308 | 118 | 0.674902 | false |
UCSD-E4E/aerial_lidar | catkin_ws/src/laser_tfs/src/nodes/plot.py | 1 | 1482 | #!/usr/bin/env python
import roslib
roslib.load_manifest('laser_tfs')
import rospy
import math
import matplotlib.pyplot as plt
import numpy as np
import tf
from geometry_msgs.msg import PoseWithCovarianceStamped
def handle_fcu_pose(msg):
br = tf.TransformBroadcaster()
global count
p = msg.pose.pose.position # capture translational position
o = msg.pose.pose.orientation # capture quaternion
if handle_fcu_pose.first_run:
handle_fcu_pose.origin = p
count = 0
handle_fcu_pose.first_run = False
x_p = p.x - handle_fcu_pose.origin.x
y_p = p.y - handle_fcu_pose.origin.y
# print "x " + str(x_p)
# print "y " + str(y_p)
# print " "
plt.ion()
xdata = [0]*10
ydata = [0]*10
plt.ylim([-150,150])
plt.xlim([-150,150])
if count < 10:
count = count + 1
line, = plt.plot(xdata,ydata, 'ro')
if count == 10:
xdata.append(x_p)
ydata.append(y_p)
line.set_xdata(np.arange(len(xdata)))
line.set_xdata(xdata)
line.set_ydata(np.arange(len(ydata)))
line.set_ydata(ydata)
count = 0
plt.draw()
del ydata[0]
del xdata[0]
if __name__ == '__main__':
rospy.init_node('test_broadcaster')
rospy.Subscriber('/mavros/global_position/local',
PoseWithCovarianceStamped,
handle_fcu_pose)
handle_fcu_pose.first_run = True
handle_fcu_pose.origin = None
rospy.spin()
| mit | 5,908,898,019,131,150,000 | 24.118644 | 66 | 0.597841 | false |
nityansuman/Python-3 | data_structures/tuples.py | 1 | 1373 | # Tuple data structure
sample_tuple = ('Glenn', 'Sally', 'John')
print(sample_tuple)
# First Element of the tuple
print(sample_tuple[0])
y = (1, 9, 15) # New tuple
print(max(y)) # Max value of the tuple
# Tuples are immutable like strings; you cannot change the value of a tuple
# You cannot sort, reverse or append a tuple in place
t = tuple()
print(dir(t)) # Check method applicable on tuples
(x, y) = (4, 'Nityan')
(a, b) = (99, 'Ram')
(c, d) = ('Nityan', '4')
print(x, y)
print(a, b)
print(c, d)
# Convert dictionary into tuples
mdict = {'root': 1, 'roll': 1, 'nityan': 2}
t = mdict.items()
print(t)
# Loop through tuples with two iteratives
for (i, j) in t:
print(i, j)
# Tuples are comparable in a weird way
# Comparison starts at the first items and moves on only while they are equal; it stops as soon as a pair differs, giving True or False
print((0, 1, 2) < (5, 4, 2))
# 0<5: So done.
# True
print((2, 4, 6) < (2, 8, 4))
# 2-2 is same. So continue ---> 4<8: So done
# True
print((1, 6) > (4, 3))
# since 1!>4: So done
# False
print(('John', 'Nityan') > ('Apple', 'Mango'))
# True: by ASCII order 'J' > 'A', so the comparison is decided by the first items
# Sorting the tuple using its key
t2 = sorted(d.items(), reverse=True)
print(t2)
# Sort by value: reversing the data in tuple i.e., descending order
t2.sort(reverse=True)
print(t2)
| gpl-3.0 | 3,198,536,309,866,640,000 | 21.672414 | 126 | 0.622724 | false |
SKIRT/PTS | magic/misc/imfit.py | 1 | 12412 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.misc.imfit Working with IMfit
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Code for reading in and analyzing output of imfit
import glob
import math
import numpy as np
# Import the relevant PTS classes and modules
from . import imfit_funcs as imfuncs
# -----------------------------------------------------------------
# dictionary mapping imfit function short names (as found in the config/parameter file) to
# corresponding 1-D Python functions in imfit_funcs.py, along with some useful information:
# "function" = corresponding imfit_funcs.py function, if one exists
# "nSkip" = the number of 2D-related parameters to skip (e.g., PA, ellipticity),
# "ell" = index for ellipticity parameter, if it exists,
# "a" = index or indices for semi-major-axis parameters (r_e, h, sigma, etc.)
imfitFunctionMap = {"Exponential": {"function": imfuncs.Exponential, "nSkip": 2, "ell": 1, "a": [3]},
"Exponential_GenEllipse": {"function": imfuncs.Exponential, "nSkip": 3, "ell": 1, "a": [4]},
"Sersic": {"function": imfuncs.Sersic, "nSkip": 2, "ell": 1, "a": [4]},
"Sersic_GenEllipse": {"function": imfuncs.Sersic, "nSkip": 3, "ell": 1, "a": [5]},
"Gaussian": {"function": imfuncs.Gauss, "nSkip": 2, "ell": 1, "a": [3]},
"GaussianRing": {"function": imfuncs.GaussRing, "nSkip": 2, "ell": 1, "a": [3,4]},
"GaussianRing2Side": {"function": imfuncs.GaussRing2Side, "nSkip": 2, "ell": 1, "a": [3,4,5]},
"Moffat": {"function": imfuncs.Moffat, "nSkip": 2, "ell": 1, "a": [3]},
"BrokenExponential": {"function": imfuncs.BrokenExp, "nSkip": 2, "ell": 1, "a": [3,4,5]}}
# -----------------------------------------------------------------
class Imfit(object):
"""
This class ...
"""
def __init__(self, path):
"""
This function ...
:param path:
"""
# -----------------------------------------------------------------
def ChopComments(theLine):
return theLine.split("#")[0]
# -----------------------------------------------------------------
def GetFunctionImageNames(baseName, funcNameList):
"""Generate a list of FITS filenames as would be created by makeimage in "--output-functions"
mode.
"""
nImages = len(funcNameList)
imageNameList = [ "%s%d_%s.fits" % (baseName, i + 1, funcNameList[i]) for i in range(nImages) ]
return imageNameList
# -----------------------------------------------------------------
def ReadImfitConfigFile( fileName, minorAxis=False, pix=1.0, getNames=False, X0=0.0 ):
"""Function to read and parse an imfit-generated parameter file
(or input config file) and return a tuple consisting of:
(list of 1-D imfit_funcs functions, list of lists of parameters).
pix = scale in arcsec/pixel, if desired for plotting vs radii in arcsec.
We assume that all functions have a center at x = 0; this can be changed by setting
X0.
Returns tuple of (functionList, trimmedParameterList)
If getNames == True:
Returns tuple of (functionNameList, functionList, trimmedParameterList)
"""
dlines = [ line for line in open(fileName) if len(line.strip()) > 0 and line[0] != "#" ]
funcNameList = []
paramMetaList = []
currentParamList = []
nLines = len(dlines)
for line in dlines:
trimmedLine = ChopComments(line)
#print(trimmedLine)
if trimmedLine.find("X0") == 0:
continue
if trimmedLine.find("Y0") == 0:
continue
if trimmedLine.find("FUNCTION") == 0:
# if this isn't the first function, store the previous set of parameters
if len(currentParamList) > 0:
paramMetaList.append(currentParamList)
# make a new parameter list for the new function
currentParamList = [X0]
pp = trimmedLine.split()
fname = pp[1].strip()
funcNameList.append(fname)
continue
else:
pp = trimmedLine.split()
newValue = float(pp[1])
currentParamList.append(newValue)
# ensure that final set of parameters get stored:
paramMetaList.append(currentParamList)
# process function list to remove unneeded parameters (and convert size measures
# from major-axis to minor-axis, if requested)
funcList = [ imfitFunctionMap[fname]["function"] for fname in funcNameList ]
trimmedParamList = []
nFuncs = len(funcList)
for i in range(nFuncs):
fname = funcNameList[i]
nSkipParams = imfitFunctionMap[fname]["nSkip"]
fullParams = paramMetaList[i]
# calculate scaling factor for minor-axis values, if needed
if minorAxis is True:
print(fname)
ellIndex = imfitFunctionMap[fname]["ell"]
print(ellIndex)
ell = fullParams[ellIndex+1]
q = 1.0 - ell
else:
q = 1.0
print(i, fname)
smaIndices = imfitFunctionMap[fname]["a"]
# convert length values to arcsec and/or minor-axis, if needed,
for smaIndex in smaIndices:
# +1 to account for X0 value at beginning of parameter list
fullParams[smaIndex+1] = pix*q*fullParams[smaIndex+1]
# construct the final 1-D parameter set for this function: X0 value, followed
# by post-2D-shape parameters
trimmedParams = [fullParams[0]]
trimmedParams.extend(fullParams[nSkipParams+1:])
trimmedParamList.append(trimmedParams)
if getNames is True:
return (funcNameList, funcList, trimmedParamList)
else:
return (funcList, trimmedParamList)
# -----------------------------------------------------------------
# Code for reading output of bootstrap resampling and MCMC chains
def GetBootstrapOutput(filename):
"""Reads imfit's bootstrap-resampling output when saved using the
--save-bootstrap command-line option.
Parameters
----------
filename : str
name of file with bootstrap-resampling output
Returns
-------
(column_names, data_array) : tuple of (list, np.ndarray)
column_names = list of column names (strings)
data_array = numpy array of parameter values
with shape = (n_iterations, n_parameters)
"""
# get first 100 lines
# FIXME: file *could* be shorter than 100 lines; really complicated
# model could have > 100 lines of header...
with open(filename) as theFile:
firstLines = [next(theFile) for x in range(100)]
# find header line with column names and extract column names
for i in range(len(firstLines)):
if firstLines[i].find("# Bootstrap resampling output") >= 0:
columnNamesIndex = i + 1
break
columnNames = firstLines[columnNamesIndex][1:].split()
for i in range(len(columnNames)):
if columnNames[i] == "likelihood":
nParamColumns = i
break
# get the data
d = np.loadtxt(filename)
return (columnNames, d)
# -----------------------------------------------------------------
def GetSingleChain(filename, getAllColumns=False):
"""Reads a single MCMC chain output file and returns a tuple of column names
and a numpy array with the data.
Parameters
----------
filename : str
name of file with MCMC output chain
getAllColumns: bool, optional
if False [default], only model parameter-value columns are retrieved;
if True, all output columns (including MCMC diagnostics) are retrieved
Returns
-------
(column_names, data_array) : tuple of (list, np.ndarray)
column_names = list of column names (strings)
data_array = numpy array of parameter values
with shape = (n_iterations, n_parameters)
"""
# get first 100 lines
# FIXME: file *could* be shorter than 100 lines; really complicated
# model could have > 100 lines of header...
with open(filename) as theFile:
firstLines = [next(theFile) for x in range(100)]
# find header line with column names and extract column names
for i in range(len(firstLines)):
if firstLines[i].find("# Column Headers") >= 0:
columnNamesIndex = i + 1
break
columnNames = firstLines[columnNamesIndex][1:].split()
for i in range(len(columnNames)):
if columnNames[i] == "likelihood":
nParamColumns = i
break
# get data for all columns, or just the model parameters?
whichCols = None
if not getAllColumns:
whichCols = list(range(nParamColumns))
outputColumnNames = columnNames[:nParamColumns]
else:
whichCols = None
outputColumnNames = columnNames
# get the data
d = np.loadtxt(filename, usecols=whichCols)
return (outputColumnNames, d)
# -----------------------------------------------------------------
def MergeChains( fname_root, maxChains=None, getAllColumns=False, start=10000, last=None,
secondHalf=False ):
"""
Reads and concatenates all MCMC output chains with filenames = fname_root.*.txt,
using data from t=start onwards. By default, all generations from each chain
are extracted; this can be modified with the start, last, or secondHalf keywords.
Parameters
----------
fname_root : str
root name of output chain files (e.g., "mcmc_out")
maxChains : int or None, optional
maximum number of chain files to read [default = None = read all files]
getAllColumns : bool, optional
if False [default], only model parameter-value columns are retrieved;
if True, all output columns (including MCMC diagnostics) are retrieved
start : int, optional
extract samples from each chain beginning with time = start
ignored if "secondHalf" is True or if "last" is not None
last : int or None, optional
extract last N samples from each chain
ignored if "secondHalf" is True
secondHalf : bool, optional
if True, only the second half of each chain is extracted
if False [default],
Returns
-------
(column_names, data_array) : tuple of (list, np.ndarray)
column_names = list of column names (strings)
data_array = numpy array of parameter values
with shape = (n_samples, n_parameters)
"""
# construct list of filenames
if maxChains is None:
globPattern = "{0}.*.txt".format(fname_root)
filenames = glob.glob(globPattern)
else:
filenames = ["{0}.{1}.txt".format(fname_root, n) for n in range(maxChains)]
nFiles = len(filenames)
# get the first chain so we can tell how long the chains are
(colNames, dd) = GetSingleChain(filenames[0], getAllColumns=getAllColumns)
nGenerations = dd.shape[0]
# figure out what part of full chain to extract
if secondHalf is True:
startTime = int(math.floor(nGenerations / 2))
elif last is not None:
startTime = -last
else:
startTime = start
# get first chain and column names; figure out if we get all columns or just
# model parameters
if (startTime >= nGenerations):
txt = "WARNING: # generations in MCMC chain file {0} ({1:d}) is <= ".format(filenames[0],
nGenerations)
txt += "requested start time ({0:d})!\n".format(startTime)
print(txt)
return None
dd_final = dd[startTime:,:]
if getAllColumns is False:
nParamColumns = len(colNames)
whichCols = list(range(nParamColumns))
else:
whichCols = None
# get and append rest of chains if more than 1 chain-file was requested
if nFiles > 1:
for i in range(1, nFiles):
dd_next = np.loadtxt(filenames[i], usecols=whichCols)
dd_final = np.concatenate((dd_final, dd_next[startTime:,:]))
return (colNames, dd_final)
# -----------------------------------------------------------------
| agpl-3.0 | -2,269,840,293,426,458,600 | 35.395894 | 102 | 0.593183 | false |
google/iree | third_party/format_diff/format_diff.py | 1 | 5904 | #!/usr/bin/env python3
#
#===- format_diff.py - Diff Reformatter ----*- python3 -*--===#
#
# This file is licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
"""
This script reads input from a unified diff and reformats all the changed
lines. This is useful to reformat all the lines touched by a specific patch.
Example usage:
git diff -U0 HEAD^ | python3 format_diff.py yapf -i
git diff -U0 HEAD^ | python3 format_diff.py clang-format -i
svn diff --diff-cmd=diff -x-U0 | python3 format_diff.py -p0 clang-format -i
General usage:
<some diff> | python3 format_diff.py [--regex] [--lines-style] [-p] binary [args for binary]
It should be noted that the filename contained in the diff is used unmodified
to determine the source file to update. Users calling this script directly
should be careful to ensure that the path in the diff is correct relative to the
current working directory.
"""
import argparse
import difflib
import io
import re
import subprocess
import sys
BINARY_TO_DEFAULT_REGEX = {
"yapf": r".*\.py",
"clang-format":
r".*\.(cpp|cc|c\+\+|cxx|c|cl|h|hh|hpp|hxx|m|mm|inc|js|ts|proto|"
r"protodevel|java|cs)",
}
def parse_arguments():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"binary",
help="Location of binary to use for formatting. This controls the "
"default values of --regex and --lines-style. If binary isn't 'yapf' "
"or 'clang-format' then --regex and --lines-style are required.")
parser.add_argument(
"--regex",
metavar="PATTERN",
default=None,
help="Regex pattern for selecting file paths to reformat from the piped "
"diff. This flag is required if 'binary' is not set to 'yapf' or "
"'clang-format'. Otherwise, this flag overrides the default pattern that "
"--binary sets.")
parser.add_argument(
"--lines-style",
default=None,
help="How to style the 'lines' argument for the given binary. Can be set "
"to 'yapf' or 'clang-format'. This flag is required if 'binary' is not "
"set to 'yapf' or 'clang-format'.")
parser.add_argument(
"-p",
metavar="NUM",
default=1,
help="Strip the smallest prefix containing P slashes. Set to 0 if "
"passing `--no-prefix` to `git diff` or using `svn`")
# Parse and error-check arguments
args, binary_args = parser.parse_known_args()
if args.binary not in BINARY_TO_DEFAULT_REGEX:
if not args.regex:
raise parser.error("If 'binary' is not 'yapf' or 'clang-format' then "
"--regex must be set.")
if not args.lines_style:
raise parser.error("If 'binary' is not 'yapf' or 'clang-format' then "
"--lines-style must be set.")
else:
# Set defaults based off of 'binary'.
if not args.regex:
args.regex = BINARY_TO_DEFAULT_REGEX[args.binary]
if not args.lines_style:
args.lines_style = args.binary
if args.lines_style not in ["yapf", "clang-format"]:
raise parser.error(f"Unexpected value for --line-style {args.lines_style}")
return args, binary_args
def main():
args, binary_args = parse_arguments()
# Extract changed lines for each file.
filename = None
lines_by_file = {}
for line in sys.stdin:
# Match all filenames.
match = re.search(fr"^\+\+\+\ (.*?/){{{args.p}}}(\S*)", line)
if match:
filename = match.group(2)
if filename is None:
continue
# Match all filenames specified by --regex.
if not re.match(f"^{args.regex}$", filename):
continue
# Match unified diff line numbers.
match = re.search(r"^@@.*\+(\d+)(,(\d+))?", line)
if match:
start_line = int(match.group(1))
line_count = 1
if match.group(3):
line_count = int(match.group(3))
if line_count == 0:
continue
end_line = start_line + line_count - 1
if args.lines_style == "yapf":
lines = ["--lines", f"{start_line}-{end_line}"]
elif args.lines_style == "clang-format":
lines = ["-lines", f"{start_line}:{end_line}"]
lines_by_file.setdefault(filename, []).extend(lines)
# Pass the changed lines to 'binary' alongside any unparsed args (e.g. -i).
for filename, lines in lines_by_file.items():
command = [args.binary, filename]
command.extend(lines)
command.extend(binary_args)
print(f"Running `{' '.join(command)}`")
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=None,
stdin=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = p.communicate()
if p.returncode != 0:
sys.exit(p.returncode)
# If the formatter printed the formatted code to stdout then print out
# a unified diff between the formatted and unformatted code.
# If flags like --verbose are passed to the binary then the diffs this
# produces won't be particularly helpful.
formatted_code = io.StringIO(stdout).readlines()
if len(formatted_code):
with open(filename) as f:
unformatted_code = f.readlines()
diff = difflib.unified_diff(unformatted_code,
formatted_code,
fromfile=filename,
tofile=filename,
fromfiledate="(before formatting)",
tofiledate="(after formatting)")
diff_string = "".join(diff)
if len(diff_string) > 0:
sys.stdout.write(diff_string)
if __name__ == "__main__":
main()
| apache-2.0 | -8,796,428,589,158,364,000 | 34.781818 | 94 | 0.612297 | false |
Azure/azure-sdk-for-python | sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/_backup_operation_results_operations.py | 1 | 5214 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BackupOperationResultsOperations(object):
"""BackupOperationResultsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicesbackup.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
vault_name, # type: str
resource_group_name, # type: str
operation_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Provides the status of the delete operations such as deleting backed up item. Once the
operation has started, the
status code in the response would be Accepted. It will continue to be in this state till it
reaches completion. On
successful completion, the status code will be OK. This method expects OperationID as an
argument. OperationID is
part of the Location header of the operation response.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param operation_id: OperationID which represents the operation.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupOperationResults/{operationId}'} # type: ignore
| mit | -1,931,421,254,802,879,700 | 45.553571 | 205 | 0.664173 | false |
skosukhin/spack | var/spack/repos/builtin/packages/py-python-gitlab/package.py | 1 | 1868 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPythonGitlab(PythonPackage):
"""Python wrapper for the GitLab API"""
homepage = "https://github.com/gpocentek/python-gitlab"
url = "https://pypi.io/packages/source/p/python-gitlab/python-gitlab-0.19.tar.gz"
version('0.19', '6564d7204c2b7e65c54b3fa89ec91df6')
version('0.18', 'c31dae1d0bab3966cb830f2308a96308')
version('0.17', '8a69c602e07dd4731856531d79bb58eb')
version('0.16', 'e0421d930718021e7d796d74d2ad7194')
depends_on('py-setuptools', type='build')
depends_on('py-six', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| lgpl-2.1 | 5,940,696,918,200,540,000 | 44.560976 | 90 | 0.67773 | false |
troywatson/Python-Grammar-Checker | fartherVsFurther/CountSyllables.py | 1 | 2787 |
'''
Count Syllables v1.0
A simple class to count syllables using a dictionary method
This class will attempt to calculate syllables of words not found in dictionary
'''
class CountSyllables(object):
def __init__(self):
# variables- instantiated
self.prepareData()
def generateDict(self):
# converts a pronunciation dictionary into a syllable count dictionary
fileName = open("dict.txt", 'r')
print 'openning file...'
data = fileName.read()
fileName.close()
print 'splitting up data by entries...'
words = data.split("\n")
outputFile = open("syllables.txt", 'w')
for entry in words:
entry = entry.split(" ")
word = entry[0]
pronunciation = entry[1]
sections = pronunciation.split(" ")
count = 0
for section in sections:
if self.isVowel(section):
count+=1
if count == 0: count = 1
outputFile.write(word.lower() + ',' + str(count) + '\n')
outputFile.close()
def isVowel(self, word):
# a simple function to find whether a word contains a vowel or not
word = word.lower()
if 'a' in word or 'e' in word or 'i' in word or 'o' in word or 'u' in word:
return True
else: return False
def prepareData(self):
fileName = open('SyllableCounter/syllables.txt', 'r')
self.dict = {}
data = fileName.read()
fileName.close()
lines = data.split('\n')
for line in lines:
entry = line.split(',')
if len(entry[0]) < 1: continue
if entry[0] in self.dict: continue
else: self.dict[entry[0]] = entry[1]
def count(self, word):
if word in self.dict: return self.dict[word]
syllCount = 0
for letter in word:
if self.isVowel(letter): syllCount += 1
if syllCount < 1: return 1
else: return syllCount
def main():
test = CountSyllables()
print test.count('elephant')
if __name__ == '__main__':main()
| mit | -3,347,954,875,784,145,400 | 35.671053 | 116 | 0.425188 | false |
aronasorman/kolibri | kolibri/auth/test/test_api.py | 1 | 18028 | from __future__ import absolute_import, print_function, unicode_literals
import collections
import factory
import sys
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase as BaseTestCase
from django.contrib.sessions.models import Session
from .. import models
DUMMY_PASSWORD = "password"
# A weird hack because of http://bugs.python.org/issue17866
if sys.version_info >= (3,):
class APITestCase(BaseTestCase):
def assertItemsEqual(self, *args, **kwargs):
self.assertCountEqual(*args, **kwargs)
else:
class APITestCase(BaseTestCase):
pass
class FacilityFactory(factory.DjangoModelFactory):
class Meta:
model = models.Facility
name = factory.Sequence(lambda n: "Rock N' Roll High School #%d" % n)
class ClassroomFactory(factory.DjangoModelFactory):
class Meta:
model = models.Classroom
name = factory.Sequence(lambda n: "Basic Rock Theory #%d" % n)
class LearnerGroupFactory(factory.DjangoModelFactory):
class Meta:
model = models.LearnerGroup
name = factory.Sequence(lambda n: "Group #%d" % n)
class FacilityUserFactory(factory.DjangoModelFactory):
class Meta:
model = models.FacilityUser
facility = factory.SubFactory(FacilityFactory)
username = factory.Sequence(lambda n: 'user%d' % n)
password = factory.PostGenerationMethodCall('set_password', DUMMY_PASSWORD)
class DeviceOwnerFactory(factory.DjangoModelFactory):
class Meta:
model = models.DeviceOwner
username = factory.Sequence(lambda n: 'deviceowner%d' % n)
password = factory.PostGenerationMethodCall('set_password', DUMMY_PASSWORD)
class LearnerGroupAPITestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.classrooms = [ClassroomFactory.create(parent=self.facility) for _ in range(3)]
self.learner_groups = []
for classroom in self.classrooms:
self.learner_groups += [LearnerGroupFactory.create(parent=classroom) for _ in range(5)]
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
def test_learnergroup_list(self):
response = self.client.get(reverse('learnergroup-list'), format='json')
expected = [collections.OrderedDict((
('id', group.id),
('name', group.name),
('parent', group.parent.id),
)) for group in self.learner_groups]
self.assertItemsEqual(response.data, expected)
def test_learnergroup_detail(self):
response = self.client.get(reverse('learnergroup-detail', kwargs={'pk': self.learner_groups[0].id}), format='json')
expected = {
'id': self.learner_groups[0].id,
'name': self.learner_groups[0].name,
'parent': self.learner_groups[0].parent.id,
}
self.assertDictEqual(response.data, expected)
def test_parent_in_queryparam_with_one_id(self):
classroom_id = self.classrooms[0].id
response = self.client.get(reverse('learnergroup-list'), {'parent': classroom_id},
format='json')
expected = [collections.OrderedDict((
('id', group.id),
('name', group.name),
('parent', group.parent.id),
)) for group in self.learner_groups if group.parent.id == classroom_id]
self.assertItemsEqual(response.data, expected)
class ClassroomAPITestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.classrooms = [ClassroomFactory.create(parent=self.facility) for _ in range(10)]
self.learner_group = LearnerGroupFactory.create(parent=self.classrooms[0])
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
def test_classroom_list(self):
response = self.client.get(reverse('classroom-list'), format='json')
expected = [collections.OrderedDict((
('id', classroom.id),
('name', classroom.name),
('parent', classroom.parent.id),
)) for classroom in self.classrooms]
self.assertItemsEqual(response.data, expected)
def test_classroom_detail(self):
response = self.client.get(reverse('classroom-detail', kwargs={'pk': self.classrooms[0].id}), format='json')
expected = {
'id': self.classrooms[0].id,
'name': self.classrooms[0].name,
'parent': self.classrooms[0].parent.id,
}
self.assertDictEqual(response.data, expected)
class FacilityAPITestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility1 = FacilityFactory.create()
self.facility2 = FacilityFactory.create()
self.user1 = FacilityUserFactory.create(facility=self.facility1)
self.user2 = FacilityUserFactory.create(facility=self.facility2)
def test_sanity(self):
self.assertTrue(self.client.login(username=self.user1.username, password=DUMMY_PASSWORD, facility=self.facility1))
def test_facility_user_can_get_detail(self):
self.client.login(username=self.user1.username, password=DUMMY_PASSWORD, facility=self.facility1)
response = self.client.get(reverse('facility-detail', kwargs={'pk': self.facility1.pk}),
format='json')
# .assertDictContainsSubset checks that the first argument is a subset of the second argument
self.assertDictContainsSubset({
'name': self.facility1.name,
}, dict(response.data))
def test_anonymous_user_gets_empty_list(self):
response = self.client.get(reverse('facility-list'), format='json')
self.assertEqual(response.data, [])
def test_device_admin_can_create_facility(self):
new_facility_name = "New Facility"
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
response = self.client.post(reverse('facility-list'), {"name": new_facility_name}, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 1)
def test_facility_user_cannot_create_facility(self):
new_facility_name = "New Facility"
self.client.login(username=self.user1.username, password=DUMMY_PASSWORD, facility=self.facility1)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
response = self.client.post(reverse('facility-list'), {"name": new_facility_name}, format="json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
def test_anonymous_user_cannot_create_facility(self):
new_facility_name = "New Facility"
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
response = self.client.post(reverse('facility-list'), {"name": new_facility_name}, format="json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
def test_device_admin_can_update_facility(self):
old_facility_name = self.facility1.name
new_facility_name = "Renamed Facility"
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
self.assertEqual(models.Facility.objects.get(id=self.facility1.id).name, old_facility_name)
response = self.client.put(reverse('facility-detail', kwargs={"pk": self.facility1.id}), {"name": new_facility_name}, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(models.Facility.objects.get(id=self.facility1.id).name, new_facility_name)
def test_device_admin_can_delete_facility(self):
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
self.assertEqual(models.Facility.objects.filter(id=self.facility1.id).count(), 1)
response = self.client.delete(reverse('facility-detail', kwargs={"pk": self.facility1.id}))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(models.Facility.objects.filter(id=self.facility1.id).count(), 0)
class UserCreationTestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
def test_creating_device_owner_via_api_sets_password_correctly(self):
new_username = "goliath"
new_password = "davidsucks"
bad_password = "ilovedavid"
response = self.client.post(reverse('deviceowner-list'), {"username": new_username, "password": new_password}, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.DeviceOwner.objects.get(username=new_username).check_password(new_password))
self.assertFalse(models.DeviceOwner.objects.get(username=new_username).check_password(bad_password))
def test_creating_facility_user_via_api_sets_password_correctly(self):
new_username = "goliath"
new_password = "davidsucks"
bad_password = "ilovedavid"
data = {"username": new_username, "password": new_password, "facility": self.facility.id}
response = self.client.post(reverse('facilityuser-list'), data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.FacilityUser.objects.get(username=new_username).check_password(new_password))
self.assertFalse(models.FacilityUser.objects.get(username=new_username).check_password(bad_password))
def test_creating_same_facility_user_throws_400_error(self):
new_username = "goliath"
new_password = "davidsucks"
data = {"username": new_username, "password": new_password, "facility": self.facility.id}
response = self.client.post(reverse('facilityuser-list'), data, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.post(reverse('facilityuser-list'), data, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_creating_same_device_owner_throws_400_error(self):
new_username = "goliath"
new_password = "davidsucks"
data = {"username": new_username, "password": new_password}
response = self.client.post(reverse('deviceowner-list'), data, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.post(reverse('deviceowner-list'), data, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class UserUpdateTestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.user = FacilityUserFactory.create(facility=self.facility)
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
def test_user_update_info(self):
self.client.patch(reverse('facilityuser-detail', kwargs={'pk': self.user.pk}), {'username': 'foo'}, format="json")
self.user.refresh_from_db()
self.assertEqual(self.user.username, "foo")
def test_user_update_password(self):
new_password = 'baz'
self.client.patch(reverse('facilityuser-detail', kwargs={'pk': self.user.pk}), {'password': new_password}, format="json")
self.client.logout()
response = self.client.login(username=self.user.username, password=new_password, facility=self.facility)
self.assertTrue(response)
def test_device_owner_update_info(self):
self.client.patch(reverse('deviceowner-detail', kwargs={'pk': self.device_owner.pk}), {'username': 'foo'}, format="json")
self.device_owner.refresh_from_db()
self.assertEqual(self.device_owner.username, "foo")
def test_device_owner_update_password(self):
new_password = 'baz'
self.client.patch(reverse('deviceowner-detail', kwargs={'pk': self.device_owner.pk}), {'password': new_password}, format="json")
self.client.logout()
response = self.client.login(username=self.device_owner.username, password=new_password)
self.assertTrue(response)
class LoginLogoutTestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.user = FacilityUserFactory.create(facility=self.facility)
self.admin = FacilityUserFactory.create(facility=self.facility, password="bar")
self.facility.add_admin(self.admin)
self.cr = ClassroomFactory.create(parent=self.facility)
self.cr.add_coach(self.admin)
def test_login_and_logout_device_owner(self):
self.client.post(reverse('session-list'), data={"username": self.device_owner.username, "password": DUMMY_PASSWORD})
sessions = Session.objects.all()
self.assertEqual(len(sessions), 1)
self.client.delete(reverse('session-detail', kwargs={'pk': 'current'}))
self.assertEqual(len(Session.objects.all()), 0)
def test_login_and_logout_facility_user(self):
self.client.post(reverse('session-list'), data={"username": self.user.username, "password": DUMMY_PASSWORD, "facility": self.facility.id})
sessions = Session.objects.all()
self.assertEqual(len(sessions), 1)
self.client.delete(reverse('session-detail', kwargs={'pk': 'current'}))
self.assertEqual(len(Session.objects.all()), 0)
def test_incorrect_credentials_does_not_log_in_user(self):
self.client.post(reverse('session-list'), data={"username": self.user.username, "password": "foo", "facility": self.facility.id})
sessions = Session.objects.all()
self.assertEqual(len(sessions), 0)
def test_session_return_admin_and_coach_kind(self):
self.client.post(reverse('session-list'), data={"username": self.admin.username, "password": "bar", "facility": self.facility.id})
response = self.client.get(reverse('session-detail', kwargs={'pk': 'current'}))
self.assertTrue(response.data['kind'][0], 'admin')
self.assertTrue(response.data['kind'][1], 'coach')
def test_session_return_anon_kind(self):
response = self.client.get(reverse('session-detail', kwargs={'pk': 'current'}))
self.assertTrue(response.data['kind'][0], 'anonymous')
class AnonSignUpTestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
def test_anon_sign_up_creates_user(self):
response = self.client.post(reverse('signup-list'), data={"username": "user", "password": DUMMY_PASSWORD})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.FacilityUser.objects.all())
def test_anon_sign_up_returns_user(self):
full_name = "Bob Lee"
response = self.client.post(reverse('signup-list'), data={"full_name": full_name, "username": "user", "password": DUMMY_PASSWORD})
self.assertEqual(response.data['username'], 'user')
self.assertEqual(response.data['full_name'], full_name)
def test_create_user_with_same_username_fails(self):
FacilityUserFactory.create(username='bob')
response = self.client.post(reverse('signup-list'), data={"username": "bob", "password": DUMMY_PASSWORD})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(len(models.FacilityUser.objects.all()), 1)
def test_create_bad_username_fails(self):
response = self.client.post(reverse('signup-list'), data={"username": "(***)", "password": DUMMY_PASSWORD})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(models.FacilityUser.objects.all())
def test_sign_up_also_logs_in_user(self):
self.assertFalse(Session.objects.all())
self.client.post(reverse('signup-list'), data={"username": "user", "password": DUMMY_PASSWORD})
self.assertTrue(Session.objects.all())
class FacilityDatasetAPITestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
FacilityFactory.create(name='extra')
self.admin = FacilityUserFactory.create(facility=self.facility)
self.user = FacilityUserFactory.create(facility=self.facility)
self.facility.add_admin(self.admin)
def test_return_dataset_that_user_is_an_admin_for(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD)
response = self.client.get(reverse('facilitydataset-list'))
self.assertEqual(len(response.data), 1)
self.assertEqual(self.admin.dataset_id, response.data[0]['id'])
def test_return_all_datasets_for_device_owner(self):
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
response = self.client.get(reverse('facilitydataset-list'))
self.assertEqual(len(response.data), len(models.FacilityDataset.objects.all()))
def test_return_nothing_for_facility_user(self):
self.client.login(username=self.user.username, password=DUMMY_PASSWORD)
response = self.client.get(reverse('facilitydataset-list'))
self.assertEqual(len(response.data), 0)
| mit | 3,542,706,537,411,612,700 | 46.193717 | 146 | 0.68499 | false |
yeming233/rally | rally/plugins/common/validators.py | 1 | 14381 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import os
import jsonschema
import six
from rally.common import logging
from rally.common import validation
LOG = logging.getLogger(__name__)
class ValidatorUtils(object):
@staticmethod
def _file_access_ok(filename, mode, param_name, required=True):
if not filename:
return validation.ValidationResult(
not required,
"Parameter %s required" % param_name)
if not os.access(os.path.expanduser(filename), mode):
return validation.ValidationResult(
False, "Could not open %(filename)s with mode %(mode)s "
"for parameter %(param_name)s"
% {"filename": filename, "mode": mode,
"param_name": param_name})
return validation.ValidationResult(True)
@validation.configure(name="jsonschema")
class JsonSchemaValidator(validation.Validator):
"""JSON schema validator"""
def validate(self, credentials, config, plugin_cls, plugin_cfg):
try:
jsonschema.validate(plugin_cfg, plugin_cls.CONFIG_SCHEMA)
except jsonschema.ValidationError as err:
return self.fail(str(err))
@validation.configure(name="args-spec")
class ArgsValidator(validation.Validator):
"""Scenario arguments validator"""
def validate(self, credentials, config, plugin_cls, plugin_cfg):
scenario = plugin_cls
name = scenario.get_name()
namespace = scenario.get_platform()
scenario = scenario().run
args, _varargs, varkwargs, defaults = inspect.getargspec(scenario)
hint_msg = (" Use `rally plugin show --name %s --namespace %s` "
"to display scenario description." % (name, namespace))
# scenario always accepts an instance of scenario cls as a first arg
missed_args = args[1:]
if defaults:
# do not require args with default values
missed_args = missed_args[:-len(defaults)]
if "args" in config:
missed_args = set(missed_args) - set(config["args"])
if missed_args:
msg = ("Argument(s) '%(args)s' should be specified in task config."
"%(hint)s" % {"args": "', '".join(missed_args),
"hint": hint_msg})
return self.fail(msg)
if varkwargs is None and "args" in config:
redundant_args = set(config["args"]) - set(args[1:])
if redundant_args:
msg = ("Unexpected argument(s) found ['%(args)s'].%(hint)s" %
{"args": "', '".join(redundant_args),
"hint": hint_msg})
return self.fail(msg)
@validation.configure(name="required_params")
class RequiredParameterValidator(validation.Validator):
"""Scenario required parameter validator.
This allows us to search required parameters in subdict of config.
:param subdict: sub-dict of "config" to search. if
not defined - will search in "config"
:param params: list of required parameters
"""
def __init__(self, params=None, subdict=None):
super(RequiredParameterValidator, self).__init__()
self.subdict = subdict
self.params = params
def validate(self, credentials, config, plugin_cls, plugin_cfg):
missing = []
args = config.get("args", {})
if self.subdict:
args = args.get(self.subdict, {})
for arg in self.params:
if isinstance(arg, (tuple, list)):
for case in arg:
if case in args:
break
else:
arg = "'/'".join(arg)
missing.append("'%s' (at least one parameter should be "
"specified)" % arg)
else:
if arg not in args:
missing.append("'%s'" % arg)
if missing:
msg = ("%s parameter(s) are not defined in "
"the input task file") % ", ".join(missing)
return self.fail(msg)
@validation.configure(name="number")
class NumberValidator(validation.Validator):
"""Checks that parameter is a number that pass specified condition.
Ensure a parameter is within the range [minval, maxval]. This is a
closed interval so the end points are included.
:param param_name: Name of parameter to validate
:param minval: Lower endpoint of valid interval
:param maxval: Upper endpoint of valid interval
:param nullable: Allow parameter not specified, or parameter=None
:param integer_only: Only accept integers
"""
def __init__(self, param_name, minval=None, maxval=None, nullable=False,
integer_only=False):
self.param_name = param_name
self.minval = minval
self.maxval = maxval
self.nullable = nullable
self.integer_only = integer_only
def validate(self, credentials, config, plugin_cls, plugin_cfg):
value = config.get("args", {}).get(self.param_name)
num_func = float
if self.integer_only:
# NOTE(boris-42): Force check that passed value is not float, this
# is important cause int(float_numb) won't raise exception
if type(value) == float:
return self.fail("%(name)s is %(val)s which hasn't int type"
% {"name": self.param_name, "val": value})
num_func = int
# None may be valid if the scenario sets a sensible default.
if self.nullable and value is None:
return
try:
number = num_func(value)
if self.minval is not None and number < self.minval:
return self.fail(
"%(name)s is %(val)s which is less than the minimum "
"(%(min)s)" % {"name": self.param_name,
"val": number,
"min": self.minval})
if self.maxval is not None and number > self.maxval:
return self.fail(
"%(name)s is %(val)s which is greater than the maximum "
"(%(max)s)" % {"name": self.param_name,
"val": number,
"max": self.maxval})
except (ValueError, TypeError):
return self.fail("%(name)s is %(val)s which is not a valid "
"%(type)s" % {"name": self.param_name,
"val": value,
"type": num_func.__name__})
@validation.configure(name="enum")
class EnumValidator(validation.Validator):
"""Checks that parameter is in a list.
Ensure a parameter has the right value. This value need to be defined
in a list.
:param param_name: Name of parameter to validate
:param values: List of values accepted
:param missed: Allow to accept optional parameter
:param case_insensitive: Ignore case in enum values
"""
def __init__(self, param_name, values, missed=False,
case_insensitive=False):
self.param_name = param_name
self.missed = missed
self.case_insensitive = case_insensitive
if self.case_insensitive:
self.values = []
for value in values:
if isinstance(value, (six.text_type, six.string_types)):
value = value.lower()
self.values.append(value)
else:
self.values = values
def validate(self, credentials, config, plugin_cls, plugin_cfg):
value = config.get("args", {}).get(self.param_name)
if value:
if self.case_insensitive:
if isinstance(value, (six.text_type, six.string_types)):
value = value.lower()
if value not in self.values:
return self.fail("%(name)s is %(val)s which is not a "
"valid value from %(list)s"
% {"name": self.param_name,
"val": value,
"list": self.values})
else:
if not self.missed:
return self.fail("%s parameter is not defined in the "
"task config file" % self.param_name)
@validation.configure(name="restricted_parameters")
class RestrictedParametersValidator(validation.Validator):
def __init__(self, param_names, subdict=None):
"""Validates that parameters is not set.
:param param_names: parameter or parameters list to be validated.
:param subdict: sub-dict of "config" to search for param_names. if
not defined - will search in "config"
"""
super(RestrictedParametersValidator, self).__init__()
if isinstance(param_names, (list, tuple)):
self.params = param_names
else:
self.params = [param_names]
self.subdict = subdict
def validate(self, config, credentials, plugin_cls, plugin_cfg):
restricted_params = []
for param_name in self.params:
args = config.get("args", {})
a_dict, a_key = (args, self.subdict) if self.subdict else (
config, "args")
if param_name in a_dict.get(a_key, {}):
restricted_params.append(param_name)
if restricted_params:
msg = ("You can't specify parameters '{}' in '{}'")
return self.fail(msg.format(
", ".join(restricted_params),
self.subdict if self.subdict else "args"))
@validation.configure(name="required_contexts")
class RequiredContextsValidator(validation.Validator):
def __init__(self, contexts, *args):
"""Validator checks if required contexts are specified.
:param contexts: list of strings and tuples with context names that
should be specified. Tuple represent 'at least one
of the'.
"""
super(RequiredContextsValidator, self).__init__()
if isinstance(contexts, (list, tuple)):
# services argument is a list, so it is a new way of validators
# usage, args in this case should not be provided
self.contexts = contexts
if args:
LOG.warning("Positional argument is not what "
"'required_context' decorator expects. "
"Use `contexts` argument instead")
else:
# it is old way validator
self.contexts = [contexts]
self.contexts.extend(args)
def validate(self, config, credentials, plugin_cls, plugin_cfg):
missing_contexts = []
context = config.get("context", {})
for name in self.contexts:
if isinstance(name, tuple):
if not set(name) & set(context):
# formatted string like: 'foo or bar or baz'
formatted_names = "'{}'".format(" or ".join(name))
missing_contexts.append(formatted_names)
else:
if name not in context:
missing_contexts.append(name)
if missing_contexts:
msg = ("The following context(s) are required but missing from "
"the input task file: {}").format(
", ".join(missing_contexts))
return self.fail(msg)
@validation.configure(name="required_param_or_context")
class RequiredParamOrContextValidator(validation.Validator):
def __init__(self, param_name, ctx_name):
"""Validator checks if required image is specified.
:param param_name: name of parameter
:param ctx_name: name of context
"""
super(RequiredParamOrContextValidator, self).__init__()
self.param_name = param_name
self.ctx_name = ctx_name
def validate(self, config, credentials, plugin_cls, plugin_cfg):
msg = ("You should specify either scenario argument {} or"
" use context {}.").format(self.param_name,
self.ctx_name)
if self.ctx_name in config.get("context", {}):
return
if self.param_name in config.get("args", {}):
return
return self.fail(msg)
@validation.configure(name="file_exists")
class FileExistsValidator(validation.Validator):
def __init__(self, param_name, mode=os.R_OK, required=True):
"""Validator checks parameter is proper path to file with proper mode.
Ensure a file exists and can be accessed with the specified mode.
Note that path to file will be expanded before access checking.
:param param_name: Name of parameter to validate
:param mode: Access mode to test for. This should be one of:
* os.F_OK (file exists)
* os.R_OK (file is readable)
* os.W_OK (file is writable)
* os.X_OK (file is executable)
If multiple modes are required they can be added, eg:
mode=os.R_OK+os.W_OK
:param required: Boolean indicating whether this argument is required.
"""
super(FileExistsValidator, self).__init__()
self.param_name = param_name
self.mode = mode
self.required = required
def validate(self, config, credentials, plugin_cls, plugin_cfg):
return ValidatorUtils._file_access_ok(
config.get("args", {}).get(self.param_name),
self.mode, self.param_name, self.required)
| apache-2.0 | -4,676,884,078,922,632,000 | 37.9729 | 79 | 0.568667 | false |
lgarren/spack | var/spack/repos/builtin/packages/opencv/package.py | 1 | 10399 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Opencv(CMakePackage):
"""OpenCV is released under a BSD license and hence it's free for both
academic and commercial use. It has C++, C, Python and Java interfaces and
supports Windows, Linux, Mac OS, iOS and Android. OpenCV was designed for
computational efficiency and with a strong focus on real-time applications.
Written in optimized C/C++, the library can take advantage of multi-core
processing. Enabled with OpenCL, it can take advantage of the hardware
acceleration of the underlying heterogeneous compute platform. Adopted all
around the world, OpenCV has more than 47 thousand people of user community
and estimated number of downloads exceeding 9 million. Usage ranges from
interactive art, to mines inspection, stitching maps on the web or through
advanced robotics.
"""
homepage = 'http://opencv.org/'
url = 'https://github.com/Itseez/opencv/archive/3.1.0.tar.gz'
version('master', git="https://github.com/opencv/opencv.git", branch="master")
version('3.3.0', '98a4e4c6f23ec725e808a891dc11eec4')
version('3.2.0', 'a43b65488124ba33dde195fea9041b70')
version('3.1.0', '70e1dd07f0aa06606f1bc0e3fa15abd3')
version('2.4.13.2', 'fe52791ce523681a67036def4c25261b')
version('2.4.13.1', 'f6d354500d5013e60dc0fc44b07a63d1')
version('2.4.13', '8feb45a71adad89b8017a777477c3eff')
version('2.4.12.3', '2496a4a4caf8fecfbfc294fbe6a814b0')
version('2.4.12.2', 'bc0c60c2ea1cf4078deef99569912fc7')
version('2.4.12.1', '7192f51434710904b5e3594872b897c3')
variant('shared', default=True,
description='Enables the build of shared libraries')
variant('eigen', default=True, description='Activates support for eigen')
variant('ipp', default=True, description='Activates support for IPP')
variant('jasper', default=True, description='Activates support for JasPer')
variant('cuda', default=False, description='Activates support for CUDA')
variant('gtk', default=False, description='Activates support for GTK')
variant('vtk', default=False, description='Activates support for VTK')
variant('qt', default=False, description='Activates support for QT')
variant('python', default=False,
description='Enables the build of Python extensions')
variant('java', default=False,
description='Activates support for Java')
variant('openmp', default=False, description='Activates support for OpenMP threads')
variant('core', default=True, description='Include opencv_core module into the OpenCV build')
variant('highgui', default=False, description='Include opencv_highgui module into the OpenCV build')
variant('imgproc', default=False, description='Include opencv_imgproc module into the OpenCV build')
variant('jpeg', default=False, description='Include JPEG support')
variant('png', default=False, description='Include PNG support')
variant('tiff', default=False, description='Include TIFF support')
variant('zlib', default=False, description='Build zlib from source')
variant('dnn', default=False, description='Build DNN support')
depends_on('eigen~mpfr', when='+eigen', type='build')
depends_on('zlib', when='+zlib')
depends_on('libpng', when='+png')
depends_on('jpeg', when='+jpeg')
depends_on('libtiff', when='+tiff')
depends_on('jasper', when='+jasper')
depends_on('cuda', when='+cuda')
depends_on('gtkplus', when='+gtk')
depends_on('vtk', when='+vtk')
depends_on('qt', when='+qt')
depends_on('java', when='+java')
depends_on('py-numpy', when='+python', type=('build', 'run'))
depends_on('[email protected]', when='@3.3.0: +dnn')
extends('python', when='+python')
def cmake_args(self):
spec = self.spec
args = [
'-DBUILD_SHARED_LIBS:BOOL={0}'.format((
'ON' if '+shared' in spec else 'OFF')),
'-DENABLE_PRECOMPILED_HEADERS:BOOL=OFF',
'-DWITH_IPP:BOOL={0}'.format((
'ON' if '+ipp' in spec else 'OFF')),
'-DWITH_CUDA:BOOL={0}'.format((
'ON' if '+cuda' in spec else 'OFF')),
'-DWITH_QT:BOOL={0}'.format((
'ON' if '+qt' in spec else 'OFF')),
'-DWITH_VTK:BOOL={0}'.format((
'ON' if '+vtk' in spec else 'OFF')),
'-DBUILD_opencv_java:BOOL={0}'.format((
'ON' if '+java' in spec else 'OFF')),
'-DBUILD_opencv_core:BOOL={0}'.format((
'ON' if '+core' in spec else 'OFF')),
'-DBUILD_opencv_highgui:BOOL={0}'.format((
'ON' if '+highgui' in spec else 'OFF')),
'-DBUILD_opencv_imgproc:BOOL={0}'.format((
'ON' if '+imgproc' in spec else 'OFF')),
'-DWITH_JPEG:BOOL={0}'.format((
'ON' if '+jpeg' in spec else 'OFF')),
'-DWITH_PNG:BOOL={0}'.format((
'ON' if '+png' in spec else 'OFF')),
'-DWITH_TIFF:BOOL={0}'.format((
'ON' if '+tiff' in spec else 'OFF')),
'-DWITH_ZLIB:BOOL={0}'.format((
'ON' if '+zlib' in spec else 'OFF')),
'-DWITH_OPENMP:BOOL={0}'.format((
'ON' if '+openmp' in spec else 'OFF')),
'-DBUILD_opencv_dnn:BOOL={0}'.format((
'ON' if '+dnn' in spec else 'OFF')),
]
# Media I/O
if '+zlib' in spec:
zlib = spec['zlib']
args.extend([
'-DZLIB_LIBRARY_{0}:FILEPATH={1}'.format((
'DEBUG' if '+debug' in spec else 'RELEASE'),
join_path(zlib.prefix.lib,
'libz.{0}'.format(dso_suffix))),
'-DZLIB_INCLUDE_DIR:PATH={0}'.format(zlib.prefix.include)
])
if '+png' in spec:
libpng = spec['libpng']
args.extend([
'-DPNG_LIBRARY_{0}:FILEPATH={1}'.format((
'DEBUG' if '+debug' in spec else 'RELEASE'),
join_path(libpng.prefix.lib,
'libpng.{0}'.format(dso_suffix))),
'-DPNG_INCLUDE_DIR:PATH={0}'.format(libpng.prefix.include)
])
if '+jpeg' in spec:
libjpeg = spec['jpeg']
args.extend([
'-DBUILD_JPEG:BOOL=OFF',
'-DJPEG_LIBRARY:FILEPATH={0}'.format(
join_path(libjpeg.prefix.lib,
'libjpeg.{0}'.format(dso_suffix))),
'-DJPEG_INCLUDE_DIR:PATH={0}'.format(libjpeg.prefix.include)
])
if '+tiff' in spec:
libtiff = spec['libtiff']
args.extend([
'-DTIFF_LIBRARY_{0}:FILEPATH={1}'.format((
'DEBUG' if '+debug' in spec else 'RELEASE'),
join_path(libtiff.prefix.lib,
'libtiff.{0}'.format(dso_suffix))),
'-DTIFF_INCLUDE_DIR:PATH={0}'.format(libtiff.prefix.include)
])
if '+jasper' in spec:
jasper = spec['jasper']
args.extend([
'-DJASPER_LIBRARY_{0}:FILEPATH={1}'.format((
'DEBUG' if '+debug' in spec else 'RELEASE'),
join_path(jasper.prefix.lib,
'libjasper.{0}'.format(dso_suffix))),
'-DJASPER_INCLUDE_DIR:PATH={0}'.format(jasper.prefix.include)
])
# GUI
if '+gtk' not in spec:
args.extend([
'-DWITH_GTK:BOOL=OFF',
'-DWITH_GTK_2_X:BOOL=OFF'
])
elif '^gtkplus@3:' in spec:
args.extend([
'-DWITH_GTK:BOOL=ON',
'-DWITH_GTK_2_X:BOOL=OFF'
])
elif '^gtkplus@2:3' in spec:
args.extend([
'-DWITH_GTK:BOOL=OFF',
'-DWITH_GTK_2_X:BOOL=ON'
])
# Python
if '+python' in spec:
python_exe = spec['python'].command.path
python_lib = spec['python'].libs[0]
python_include_dir = spec['python'].headers.directories[0]
if '^python@3:' in spec:
args.extend([
'-DBUILD_opencv_python3=ON',
'-DPYTHON3_EXECUTABLE={0}'.format(python_exe),
'-DPYTHON3_LIBRARY={0}'.format(python_lib),
'-DPYTHON3_INCLUDE_DIR={0}'.format(python_include_dir),
'-DBUILD_opencv_python2=OFF',
])
elif '^python@2:3' in spec:
args.extend([
'-DBUILD_opencv_python2=ON',
'-DPYTHON2_EXECUTABLE={0}'.format(python_exe),
'-DPYTHON2_LIBRARY={0}'.format(python_lib),
'-DPYTHON2_INCLUDE_DIR={0}'.format(python_include_dir),
'-DBUILD_opencv_python3=OFF',
])
else:
args.extend([
'-DBUILD_opencv_python2=OFF',
'-DBUILD_opencv_python3=OFF'
])
return args
| lgpl-2.1 | -4,794,252,325,094,675,000 | 44.017316 | 104 | 0.561689 | false |
gam17/QAD | qad_circle_fun.py | 1 | 63798 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
QAD Quantum Aided Design plugin
 functions for creating circles
-------------------
begin : 2018-04-08
copyright : iiiii
email : hhhhh
developers : bbbbb aaaaa ggggg
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from qgis.PyQt.QtCore import *
from qgis.PyQt.QtGui import *
from qgis.core import *
from qgis.gui import *
import qgis.utils
import math
import sys
from . import qad_utils
from .qad_geom_relations import *
#============================================================================
# circleFrom3Pts
#============================================================================
def circleFrom3Pts(firstPt, secondPt, thirdPt):
"""
crea un cerchio attraverso:
punto iniziale
secondo punto (intermedio)
punto finale
"""
l = QadLine()
l.set(firstPt, secondPt)
InfinityLinePerpOnMiddle1 = QadPerpendicularity.getInfinityLinePerpOnMiddleLine(l)
l.set(secondPt, thirdPt)
InfinityLinePerpOnMiddle2 = QadPerpendicularity.getInfinityLinePerpOnMiddleLine(l)
if InfinityLinePerpOnMiddle1 is None or InfinityLinePerpOnMiddle2 is None:
return None
center = QadIntersections.twoInfinityLines(InfinityLinePerpOnMiddle1, InfinityLinePerpOnMiddle2)
   if center is None: return None # parallel lines
radius = center.distance(firstPt)
return QadCircle().set(center, radius)
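# Illustrative usage sketch, not part of the original module: assuming the input
# points are QgsPointXY objects (they only need the distance() method used above):
#    c = circleFrom3Pts(QgsPointXY(0.0, 0.0), QgsPointXY(1.0, 1.0), QgsPointXY(2.0, 0.0))
#    # c.center is (1, 0) and c.radius is 1.0; None is returned for collinear points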
#===========================================================================
# circleFrom2IntPtsCircleTanPts
#===========================================================================
def circleFrom2IntPtsCircleTanPts(pt1, pt2, circle, pt):
"""
crea un cerchio attraverso 2 punti di intersezione e un cerchio tangente:
punto1 di intersezione
punto2 di intersezione
cerchio di tangenza (oggetto QadCircle)
punto di selezione cerchio
"""
# http://www.batmath.it/matematica/a_apollonio/ppc.htm
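   # This is Apollonius' PPC (point-point-circle) problem: find the circles that
   # pass through two given points and are tangent to a given circle. The cases
   # below depend on where pt1 and pt2 lie with respect to circle.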
circleList = []
if pt1 == pt2: return None
   dist1 = pt1.distance(circle.center) # distance of point 1 from the center
   dist2 = pt2.distance(circle.center) # distance of point 2 from the center
   # both points must be either outside or inside circle
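   # (a circle through one point inside and one point outside circle would cross
   # its circumference in two points, so it could not be tangent to it)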
if (dist1 > circle.radius and dist2 < circle.radius) or \
(dist1 < circle.radius and dist2 > circle.radius):
return None
l = QadLine()
l.set(pt1, pt2)
if dist1 == dist2: # l'asse di pt1 e pt2 passa per il centro di circle
if dist1 == circle.radius: # entrambi i punti sono sulla circonferenza di circle
return None
axis = QadPerpendicularity.getInfinityLinePerpOnMiddleLine(l) # asse di pt1 e pt2
intPts = QadIntersections.infinityLineWithCircle(axis, circle) # punti di intersezione tra l'asse e circle
for intPt in intPts:
circleTan = circleFrom3Pts(pt1, pt2, intPt)
if circleTan is not None:
circleList.append(circleTan)
elif dist1 > circle.radius and dist2 > circle.radius : # entrambi i punti sono esterni a circle
# mi ricavo una qualunque circonferenza passante per p1 e p2 ed intersecante circle
circleInt = circleFrom3Pts(pt1, pt2, circle.center)
if circleInt is None: return None
intPts = QadIntersections.twoCircles(circle, circleInt)
l1 = QadLine().set(pt1, pt2)
l2 = QadLine().set(intPts[0], intPts[1])
intPt = QadIntersections.twoInfinityLines(l1, l2)
tanPts = QadTangency.fromPointToCircle(intPt, circle)
for tanPt in tanPts:
circleTan = circleFrom3Pts(pt1, pt2, tanPt)
if circleTan is not None:
circleList.append(circleTan)
elif dist1 < circle.radius and dist2 < circle.radius : # entrambi i punti sono interni a circle
# mi ricavo una qualunque circonferenza passante per p1 e p2 ed intersecante circle
ptMiddle = qad_utils.getMiddlePoint(pt1, pt2)
angle = qad_utils.getAngleBy2Pts(pt1, pt2) + math.pi / 2
pt3 = qad_utils.getPolarPointByPtAngle(ptMiddle, angle, 2 * circle.radius)
circleInt = circleFrom3Pts(pt1, pt2, pt3)
if circleInt is None:
return None
intPts = QadIntersections.twoCircles(circle, circleInt)
l1 = QadLine().set(pt1, pt2)
l2 = QadLine().set(intPts[0], intPts[1])
intPt = QadIntersections.twoInfinityLines(l1, l2)
tanPts = QadTangency.fromPointToCircle(intPt, circle)
for tanPt in tanPts:
circleTan = circleFrom3Pts(pt1, pt2, tanPt)
if circleTan is not None:
circleList.append(circleTan)
   elif dist1 == circle.radius: # point 1 lies on the circumference of circle
      # a single circle, whose center is the intersection between the axis of pt1-pt2 and the line
      # passing through the center of circle and pt1
      axis = QadPerpendicularity.getInfinityLinePerpOnMiddleLine(l) # axis of pt1 and pt2
      l1 = QadLine().set(circle.center, pt1)
      intPt = QadIntersections.twoInfinityLines(axis, l1)
      circleTan = QadCircle().set(intPt, qad_utils.getDistance(pt1, intPt))
      circleList.append(circleTan)
   elif dist2 == circle.radius: # point 2 lies on the circumference of circle
      # a single circle, whose center is the intersection between the axis of pt1-pt2 and the line
      # passing through the center of circle and pt2
      axis = QadPerpendicularity.getInfinityLinePerpOnMiddleLine(l) # axis of pt1 and pt2
      l2 = QadLine().set(circle.center, pt2)
      intPt = QadIntersections.twoInfinityLines(axis, l2)
      circleTan = QadCircle().set(intPt, qad_utils.getDistance(pt2, intPt))
      circleList.append(circleTan)
if len(circleList) == 0:
return None
result = QadCircle()
minDist = sys.float_info.max
for circleTan in circleList:
angle = qad_utils.getAngleBy2Pts(circleTan.center, circle.center)
if qad_utils.getDistance(circleTan.center, circle.center) < circle.radius: # cerchio interno
angle = angle + math.pi / 2
ptInt = qad_utils.getPolarPointByPtAngle(circleTan.center, angle, circleTan.radius)
dist = qad_utils.getDistance(ptInt, pt)
if dist < minDist: # mediamente più vicino
minDist = dist
result.center = circleTan.center
result.radius = circleTan.radius
return result
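# Worked sketch (illustrative values only): with pt1 = (0, 2), pt2 = (0, -2) and a tangent circle
# of radius 1 centred at (4, 0), the candidate centres lie on the x axis; picking the circle near
# (3, 0) is expected to return the externally tangent solution with centre (5/6, 0) and radius 13/6.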
#===========================================================================
# circleFrom2IntPtsLineTanPts
#===========================================================================
def circleFrom2IntPtsLineTanPts(pt1, pt2, line, pt, AllCircles = False):
"""
crea uno o più cerchi (vedi allCircles) attraverso 2 punti di intersezione e una linea tangente:
punto1 di intersezione
punto2 di intersezione
linea di tangenza (QadLine)
punto di selezione linea
il parametro AllCircles se = True fa restituire tutti i cerchi altrimenti solo quello più vicino a pt1 e pt2
"""
circleList = []
pt1Line = line.getStartPt()
pt2Line = line.getEndPt()
A = (pt1.x() * pt1.x()) + (pt1.y() * pt1.y())
B = (pt2.x() * pt2.x()) + (pt2.y() * pt2.y())
E = - pt1.x() + pt2.x()
F = pt1.y() - pt2.y()
if F == 0:
if AllCircles == True:
return circleList
else:
return None
G = (-A + B) / F
H = E / F
if pt1Line.x() - pt2Line.x() == 0:
# la linea é verticale
e = pt1Line.x()
I = H * H
if I == 0:
if AllCircles == True:
return circleList
else:
return None
J = (2 * G * H) - (4 * e) + (4 * pt2.x()) + (4 * H * pt2.y())
K = (G * G) - (4 * e * e) + (4 * B) + (4 * G * pt2.y())
else:
# equazione della retta line -> y = dx + e
d = (pt2Line.y() - pt1Line.y()) / (pt2Line.x() - pt1Line.x())
e = - d * pt1Line.x() + pt1Line.y()
C = 4 * (1 + d * d)
D = 2 * d * e
d2 = d * d
I = 1 + (H * H * d2) + 2 * H * d
if I == 0:
if AllCircles == True:
return circleList
else:
return None
J = (2 * d2 * G * H) + (2 * D) + (2 * D * H * d) + (2 * G * d) - (e * C * H) + (pt2.x() * C) + H * pt2.y() * C
K = (G * G * d2) + (2 * D * G * d) + (D * D) - (C * e * e) - (C * G * e) + (B * C) + (G * pt2.y() * C)
L = (J * J) - (4 * I * K)
if L < 0:
if AllCircles == True:
return circleList
else:
return None
a1 = (-J + math.sqrt(L)) / (2 * I)
b1 = (a1 * H) + G
c1 = - B - (a1 * pt2.x()) - (b1 * pt2.y())
center = QgsPointXY()
center.setX(- (a1 / 2))
center.setY(- (b1 / 2))
radius = math.sqrt((a1 * a1 / 4) + (b1 * b1 / 4) - c1)
circle = QadCircle()
circle.set(center, radius)
circleList.append(circle)
a2 = (-J - math.sqrt(L)) / (2 * I)
b2 = (a2 * H) + G
c2 = - B - (a2 * pt2.x()) - (b2 * pt2.y())
center.setX(- (a2 / 2))
center.setY(- (b2 / 2))
radius = math.sqrt((a2 * a2 / 4) + (b2 * b2 / 4) - c2)
circle = QadCircle()
circle.set(center, radius)
circleList.append(circle)
if AllCircles == True:
return circleList
if len(circleList) == 0:
return None
result = QadCircle()
minDist = sys.float_info.max
for circle in circleList:
ptInt = QadPerpendicularity.fromPointToInfinityLine(circle.center, line)
dist = ptInt.distance(pt)
if dist < minDist: # mediamente più vicino
minDist = dist
result.center = circle.center
result.radius = circle.radius
return result
#============================================================================
# circleFrom2IntPts1TanPt
#============================================================================
def circleFrom2IntPts1TanPt(pt1, pt2, geom, pt):
"""
crea un cerhcio attraverso 2 punti di intersezione ed un oggetto di tangenza:
punto1 di intersezione
punto2 di intersezione
geometria di tangenza (linea, arco o cerchio)
punto di selezione geometria
"""
objType = geom.whatIs()
if objType != "LINE" and objType != "ARC" and objType != "CIRCLE":
return None
if objType == "ARC": # se è arco lo trasformo in cerchio
obj = QadCircle().set(geom.center, geom.radius)
objType = "CIRCLE"
else:
obj = geom
if objType == "LINE":
return circleFrom2IntPtsLineTanPts(pt1, pt2, obj, pt)
elif objType == "CIRCLE":
return circleFrom2IntPtsCircleTanPts(pt1, pt2, obj, pt)
return None
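# Usage sketch (hypothetical objects): given two points the new circle must pass through and an
# existing tangent entity picked near pickPt,
#   c = circleFrom2IntPts1TanPt(p1, p2, tangentGeom, pickPt)
# where tangentGeom is any QadLine, QadArc or QadCircle; arcs are treated as full circles.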
#============================================================================
# circleFrom1IntPt2TanPts
#============================================================================
def circleFrom1IntPt2TanPts(pt, geom1, pt1, geom2, pt2):
"""
crea un cerchio attraverso 1 punti di intersezione e 2 oggetti di tangenza:
punto di intersezione
geometria1 di tangenza (linea, arco o cerchio)
punto di selezione geometria1
geometria2 di tangenza (linea, arco o cerchio)
punto di selezione geometria2
"""
obj1Type = geom1.whatIs()
obj2Type = geom2.whatIs()
if (obj1Type != "LINE" and obj1Type != "ARC" and obj1Type != "CIRCLE") or \
(obj2Type != "LINE" and obj2Type != "ARC" and obj2Type != "CIRCLE"):
return None
if obj1Type == "ARC": # se è arco lo trasformo in cerchio
obj1 = QadCircle().set(geom1.center, geom1.radius)
obj1Type = "CIRCLE"
else:
obj1 = geom1
if obj2Type == "ARC": # se è arco lo trasformo in cerchio
obj2 = QadCircle().set(geom2.center, geom2.radius)
obj2Type = "CIRCLE"
else:
obj2 = geom2
if obj1Type == "LINE":
if obj2Type == "LINE":
return circleFrom1IntPtLineLineTanPts(pt, obj1, pt1, obj2, pt2)
elif obj2Type == "CIRCLE":
return circleFrom1IntPtLineCircleTanPts(pt, obj1, pt1, obj2, pt2)
elif obj1Type == "CIRCLE":
if obj2Type == "LINE":
return circleFrom1IntPtLineCircleTanPts(pt, obj2, pt2, obj1, pt1)
elif obj2Type == "CIRCLE":
return circleFrom1IntPtCircleCircleTanPts(pt, obj1, pt1, obj2, pt2)
return None
#===========================================================================
# circleFrom1IntPtLineLineTanPts
#===========================================================================
def circleFrom1IntPtLineLineTanPts(pt, line1, pt1, line2, pt2, AllCircles = False):
"""
crea uno o più cerchi (vedi allCircles) attraverso 1 punti di intersezione e due linee tangenti:
punto di intersezione
linea1 di tangenza (QLine)
punto di selezione linea1
linea2 di tangenza (QLine)
punto di selezione linea2
il parametro AllCircles se = True fa restituire tutti i cerchi e non sono quello più vicino a pt1 e pt2
"""
# http://www.batmath.it/matematica/a_apollonio/prr.htm
circleList = []
   # check whether the lines are parallel
   ptInt = QadIntersections.twoInfinityLines(line1, line2)
   if ptInt is None: # the lines are parallel
      # If the lines are parallel the problem has solutions only if the point is
      # not outside the strip bounded by the two lines; it is then enough to consider
      # the point symmetric to pt with respect to the bisector of the strip.
ptPerp = QadPerpendicularity.fromPointToInfinityLine(line2.getStartPt(), line1)
angle = qad_utils.getAngleBy2Pts(line2.getStartPt(), ptPerp)
dist = qad_utils.getDistance(line2.getStartPt(), ptPerp)
pt1ParLine = qad_utils.getPolarPointByPtAngle(line2.getStartPt(), angle, dist / 2)
angle = angle + math.pi / 2
pt2ParLine = qad_utils.getPolarPointByPtAngle(pt1ParLine, angle, dist)
l = QadLine().set(pt1ParLine, pt2ParLine)
ptPerp = QadPerpendicularity.fromPointToInfinityLine(pt, l)
dist = qad_utils.getDistance(pt, ptPerp)
# trovo il punto simmetrico
angle = qad_utils.getAngleBy2Pts(pt, ptPerp)
ptSymmetric = qad_utils.getPolarPointByPtAngle(pt, angle, dist * 2)
return circleFrom2IntPtsLineTanPts(pt, ptSymmetric, line1, pt1, AllCircles)
   else: # the lines are not parallel
      if ptInt == pt:
         return None
      # check whether the point lies on line1 or on line2
      ptPerp1 = QadPerpendicularity.fromPointToInfinityLine(pt, line1)
      ptPerp2 = QadPerpendicularity.fromPointToInfinityLine(pt, line2)
      if ptPerp1 == pt or ptPerp2 == pt:
         # If the lines intersect and the point belongs to one of them, the construction is
         # almost immediate: draw the bisectors of the two angles formed by the lines
         # and the perpendicular through pt to the line the point belongs to. This yields two circles.
         if ptPerp1 == pt: # the point is on line1
angle = qad_utils.getAngleBy2Pts(line2.getStartPt(), line2.getEndPt())
ptLine = qad_utils.getPolarPointByPtAngle(ptInt, angle, 10)
Bisector1 = qad_utils.getBisectorInfinityLine(pt, ptInt, ptLine)
ptLine = qad_utils.getPolarPointByPtAngle(ptInt, angle + math.pi, 10)
Bisector2 = qad_utils.getBisectorInfinityLine(pt, ptInt, ptLine)
angle = qad_utils.getAngleBy2Pts(line1.getStartPt(), line1.getEndPt())
ptPerp = qad_utils.getPolarPointByPtAngle(pt, angle + math.pi / 2, 10)
else: # se il punto é sulla linea2
angle = qad_utils.getAngleBy2Pts(line1.getStartPt(), line1.getEndPt())
ptLine = qad_utils.getPolarPointByPtAngle(ptInt, angle, 10)
Bisector1 = qad_utils.getBisectorInfinityLine(pt, ptInt, ptLine)
ptLine = qad_utils.getPolarPointByPtAngle(ptInt, angle + math.pi, 10)
Bisector2 = qad_utils.getBisectorInfinityLine(pt, ptInt, ptLine)
angle = qad_utils.getAngleBy2Pts(line2.getStartPt(), line2.getEndPt())
ptPerp = qad_utils.getPolarPointByPtAngle(pt, angle + math.pi / 2, 10)
l1 = QadLine().set(Bisector1[0], Bisector1[1])
l2 = QadLine().set(pt, ptPerp)
center = QadIntersections.twoInfinityLines(l1, l2)
radius = qad_utils.getDistance(pt, center)
circleTan = QadCircle()
circleTan.set(center, radius)
circleList.append(circleTan)
l1.set(Bisector2[0], Bisector2[1])
center = QadIntersections.twoInfinityLines(l1, l2)
radius = qad_utils.getDistance(pt, center)
circleTan = QadCircle()
circleTan.set(center, radius)
circleList.append(circleTan)
else:
# Bisettrice dell'angolo interno del triangolo avente come vertice i punti di intersezione delle rette
Bisector = qad_utils.getBisectorInfinityLine(ptPerp1, ptInt, ptPerp2)
l = QadLine().set(Bisector[0], Bisector[1])
ptPerp = QadPerpendicularity.fromPointToInfinityLine(pt, l)
dist = qad_utils.getDistance(pt, ptPerp)
# trovo il punto simmetrico
angle = qad_utils.getAngleBy2Pts(pt, ptPerp)
ptSymmetric = qad_utils.getPolarPointByPtAngle(pt, angle, dist * 2)
return circleFrom2IntPtsLineTanPts(pt, ptSymmetric, line1, pt1, AllCircles)
if AllCircles == True:
return circleList
if len(circleList) == 0:
return None
result = QadCircle()
AvgList = []
Avg = sys.float_info.max
for circleTan in circleList:
del AvgList[:] # svuoto la lista
ptInt = QadPerpendicularity.fromPointToInfinityLine(circleTan.center, line1)
AvgList.append(qad_utils.getDistance(ptInt, pt1))
ptInt = QadPerpendicularity.fromPointToInfinityLine(circleTan.center, line2)
AvgList.append(qad_utils.getDistance(ptInt, pt2))
currAvg = qad_utils.numericListAvg(AvgList)
if currAvg < Avg: # mediamente più vicino
Avg = currAvg
result.center = circleTan.center
result.radius = circleTan.radius
return result
#===============================================================================
# solveCircleTangentTo2LinesAndCircle
#===============================================================================
def solveCircleTangentTo2LinesAndCircle(line1, line2, circle, s1, s2):
   '''
   Finds the circles tangent to two lines and a circle for the given sign pair (all 8 solutions arise
   from the 4 combinations of s1, s2 = -1 or 1) and returns them as a list
   '''
   circleList = []
   # http://www.batmath.it/matematica/a_apollonio/rrc.htm
   # This construction uses a particular geometric transformation, sometimes called parallel dilation:
   # imagine that the radius r of the given circle c shrinks to zero (the circle is reduced to its center),
   # while the lines stay parallel, with their distances from the now-zero-radius circle's center increased
   # or decreased by r. This reduces the problem to the case of a point and two lines, where one of the
   # techniques already seen can be applied.
   angle = qad_utils.getAngleBy2Pts(line1.getStartPt(), line1.getEndPt())
   line1Par = QadLine().set(qad_utils.getPolarPointByPtAngle(line1.getStartPt(), angle + math.pi / 2, circle.radius * s1), \
                            qad_utils.getPolarPointByPtAngle(line1.getEndPt(), angle + math.pi / 2, circle.radius * s1))
   angle = qad_utils.getAngleBy2Pts(line2.getStartPt(), line2.getEndPt())
   line2Par = QadLine().set(qad_utils.getPolarPointByPtAngle(line2.getStartPt(), angle + math.pi / 2, circle.radius * s2), \
                            qad_utils.getPolarPointByPtAngle(line2.getEndPt(), angle + math.pi / 2, circle.radius * s2))
circleList = circleFrom1IntPtLineLineTanPts(circle.center, line1Par, None, line2Par, None, True)
for circleTan in circleList:
ptPerp = qad_utils.getPerpendicularPointOnInfinityLine(line1.getStartPt(), line1.getEndPt(), circleTan.center)
circleTan.radius = qad_utils.getDistance(ptPerp, circleTan.center)
return circleList
#============================================================================
# circleFromLineLineCircleTanPts
#============================================================================
def circleFromLineLineCircleTanPts(line1, pt1, line2, pt2, circle, pt3):
"""
crea un cerchio attraverso tre linee:
linea1 di tangenza (QadLine)
punto di selezione linea1
linea2 di tangenza (QadLine)
punto di selezione linea2
cerchio di tangenza (oggetto QadCircle)
punto di selezione cerchio
"""
circleList = []
circleList.extend(solveCircleTangentTo2LinesAndCircle(line1, line2, circle, -1, -1))
circleList.extend(solveCircleTangentTo2LinesAndCircle(line1, line2, circle, -1, 1))
circleList.extend(solveCircleTangentTo2LinesAndCircle(line1, line2, circle, 1, -1))
circleList.extend(solveCircleTangentTo2LinesAndCircle(line1, line2, circle, 1, 1))
if len(circleList) == 0:
return None
result = QadCircle()
AvgList = []
Avg = sys.float_info.max
for circleTan in circleList:
del AvgList[:] # svuoto la lista
ptInt = qad_utils.getPerpendicularPointOnInfinityLine(line1.getStartPt(), line1.getEndPt(), circleTan.center)
AvgList.append(ptInt.distance(pt1))
ptInt = qad_utils.getPerpendicularPointOnInfinityLine(line2.getStartPt(), line2.getEndPt(), circleTan.center)
AvgList.append(ptInt.distance(pt2))
angle = qad_utils.getAngleBy2Pts(circleTan.center, circle.center)
if circleTan.center.distance(circle.center) < circle.radius: # cerchio interno
angle = angle + math.pi / 2
ptInt = qad_utils.getPolarPointByPtAngle(circleTan.center, angle, circleTan.radius)
AvgList.append(ptInt.distance(pt3))
currAvg = qad_utils.numericListAvg(AvgList)
if currAvg < Avg: # mediamente più vicino
Avg = currAvg
result.center = circleTan.center
result.radius = circleTan.radius
   return result
#============================================================================
# circleFrom3TanPts
#============================================================================
def circleFrom3TanPts(geom1, pt1, geom2, pt2, geom3, pt3):
"""
crea un cerchio attraverso tre oggetti di tangenza per le estremità del diametro:
geometria 1 di tangenza (linea, arco o cerchio)
punto di selezione geometria 1
geometria 2 di tangenza (linea, arco o cerchio)
punto di selezione geometria 2
"""
obj1Type = geom1.whatIs()
obj2Type = geom2.whatIs()
obj3Type = geom3.whatIs()
if (obj1Type != "LINE" and obj1Type != "ARC" and obj1Type != "CIRCLE") or \
(obj2Type != "LINE" and obj2Type != "ARC" and obj2Type != "CIRCLE") or \
(obj3Type != "LINE" and obj3Type != "ARC" and obj3Type != "CIRCLE"):
return None
if obj1Type == "ARC": # se è arco lo trasformo in cerchio
obj1 = QadCircle().set(geom1.center, geom1.radius)
obj1Type = "CIRCLE"
else:
obj1 = geom1
if obj2Type == "ARC": # se è arco lo trasformo in cerchio
obj2 = QadCircle().set(geom2.center, geom2.radius)
obj2Type = "CIRCLE"
else:
obj2 = geom2
if obj3Type == "ARC": # se è arco lo trasformo in cerchio
obj3 = QadCircle().set(geom3.center, geom3.radius)
obj3Type = "CIRCLE"
else:
obj3 = geom3
if obj1Type == "LINE":
if obj2Type == "LINE":
if obj3Type == "LINE":
return circleFromLineLineLineTanPts(obj1, pt1, obj2, pt2, obj3, pt3)
elif obj3Type == "CIRCLE":
return circleFromLineLineCircleTanPts(obj1, pt1, obj2, pt2, obj3, pt3)
elif obj2Type == "CIRCLE":
if obj3Type == "LINE":
return circleFromLineLineCircleTanPts(obj1, pt1, obj3, pt3, obj2, pt2)
elif obj3Type == "CIRCLE":
return circleFromLineCircleCircleTanPts(obj1, pt1, obj2, pt2, obj3, pt3)
elif obj1Type == "CIRCLE":
if obj2Type == "LINE":
if obj3Type == "LINE":
return circleFromLineLineCircleTanPts(obj2, pt2, obj3, pt3, obj1, pt1)
elif obj3Type == "CIRCLE":
return circleFromLineCircleCircleTanPts(obj2, pt2, obj1, pt1, obj3, pt3)
elif obj2Type == "CIRCLE":
if obj3Type == "LINE":
return circleFromLineCircleCircleTanPts(obj3, pt3, obj1, pt1, obj2, pt2)
elif obj3Type == "CIRCLE":
return circleFromCircleCircleCircleTanPts(obj1, pt1, obj2, pt2, obj3, pt3)
return None
#============================================================================
# circleFromLineLineLineTanPts
#============================================================================
def circleFromLineLineLineTanPts(line1, pt1, line2, pt2, line3, pt3):
"""
Crea un cerchio attraverso tre linee:
linea1 di tangenza (QadLine)
punto di selezione linea1
linea2 di tangenza (QadLine)
punto di selezione linea2
linea3 di tangenza (QadLine)
punto di selezione linea3
"""
circleList = []
# Punti di intersezione delle rette (line1, line2, line3)
ptInt1 = QadIntersections.twoInfinityLines(line1, line2)
ptInt2 = QadIntersections.twoInfinityLines(line2, line3)
ptInt3 = QadIntersections.twoInfinityLines(line3, line1)
# tre rette parallele
if (ptInt1 is None) and (ptInt2 is None):
return circleList
if (ptInt1 is None): # la linea1 e linea2 sono parallele
circleList.extend(circleFrom2ParLinesLineTanPts(line1, line2, line3))
elif (ptInt2 is None): # la linea2 e linea3 sono parallele
circleList.extend(circleFrom2ParLinesLineTanPts(line2, line3, line1))
elif (ptInt3 is None): # la linea3 e linea1 sono parallele
circleList.extend(circleFrom2ParLinesLineTanPts(line3, line1, line2))
else:
# Bisettrici degli angoli interni del triangolo avente come vertici i punti di intersezione delle rette
Bisector123 = qad_utils.getBisectorInfinityLine(ptInt1, ptInt2, ptInt3)
Bisector231 = qad_utils.getBisectorInfinityLine(ptInt2, ptInt3, ptInt1)
Bisector312 = qad_utils.getBisectorInfinityLine(ptInt3, ptInt1, ptInt2)
# Punto di intersezione delle bisettrici = centro delle circonferenza inscritta al triangolo
l1 = QadLine().set(Bisector123[0], Bisector123[1])
l2 = QadLine().set(Bisector231[0], Bisector231[1])
center = QadIntersections.twoInfinityLines(l1, l2)
# Perpendicolari alle rette line1 passanti per il centro della circonferenza inscritta
ptPer = QadPerpendicularity.fromPointToInfinityLine(center, line1)
radius = center.distance(ptPer)
circle = QadCircle()
circle.set(center, radius)
circleList.append(circle)
# Bisettrici degli angoli esterni del triangolo
angle = qad_utils.getAngleBy2Pts(Bisector123[0], Bisector123[1]) + math.pi / 2
Bisector123 = QadLine().set(ptInt2, qad_utils.getPolarPointByPtAngle(ptInt2, angle, 10))
angle = qad_utils.getAngleBy2Pts(Bisector231[0], Bisector231[1]) + math.pi / 2
Bisector231 = QadLine().set(ptInt3, qad_utils.getPolarPointByPtAngle(ptInt3, angle, 10))
angle = qad_utils.getAngleBy2Pts(Bisector312[0], Bisector312[1]) + math.pi / 2
Bisector312 = QadLine().set(ptInt1, qad_utils.getPolarPointByPtAngle(ptInt1, angle, 10))
# Punti di intersezione delle bisettrici = centro delle circonferenze ex-inscritte
center = QadIntersections.twoInfinityLines(Bisector123, Bisector231)
l = QadLine().set(ptInt2, ptInt3)
ptPer = QadPerpendicularity.fromPointToInfinityLine(center, l)
radius = center.distance(ptPer)
circle = QadCircle()
circle.set(center, radius)
circleList.append(circle)
center = QadIntersections.twoInfinityLines(Bisector231, Bisector312)
l.set(ptInt3, ptInt1)
ptPer = QadPerpendicularity.fromPointToInfinityLine(center, l)
radius = center.distance(ptPer)
circle = QadCircle()
circle.set(center, radius)
circleList.append(circle)
center = QadIntersections.twoInfinityLines(Bisector312, Bisector123)
l.set(ptInt1, ptInt2)
ptPer = QadPerpendicularity.fromPointToInfinityLine(center, l)
radius = center.distance(ptPer)
circle = QadCircle()
circle.set(center, radius)
circleList.append(circle)
if len(circleList) == 0:
return None
result = QadCircle()
AvgList = []
Avg = sys.float_info.max
for circleTan in circleList:
del AvgList[:] # svuoto la lista
ptInt = QadPerpendicularity.fromPointToInfinityLine(circleTan.center, line1)
AvgList.append(ptInt.distance(pt1))
ptInt = QadPerpendicularity.fromPointToInfinityLine(circleTan.center, line2)
AvgList.append(ptInt.distance(pt2))
ptInt = QadPerpendicularity.fromPointToInfinityLine(circleTan.center, line3)
AvgList.append(ptInt.distance(pt3))
currAvg = qad_utils.numericListAvg(AvgList)
if currAvg < Avg: # mediamente più vicino
Avg = currAvg
result.center = circleTan.center
result.radius = circleTan.radius
return result
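# Worked sketch (illustrative values only): for the three lines y = 0, x = 0 and x + y = 2
# (a right triangle with vertices (0,0), (2,0) and (0,2)), picking a point near the interior of
# each edge is expected to return the inscribed circle, centre (2 - sqrt(2), 2 - sqrt(2)) and
# radius 2 - sqrt(2) ~= 0.586; picks outside the triangle select one of the three excircles.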
#===========================================================================
# circleFrom2ParLinesLineTanPts
#===========================================================================
def circleFrom2ParLinesLineTanPts(parLine1, parLine2, line3):
"""
Crea due cerchi attraverso 2 linee parallele e una terza linea non parallela:
linea1 di tangenza (QadLine) parallela a linea2
linea2 di tangenza (QadLine) parallela a linea1
linea3 di tangenza (QadLine)
"""
circleList = []
ptInt2 = QadIntersections.twoInfinityLines(parLine2, line3)
ptInt3 = QadIntersections.twoInfinityLines(line3, parLine1)
if parLine1.getStartPt() == ptInt3:
pt = parLine1.getEndPt()
else:
pt = parLine1.getStartPt()
Bisector123 = qad_utils.getBisectorInfinityLine(pt, ptInt2, ptInt3)
if parLine2.getStartPt() == ptInt2:
pt = parLine2.getEndPt()
else:
pt = parLine2.getStartPt()
Bisector312 = qad_utils.getBisectorInfinityLine(pt, ptInt3, ptInt2)
# Punto di intersezione delle bisettrici = centro delle circonferenza
center = qad_utils.getIntersectionPointOn2InfinityLines(Bisector123[0], Bisector123[1], \
Bisector312[0], Bisector312[1])
ptPer = QadPerpendicularity.fromPointToInfinityLine(center, parLine1)
radius = center.distance(ptPer)
circle = QadCircle()
circle.set(center, radius)
circleList.append(circle)
   # Bisectors of the external angles (rotate each internal bisector by 90 degrees about its vertex)
   angle = qad_utils.getAngleBy2Pts(Bisector123[0], Bisector123[1]) + math.pi / 2
   Bisector123 = [ptInt2, qad_utils.getPolarPointByPtAngle(ptInt2, angle, 10)]
   angle = qad_utils.getAngleBy2Pts(Bisector312[0], Bisector312[1]) + math.pi / 2
   Bisector312 = [ptInt3, qad_utils.getPolarPointByPtAngle(ptInt3, angle, 10)]
   # Intersection point of the bisectors = center of the circle
   center = qad_utils.getIntersectionPointOn2InfinityLines(Bisector123[0], Bisector123[1], \
                                                           Bisector312[0], Bisector312[1])
ptPer = QadPerpendicularity.fromPointToInfinityLine(center, parLine1)
radius = center.distance(ptPer)
circle = QadCircle()
circle.set(center, radius)
circleList.append(circle)
return circleList
#============================================================================
# circleFromLineCircleCircleTanPts
#============================================================================
def circleFromLineCircleCircleTanPts(line, pt, circle1, pt1, circle2, pt2):
"""
setta le caratteristiche del cerchio attraverso tre linee:
linea di tangenza (QadLine)
punto di selezione linea
cerchio1 di tangenza (oggetto QadCircle)
punto di selezione cerchio1
cerchio2 di tangenza (oggetto QadCircle)
punto di selezione cerchio2
"""
circleList = []
circleList.extend(solveCircleTangentToLineAnd2Circles(line, circle1, circle2, -1, -1))
circleList.extend(solveCircleTangentToLineAnd2Circles(line, circle1, circle2, -1, 1))
circleList.extend(solveCircleTangentToLineAnd2Circles(line, circle1, circle2, 1, -1))
circleList.extend(solveCircleTangentToLineAnd2Circles(line, circle1, circle2, 1, 1))
if len(circleList) == 0:
return None
result = QadCircle()
AvgList = []
Avg = sys.float_info.max
for circleTan in circleList:
del AvgList[:] # svuoto la lista
ptInt = QadPerpendicularity.fromPointToInfinityLine(circleTan.center, line)
      AvgList.append(ptInt.distance(pt))
angle = qad_utils.getAngleBy2Pts(circleTan.center, circle1.center)
if circleTan.center.distance(circle1.center) < circle1.radius: # cerchio interno
angle = angle + math.pi / 2
ptInt = qad_utils.getPolarPointByPtAngle(circleTan.center, angle, circleTan.radius)
AvgList.append(ptInt.distance(pt1))
angle = qad_utils.getAngleBy2Pts(circleTan.center, circle2.center)
if circleTan.center.distance(circle2.center) < circle2.radius: # cerchio interno
angle = angle + math.pi / 2
ptInt = qad_utils.getPolarPointByPtAngle(circleTan.center, angle, circleTan.radius)
AvgList.append(ptInt.distance(pt2))
currAvg = qad_utils.numericListAvg(AvgList)
if currAvg < Avg: # mediamente più vicino
Avg = currAvg
result.center = circleTan.center
result.radius = circleTan.radius
return result
#============================================================================
# circleFromCircleCircleCircleTanPts
#============================================================================
def circleFromCircleCircleCircleTanPts(circle1, pt1, circle2, pt2, circle3, pt3):
"""
Crea un cerchio attraverso tre cerchi tangenti:
cerchio1 di tangenza (oggetto QadCircle)
punto di selezione cerchio1
cerchio2 di tangenza (oggetto QadCircle)
punto di selezione cerchio2
cerchio3 di tangenza (oggetto QadCircle)
punto di selezione cerchio3
"""
circleList = []
circle = solveApollonius(circle1, circle2, circle3, -1, -1, -1)
if circle is not None:
circleList.append(circle)
circle = solveApollonius(circle1, circle2, circle3, -1, -1, 1)
if circle is not None:
circleList.append(circle)
circle = solveApollonius(circle1, circle2, circle3, -1, 1, -1)
if circle is not None:
circleList.append(circle)
circle = solveApollonius(circle1, circle2, circle3, -1, 1, 1)
if circle is not None:
circleList.append(circle)
circle = solveApollonius(circle1, circle2, circle3, 1, -1, -1)
if circle is not None:
circleList.append(circle)
circle = solveApollonius(circle1, circle2, circle3, 1, -1, 1)
if circle is not None:
circleList.append(circle)
circle = solveApollonius(circle1, circle2, circle3, 1, 1, -1)
if circle is not None:
circleList.append(circle)
circle = solveApollonius(circle1, circle2, circle3, 1, 1, 1)
if circle is not None:
circleList.append(circle)
if len(circleList) == 0:
return None
result = QadCircle()
AvgList = []
Avg = sys.float_info.max
for circleTan in circleList:
del AvgList[:] # svuoto la lista
angle = qad_utils.getAngleBy2Pts(circleTan.center, circle1.center)
if circleTan.center.distance(circle1.center) < circle1.radius: # cerchio interno
angle = angle + math.pi / 2
ptInt = qad_utils.getPolarPointByPtAngle(circleTan.center, angle, circleTan.radius)
AvgList.append(ptInt.distance(pt1))
angle = qad_utils.getAngleBy2Pts(circleTan.center, circle2.center)
if circleTan.center.distance(circle2.center) < circle2.radius: # cerchio interno
angle = angle + math.pi / 2
ptInt = qad_utils.getPolarPointByPtAngle(circleTan.center, angle, circleTan.radius)
AvgList.append(ptInt.distance(pt2))
angle = qad_utils.getAngleBy2Pts(circleTan.center, circle3.center)
if circleTan.center.distance(circle3.center) < circle3.radius: # cerchio interno
angle = angle + math.pi / 2
ptInt = qad_utils.getPolarPointByPtAngle(circleTan.center, angle, circleTan.radius)
AvgList.append(ptInt.distance(pt3))
currAvg = qad_utils.numericListAvg(AvgList)
if currAvg < Avg: # mediamente più vicino
Avg = currAvg
result.center = circleTan.center
result.radius = circleTan.radius
return result
#===========================================================================
# circleFrom1IntPtLineCircleTanPts
#===========================================================================
def circleFrom1IntPtLineCircleTanPts(pt, line1, pt1, circle2, pt2, AllCircles = False):
"""
crea uno o più cerchi (vedi AllCircles) attraverso 1 punto di intersezione, 1 linea e 1 cerchio tangenti:
punto di intersezione
linea di tangenza (QadLine)
punto di selezione linea
cerchio di tangenza (QadLine)
punto di selezione cerchio
il parametro AllCircles se = True fa restituire tutti i cerchi e non sono quello più vicino a pt1 e pt2
"""
# http://www.batmath.it/matematica/a_apollonio/prc.htm
circleList = []
# Sono dati un cerchio circle2, un punto pt ed una retta line1 nell'ipotesi che pt
# non stia nè sulla retta line1 nè sul circolo.
# Si vogliono trovare le circonferenze passanti per il punto e tangenti alla retta e al cerchio dato.
# Il problema si può risolvere facilmente utilizzando un'inversione di centro pt e raggio qualunque.
# Trovate le circonferenze inverse della retta data e del circolo dato, se ne trovano le tangenti comuni.
# Le inverse di queste tangenti comuni sono le circonferenze cercate.
if line1.getYOnInfinityLine(pt.x()) == pt.y() or \
qad_utils.getDistance(pt, circle2.center) == circle2.radius:
if AllCircles == True:
return circleList
else:
return None
c = QadCircle()
c.set(pt, 10)
circularInvLine = getCircularInversionOfLine(c, line1)
circularInvCircle = getCircularInversionOfCircle(c, circle2)
tangents = QadTangency.twoCircles(circularInvCircle, circularInvLine)
for tangent in tangents:
circleList.append(getCircularInversionOfLine(c, tangent))
if AllCircles == True:
return circleList
if len(circleList) == 0:
return None
result = QadCircle()
AvgList = []
Avg = sys.float_info.max
for circleTan in circleList:
del AvgList[:] # svuoto la lista
ptInt = QadPerpendicularity.fromPointToInfinityLine(circleTan.center, line1)
AvgList.append(qad_utils.getDistance(ptInt, pt1))
angle = qad_utils.getAngleBy2Pts(circleTan.center, circle2.center)
if qad_utils.getDistance(circleTan.center, circle2.center) < circle2.radius: # cerchio interno
angle = angle + math.pi / 2
ptInt = qad_utils.getPolarPointByPtAngle(circleTan.center, angle, circleTan.radius)
AvgList.append(qad_utils.getDistance(ptInt, pt2))
currAvg = qad_utils.numericListAvg(AvgList)
if currAvg < Avg: # mediamente più vicino
Avg = currAvg
result.center = circleTan.center
result.radius = circleTan.radius
return result
#===========================================================================
# circleFrom1IntPtCircleCircleTanPts
#===========================================================================
def circleFrom1IntPtCircleCircleTanPts(pt, circle1, pt1, circle2, pt2):
"""
Crea dei cerchi attraverso 1 punto di intersezione, 2 cerchi tangenti:
punto di intersezione
cerchio1 di tangenza (oggetto QadCircle)
punto di selezione cerchio1
cerchio2 di tangenza (oggetto QadCircle)
punto di selezione cerchio2
"""
# http://www.batmath.it/matematica/a_apollonio/prc.htm
circleList = []
# Sono dati un punto pt e due circonferenze circle1 e circle2;
# si devono determinare le circonferenze passanti per pt e tangenti alle due circonferenze.
# Proponiamo una costruzione che utilizza l'inversione, in quanto ci pare la più elegante.
# In realtà si potrebbe anche fare una costruzione utilizzando i centri di omotetia dei due cerchi dati
# ma, nella sostanza, é solo un modo per mascherare l'uso dell'inversione.
# Si considera un circolo di inversione di centro pt e raggio qualunque.
# Si determinano i circoli inversi dei due circoli dati e le loro tangenti comuni.
# Le circonferenze inverse di queste tangenti comuni sono quelle che soddisfano il problema.
c = QadCircle()
c.set(pt, 10)
circularInvCircle1 = getCircularInversionOfCircle(c, circle1)
circularInvCircle2 = getCircularInversionOfCircle(c, circle2)
tangents = QadTangency.twoCircles(circularInvCircle1, circularInvCircle2)
for tangent in tangents:
circleList.append(getCircularInversionOfLine(c, tangent))
if len(circleList) == 0:
return None
result = QadCircle()
AvgList = []
Avg = sys.float_info.max
for circleTan in circleList:
del AvgList[:] # svuoto la lista
angle = qad_utils.getAngleBy2Pts(circleTan.center, circle1.center)
if qad_utils.getDistance(circleTan.center, circle1.center) < circle1.radius: # cerchio interno
angle = angle + math.pi / 2
ptInt = qad_utils.getPolarPointByPtAngle(circleTan.center, angle, circleTan.radius)
AvgList.append(qad_utils.getDistance(ptInt, pt1))
angle = qad_utils.getAngleBy2Pts(circleTan.center, circle2.center)
if qad_utils.getDistance(circleTan.center, circle2.center) < circle2.radius: # cerchio interno
angle = angle + math.pi / 2
ptInt = qad_utils.getPolarPointByPtAngle(circleTan.center, angle, circleTan.radius)
AvgList.append(qad_utils.getDistance(ptInt, pt2))
currAvg = qad_utils.numericListAvg(AvgList)
if currAvg < Avg: # mediamente più vicino
Avg = currAvg
result.center = circleTan.center
result.radius = circleTan.radius
return result
#============================================================================
# circleFromDiamEndsPtTanPt
#============================================================================
def circleFromDiamEndsPtTanPt(startPt, geom, pt):
"""
Crea un cerchio attraverso un punto di estremità del diametro e
un oggetto di tangenza per l'altra estremità :
punto iniziale
geometria 1 di tangenza (linea, arco o cerchio)
punto di selezione geometria 1
"""
objype = geom.whatIs()
if (objType != "LINE" and objType != "ARC" and objType != "CIRCLE"): return None
if objType == "ARC": # se è arco lo trasformo in cerchio
obj = QadCircle().set(geom.center, geom.radius)
objType = "CIRCLE"
else:
obj = geom
if objType == "LINE":
ptPer = QadPerpendicularity.fromPointToInfinityLine(startPt, obj)
return QadCircle().fromDiamEnds(startPt, ptPer)
elif objType == "CIRCLE":
l = QadLine().set(startPt, obj.center)
intPts = QadIntersections.infinityLineWithCircle(l, obj)
      # choose the point closest to pt
      ptTan = qad_utils.getNearestPoints(pt, intPts)[0]
return QadCircle().fromDiamEnds(startPt, ptTan)
#============================================================================
# circleFromDiamEnds2TanPts
#============================================================================
def circleFromDiamEnds2TanPts(geom1, pt1, geom2, pt2):
"""
Creo un cerchio attraverso due oggetto di tangenza per le estremità del diametro:
geometria1 di tangenza (linea, arco o cerchio)
punto di selezione geometria1
geometria2 di tangenza (linea, arco o cerchio)
punto di selezione geometria2
"""
obj1Type = geom1.whatIs()
obj2Type = geom2.whatIs()
if (obj1Type != "LINE" and obj1Type != "ARC" and obj1Type != "CIRCLE") or \
(obj2Type != "LINE" and obj2Type != "ARC" and obj2Type != "CIRCLE"):
return None
if obj1Type == "ARC": # se è arco lo trasformo in cerchio
obj1 = QadCircle().set(geom1.center, geom1.radius)
obj1Type = "CIRCLE"
else:
obj1 = geom1
if obj2Type == "ARC": # se è arco lo trasformo in cerchio
obj2 = QadCircle().set(geom2.center, geom2.radius)
obj2Type = "CIRCLE"
else:
obj2 = geom2
if obj1Type == "LINE":
if obj2Type == "LINE":
return None # Il diametro non può essere tangente a due linee
elif obj2Type == "CIRCLE":
return circleFromLineCircleTanPts(obj1, obj2, pt2)
elif obj1Type == "CIRCLE":
if obj2Type == "LINE":
return circleFromLineCircleTanPts(obj2, obj1, pt1)
elif obj2Type == "CIRCLE":
return circleFromCircleCircleTanPts(obj1, pt1, obj2, pt2)
return None
#============================================================================
# circleFromLineCircleTanPts
#============================================================================
def circleFromLineCircleTanPts(line, circle, ptCircle):
"""
Creo un cerchio attraverso una linea, un cerchio di tangenza:
linea di tangenza (QadLine)
cerchio di tangenza (oggetto QadCircle)
punto di selezione cerchio
"""
ptPer = QadPerpendicularity.fromPointToInfinityLine(circle.center, line)
tanPoints = []
tanPoints.append(qad_utils.getPolarPointBy2Pts(circle.center, ptPer, circle.radius))
tanPoints.append(qad_utils.getPolarPointBy2Pts(circle.center, ptPer, -circle.radius))
# scelgo il punto più vicino al punto pt
ptTan = qad_utils.getNearestPoints(ptCircle, tanPoints)[0]
return QadCircle().fromDiamEnds(ptPer, ptTan)
#============================================================================
# circleFromCircleCircleTanPts
#============================================================================
def circleFromCircleCircleTanPts(circle1, pt1, circle2, pt2):
"""
Crea un cerchio attraverso due cerchi di tangenza:
cerchio1 di tangenza (oggetto QadCircle)
punto di selezione cerchio1
cerchio2 di tangenza (oggetto QadCircle)
punto di selezione cerchio2
"""
l = QadLine().set(circle1.center, circle2.center)
ptIntList = QadIntersections.infinityLineWithCircle(l, circle1)
# scelgo il punto più vicino al punto pt1
ptTan1 = qad_utils.getNearestPoints(pt1, ptIntList)[0]
ptIntList = QadIntersections.infinityLineWithCircle(l, circle2)
# scelgo il punto più vicino al punto pt2
ptTan2 = qad_utils.getNearestPoints(pt2, ptIntList)[0]
return QadCircle().fromDiamEnds(ptTan1, ptTan2)
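# Worked sketch (illustrative values only): for two unit circles centred at (0, 0) and (4, 0),
# picking near (1, 0) and (3, 0) selects the facing tangency points, so the resulting circle has
# its diameter from (1, 0) to (3, 0): centre (2, 0), radius 1.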
#============================================================================
# circleFrom2TanPtsRadius
#============================================================================
def circleFrom2TanPtsRadius(geom1, pt1, geom2, pt2, radius):
"""
Crea un cerchio attraverso 2 oggetti di tangenza e un raggio:
geometria1 di tangenza (linea, arco o cerchio)
punto di selezione geometria1
oggetto2 di tangenza (linea, arco o cerchio)
punto di selezione geometria2
raggio
"""
obj1Type = geom1.whatIs()
obj2Type = geom2.whatIs()
if (obj1Type != "LINE" and obj1Type != "ARC" and obj1Type != "CIRCLE") or \
(obj2Type != "LINE" and obj2Type != "ARC" and obj2Type != "CIRCLE"):
      return None
if obj1Type == "ARC": # se è arco lo trasformo in cerchio
obj1 = QadCircle().set(geom1.center, geom1.radius)
obj1Type = "CIRCLE"
else:
obj1 = geom1
if obj2Type == "ARC": # se è arco lo trasformo in cerchio
obj2 = QadCircle().set(geom2.center, geom2.radius)
obj2Type = "CIRCLE"
else:
obj2 = geom2
if obj1Type == "LINE":
if obj2Type == "LINE":
return circleFromLineLineTanPtsRadius(obj1, pt1, obj2, pt2, radius)
elif obj2Type == "CIRCLE":
return circleFromLineCircleTanPtsRadius(obj1, pt1, obj2, pt2, radius)
elif obj1Type == "CIRCLE":
if obj2Type == "LINE":
return circleFromLineCircleTanPtsRadius(obj2, pt2, obj1, pt1, radius)
elif obj2Type == "CIRCLE":
return circleFromCircleCircleTanPtsRadius(obj1, pt1, obj2, pt2, radius)
return None
#============================================================================
# circleFromLineLineTanPtsRadius
#============================================================================
def circleFromLineLineTanPtsRadius(line1, pt1, line2, pt2, radius):
"""
Crea un cerchio attraverso due linee di tangenza e un raggio:
linea1 di tangenza (QadLine)
punto di selezione linea1
linea2 di tangenza (QadLine)
punto di selezione linea2
raggio
"""
# calcolo il punto medio tra i due punti di selezione
ptMiddle = qad_utils.getMiddlePoint(pt1, pt2)
# verifico se le rette sono parallele
ptInt = QadIntersections.twoInfinityLines(line1, line2)
if ptInt is None: # le rette sono parallele
ptPer = QadPerpendicularity.fromPointToInfinityLine(ptMiddle, line1)
if qad_utils.doubleNear(radius, qad_utils.getDistance(ptPer, ptMiddle)):
return QadCircle().set(ptMiddle, radius)
else:
return None
# angolo linea1
angle = qad_utils.getAngleBy2Pts(line1.getStartPt(), line1.getEndPt())
# retta parallela da un lato della linea1 distante radius
angle = angle + math.pi / 2
pt1Par1Line1 = qad_utils.getPolarPointByPtAngle(line1.getStartPt(), angle, radius)
pt2Par1Line1 = qad_utils.getPolarPointByPtAngle(line1.getEndPt(), angle, radius)
# retta parallela dall'altro lato della linea1 distante radius
angle = angle - math.pi
pt1Par2Line1 = qad_utils.getPolarPointByPtAngle(line1.getStartPt(), angle, radius)
pt2Par2Line1 = qad_utils.getPolarPointByPtAngle(line1.getEndPt(), angle, radius)
# angolo linea2
angle = qad_utils.getAngleBy2Pts(line2.getStartPt(), line2.getEndPt())
# retta parallela da un lato della linea2 distante radius
angle = angle + math.pi / 2
pt1Par1Line2 = qad_utils.getPolarPointByPtAngle(line2.getStartPt(), angle, radius)
pt2Par1Line2 = qad_utils.getPolarPointByPtAngle(line2.getEndPt(), angle, radius)
# retta parallela dall'altro lato della linea2 distante radius
angle = angle - math.pi
pt1Par2Line2 = qad_utils.getPolarPointByPtAngle(line2.getStartPt(), angle, radius)
pt2Par2Line2 = qad_utils.getPolarPointByPtAngle(line2.getEndPt(), angle, radius)
# calcolo le intersezioni
ptIntList = []
ptInt = qad_utils.getIntersectionPointOn2InfinityLines(pt1Par1Line1, pt2Par1Line1, \
pt1Par1Line2, pt2Par1Line2)
ptIntList.append(ptInt)
ptInt = qad_utils.getIntersectionPointOn2InfinityLines(pt1Par1Line1, pt2Par1Line1, \
pt1Par2Line2, pt2Par2Line2)
ptIntList.append(ptInt)
ptInt = qad_utils.getIntersectionPointOn2InfinityLines(pt1Par2Line1, pt2Par2Line1, \
pt1Par1Line2, pt2Par1Line2)
ptIntList.append(ptInt)
ptInt = qad_utils.getIntersectionPointOn2InfinityLines(pt1Par2Line1, pt2Par2Line1, \
pt1Par2Line2, pt2Par2Line2)
ptIntList.append(ptInt)
# scelgo il punto più vicino al punto medio
center = qad_utils.getNearestPoints(ptMiddle, ptIntList)[0]
return QadCircle().set(center, radius)
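# Worked sketch (illustrative values only): with line1 along the x axis, line2 along the y axis
# and radius = 1, the four candidate centres are (1, 1), (1, -1), (-1, 1) and (-1, -1); picking
# near (1, 0) and (0, 1) makes (1, 1) the centre closest to the midpoint of the picks, giving a
# circle of radius 1 tangent to both axes in the first quadrant.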
#============================================================================
# circleFromLineCircleTanPtsRadius
#============================================================================
def circleFromLineCircleTanPtsRadius(line, ptLine, circle, ptCircle, radius):
"""
Crea un cerchio attraverso una linea, un cerchio di tangenza e un raggio:
linea di tangenza (QadLine)
punto di selezione linea
cerchio di tangenza (oggetto QadCircle)
punto di selezione cerchio
raggio
"""
# calcolo il punto medio tra i due punti di selezione
ptMiddle = qad_utils.getMiddlePoint(ptLine, ptCircle)
# angolo linea1
angle = qad_utils.getAngleBy2Pts(line.getStartPt(), line.getEndPt())
# retta parallela da un lato della linea1 distante radius
angle = angle + math.pi / 2
pt1Par1Line = qad_utils.getPolarPointByPtAngle(line.getStartPt(), angle, radius)
pt2Par1Line = qad_utils.getPolarPointByPtAngle(line.getEndPt(), angle, radius)
# retta parallela dall'altro lato della linea1 distante radius
angle = angle - math.pi
pt1Par2Line = qad_utils.getPolarPointByPtAngle(line.getStartPt(), angle, radius)
pt2Par2Line = qad_utils.getPolarPointByPtAngle(line.getEndPt(), angle, radius)
# creo un cerchio con un raggio + grande
circleTan = QadCircle()
circleTan.set(circle.center, circle.radius + radius)
l = QadLine().set(pt1Par1Line, pt2Par1Line)
ptIntList = QadIntersections.infinityLineWithCircle(l, circleTan)
l.set(pt1Par2Line, pt2Par2Line)
ptIntList2 = QadIntersections.infinityLineWithCircle(l, circleTan)
ptIntList.extend(ptIntList2)
if len(ptIntList) == 0: # nessuna intersezione
return None
# scelgo il punto più vicino al punto medio
center = qad_utils.getNearestPoints(ptMiddle, ptIntList)[0]
return QadCircle().set(center, radius)
#============================================================================
# circleFromCircleCircleTanPtsRadius
#============================================================================
def circleFromCircleCircleTanPtsRadius(circle1, pt1, circle2, pt2, radius):
"""
Crea un cerchio attraverso due cerchi di tangenza e un raggio:
cerchio1 di tangenza (oggetto QadCircle)
punto di selezione cerchio1
cerchio2 di tangenza (oggetto QadCircle)
punto di selezione cerchio2
raggio
"""
# calcolo il punto medio tra i due punti di selezione
ptMiddle = qad_utils.getMiddlePoint(pt1, pt2)
# creo due cerchi con un raggio + grande
circle1Tan = QadCircle()
circle1Tan.set(circle1.center, circle1.radius + radius)
circle2Tan = QadCircle()
circle2Tan.set(circle2.center, circle2.radius + radius)
ptIntList = QadIntersections.twoCircles(circle1Tan, circle2Tan)
if len(ptIntList) == 0: # nessuna intersezione
return None
# scelgo il punto più vicino al punto medio
center = qad_utils.getNearestPoints(ptMiddle, ptIntList)[0]
return QadCircle().set(center, radius)
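# Worked sketch (illustrative values only): for two unit circles centred at (0, 0) and (4, 0)
# and radius = 2, the enlarged circles (radius 3) intersect at (2, +sqrt(5)) and (2, -sqrt(5));
# picking near the top of each given circle returns centre (2, sqrt(5)) ~ (2, 2.236), i.e. a
# circle of radius 2 externally tangent to both given circles.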
#===============================================================================
# solveCircleTangentToLineAnd2Circles
#===============================================================================
def solveCircleTangentToLineAnd2Circles(line, circle1, circle2, s1, s2):
   '''
   Finds the circles tangent to a line and two circles for the given sign pair (all 8 solutions arise
   from the 4 combinations of s1, s2 = -1 or 1) and returns them as a list
   '''
   # http://www.batmath.it/matematica/a_apollonio/rcc.htm
   # The simplest way to solve this problem is to use a particular geometric transformation,
   # sometimes called parallel dilation: imagine that the radius r of the smallest of the circles
   # involved shrinks to zero (the circle is reduced to its center), while the lines (resp. the other
   # circles) stay parallel (resp. concentric), with their distances from the now-zero-radius circle's
   # center (resp. with the radii of the circles) increased or decreased by r.
   # Applying this transformation to our case, shrinking to zero the radius of the smaller circle
   # (or of either one if they have the same radius), we are left with a point, a circle and a line:
   # once the circles through the point and tangent to the line and the circle are found (in the way
   # already seen), the inverse of the previous parallel dilation can be applied to determine
   # the circles required.
if circle1.radius <= circle2.radius:
smallerCircle = circle1
greaterCircle = circle2
else:
smallerCircle = circle2
greaterCircle = circle1
   angle = qad_utils.getAngleBy2Pts(line.getStartPt(), line.getEndPt())
   linePar = QadLine().set(qad_utils.getPolarPointByPtAngle(line.getStartPt(), angle + math.pi / 2, smallerCircle.radius * s1), \
                           qad_utils.getPolarPointByPtAngle(line.getEndPt(), angle + math.pi / 2, smallerCircle.radius * s1))
circlePar = QadCircle(greaterCircle)
circlePar.radius = circlePar.radius + smallerCircle.radius * s1
circleList = circleFrom1IntPtLineCircleTanPts(smallerCircle.center, linePar, None, circlePar, None, True)
for circleTan in circleList:
      ptPerp = qad_utils.getPerpendicularPointOnInfinityLine(line.getStartPt(), line.getEndPt(), circleTan.center)
circleTan.radius = qad_utils.getDistance(ptPerp, circleTan.center)
return circleList
#===============================================================================
# solveApollonius
#===============================================================================
def solveApollonius(c1, c2, c3, s1, s2, s3):
'''
>>> solveApollonius((0, 0, 1), (4, 0, 1), (2, 4, 2), 1,1,1)
Circle(x=2.0, y=2.1, r=3.9)
>>> solveApollonius((0, 0, 1), (4, 0, 1), (2, 4, 2), -1,-1,-1)
Circle(x=2.0, y=0.8333333333333333, r=1.1666666666666667)
   Finds a circle tangent to three circles (there are up to 8 such circles, found with the
   8 combinations of s1, s2, s3 taking the values -1 or 1)
'''
x1 = c1.center.x()
y1 = c1.center.y()
r1 = c1.radius
x2 = c2.center.x()
y2 = c2.center.y()
r2 = c2.radius
x3 = c3.center.x()
y3 = c3.center.y()
r3 = c3.radius
v11 = 2*x2 - 2*x1
v12 = 2*y2 - 2*y1
v13 = x1*x1 - x2*x2 + y1*y1 - y2*y2 - r1*r1 + r2*r2
v14 = 2*s2*r2 - 2*s1*r1
v21 = 2*x3 - 2*x2
v22 = 2*y3 - 2*y2
v23 = x2*x2 - x3*x3 + y2*y2 - y3*y3 - r2*r2 + r3*r3
v24 = 2*s3*r3 - 2*s2*r2
if v11 == 0:
return None
w12 = v12/v11
w13 = v13/v11
w14 = v14/v11
if v21 == 0:
return None
w22 = v22/v21-w12
w23 = v23/v21-w13
w24 = v24/v21-w14
if w22 == 0:
return None
P = -w23/w22
Q = w24/w22
M = -w12*P-w13
N = w14 - w12*Q
a = N*N + Q*Q - 1
b = 2*M*N - 2*N*x1 + 2*P*Q - 2*Q*y1 + 2*s1*r1
c = x1*x1 + M*M - 2*M*x1 + P*P + y1*y1 - 2*P*y1 - r1*r1
# Find a root of a quadratic equation. This requires the circle centers not to be e.g. colinear
if a == 0:
return None
D = (b * b) - (4 * a * c)
   # if D is very close to zero
   if qad_utils.doubleNear(D, 0.0):
      D = 0
   elif D < 0: # cannot take the square root of a negative number
return None
rs = (-b-math.sqrt(D))/(2*a)
xs = M+N*rs
ys = P+Q*rs
center = QgsPointXY(xs, ys)
circle = QadCircle().set(center, rs)
return circle
#===============================================================================
# getCircularInversionOfPoint
#===============================================================================
def getCircularInversionOfPoint(circleRef, pt):
"""
la funzione ritorna l'inversione circolare di un punto
"""
dist = qad_utils.getDistance(circleRef.center, pt)
angle = qad_utils.getAngleBy2Pts(circleRef.center, pt)
circInvDist = circleRef.radius * circleRef.radius / dist
return qad_utils.getPolarPointByPtAngle(circleRef.center, angle, circInvDist)
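# Worked sketch (illustrative values only): with a reference circle of radius 10 centred at the
# origin, a point 5 units from the centre maps to a point 10 * 10 / 5 = 20 units from the centre
# along the same ray; applying the inversion twice gives back the original point.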
#===============================================================================
# getCircularInversionOfLine
#===============================================================================
def getCircularInversionOfLine(circleRef, line):
"""
la funzione ritorna l'inversione circolare di una linea (che é un cerchio)
"""
angleLine = qad_utils.getAngleBy2Pts(line.getStartPt(), line.getEndPt())
ptNearestLine = QadPerpendicularity.fromPointToInfinityLine(circleRef.center, line)
dist = qad_utils.getDistance(circleRef.center, ptNearestLine)
pt1 = getCircularInversionOfPoint(circleRef, ptNearestLine)
pt = qad_utils.getPolarPointByPtAngle(ptNearestLine, angleLine, dist)
pt2 = getCircularInversionOfPoint(circleRef, pt)
pt = qad_utils.getPolarPointByPtAngle(ptNearestLine, angleLine + math.pi, dist)
pt3 = getCircularInversionOfPoint(circleRef, pt)
return circleFrom3Pts(pt1, pt2, pt3)
#===============================================================================
# getCircularInversionOfCircle
#===============================================================================
def getCircularInversionOfCircle(circleRef, circle):
"""
la funzione ritorna l'inversione circolare di un cerchio (che é un cerchio)
"""
angleLine = qad_utils.getAngleBy2Pts(circle.center, circleRef.center)
ptNearestLine = qad_utils.getPolarPointByPtAngle(circle.center, angleLine, circle.radius)
dist = qad_utils.getDistance(circleRef.center, circle.center)
pt1 = getCircularInversionOfPoint(circleRef, ptNearestLine)
pt = qad_utils.getPolarPointByPtAngle(circle.center, angleLine + math.pi / 2, circle.radius)
pt2 = getCircularInversionOfPoint(circleRef, pt)
pt = qad_utils.getPolarPointByPtAngle(circle.center, angleLine - math.pi / 2, circle.radius)
pt3 = getCircularInversionOfPoint(circleRef, pt)
return circleFrom3Pts(pt1, pt2, pt3)
| gpl-3.0 | 9,054,910,789,273,340,000 | 39.761468 | 118 | 0.607645 | false |
hortonworks/hortonworks-sandbox | desktop/core/src/desktop/middleware_test.py | 1 | 1593 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tests for Desktop-specific middleware
from desktop.lib.django_test_util import make_logged_in_client
from nose.tools import assert_equal
def test_jframe_middleware():
c = make_logged_in_client()
path = "/about/?foo=bar&baz=3"
response = c.get(path)
assert_equal(path, response["X-Hue-JFrame-Path"])
path_nocache = "/about/?noCache=blabla&foo=bar&baz=3"
response = c.get(path_nocache)
assert_equal(path, response["X-Hue-JFrame-Path"])
path_nocache = "/about/?noCache=blabla&foo=bar&noCache=twiceover&baz=3"
response = c.get(path_nocache)
assert_equal(path, response["X-Hue-JFrame-Path"])
path = "/about/"
response = c.get(path)
assert_equal(path, response["X-Hue-JFrame-Path"])
response = c.get("/about/?")
assert_equal("/about/", response["X-Hue-JFrame-Path"])
| apache-2.0 | 5,882,152,061,198,778,000 | 36.046512 | 74 | 0.733208 | false |
corumcorp/redsentir | redsentir/lineatiempo/migrations/0001_initial.py | 1 | 2480 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-04-04 18:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ComentarioP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('contenido', models.CharField(max_length=1000, null=True)),
('fecha', models.DateTimeField(auto_now=True)),
('me_gusta', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='MultiMedia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('archivo', models.FileField(upload_to='static/images/publicaciones')),
('tipo', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Publicacion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('contenido', models.CharField(max_length=1000, null=True)),
('fecha', models.DateTimeField(auto_now=True)),
('me_gusta', models.IntegerField(default=0)),
('comentarios', models.IntegerField(default=0)),
('usuario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='multimedia',
name='publicacion',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lineatiempo.Publicacion'),
),
migrations.AddField(
model_name='comentariop',
name='publicacion',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lineatiempo.Publicacion'),
),
migrations.AddField(
model_name='comentariop',
name='usuario',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| gpl-3.0 | 3,658,711,810,807,085,600 | 39 | 121 | 0.587097 | false |
bdunnette/djecks | migrations/0005_auto__add_field_case_source__chg_field_case_title__chg_field_deck_titl.py | 1 | 3187 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Case.source'
db.add_column(u'djecks_case', 'source',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Changing field 'Case.title'
db.alter_column(u'djecks_case', 'title', self.gf('django.db.models.fields.TextField')(default=''))
# Changing field 'Deck.title'
db.alter_column(u'djecks_deck', 'title', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Deleting field 'Case.source'
db.delete_column(u'djecks_case', 'source')
# Changing field 'Case.title'
db.alter_column(u'djecks_case', 'title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
# Changing field 'Deck.title'
db.alter_column(u'djecks_deck', 'title', self.gf('django.db.models.fields.CharField')(max_length=255))
models = {
u'djecks.card': {
'Meta': {'object_name': 'Card'},
'cases': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['djecks.Case']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_back': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_front': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'djecks.case': {
'Meta': {'object_name': 'Case'},
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'decks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['djecks.Deck']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'djecks.deck': {
'Meta': {'object_name': 'Deck'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['djecks'] | agpl-3.0 | -5,144,000,211,033,142,000 | 50.419355 | 165 | 0.564481 | false |
jreades/starspy | stars/visualization/kernelDensityTime.py | 1 | 7148 | """
Yet Another Kernel Density Implementation in Python
This one supports updating the raster one event at a time, to allow for time series visualization.
"""
from sys import stdout
import pysal
import numpy
from math import exp,pi,ceil,floor,sqrt
#try:
# from osgeo import gdal, gdal_array
# from osgeo.gdalconst import GDT_Float64
#except ImportError:
# import gdal, gdal_array
# from gdalconst import GDT_Float64
def triangular(z):
return 1 - abs(z)
def uniform(z):
return abs(z)
def quadratic(z):
return 0.75*(1 - z*z)
def quartic(z):
return (15*1.0/16)*(1-z*z)*(1-z*z)
def gaussian(z):
return sqrt(2*pi)*exp(-0.5*z*z)
class KernelDensity:
"""
Kernel Density Estimation
    extent -- tuple -- (left, lower, right, upper) bounding box, in map units.
    cellSize -- int -- Output cell size, in map units.
    bandwidth -- float -- Kernel bandwidth, in map units.
    kernel -- str -- One of 'triangular', 'uniform', 'quadratic', 'quartic',
                     'gaussian'. Defaults to 'quadratic'.
    extent_buffer -- float -- Optional padding added around the extent, in map units.
"""
def __init__(self, extent, cellSize, bandwidth, kernel='quadratic', extent_buffer=0):
left, lower, right, upper = extent
left,lower = left-extent_buffer,lower-extent_buffer
right,upper = right+extent_buffer,upper+extent_buffer
self.extent = pysal.cg.Rectangle(left,lower,right,upper)
self.cellSize = cellSize
self.bandwidth = bandwidth
        kernels = {'triangular': triangular, 'uniform': uniform,
                   'quadratic': quadratic, 'quartic': quartic,
                   'gaussian': gaussian}
        if kernel not in kernels:
            raise ValueError('Unsupported kernel type: %r' % kernel)
        self.kernel = kernels[kernel]
self._raster = numpy.zeros((self.rows,self.cols))
self.bw = bandwidth
self.cellSize = float(cellSize)
self.grid_lower = lower+(cellSize/2.0)
maxRow = self.rows-1
self.grid_upper = self.grid_lower + (maxRow*self.cellSize)
self.grid_left = left+(self.cellSize/2.0)
        self._n = 0
        # Optional progress reporters (e.g. multiprocessing shared values) used
        # by erdasImage() and asciiTable(); default to None for standalone use.
        self.mpValue = None
        self.mpArray = None
def update(self,X,Y,invert=False):
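        """Accumulate one event at map coordinates (X, Y) into the raster,
        touching only the cells within one bandwidth of the event; with
        invert=True the event's contribution is subtracted instead."""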
self._n += 1
cellSize = self.cellSize
radius = self.bandwidth / cellSize
float_i = (Y-self.grid_lower) / cellSize
#float_i = (self.grid_upper-Y) / cellSize
i = int(floor(float_i - radius))
i = i if i >= 0 else 0
I = int(floor(float_i + radius))
I = I if I < self.rows else self.rows-1
float_j = (X-self.grid_left) / cellSize
j = int(floor(float_j - radius))
j = j if j >= 0 else 0
J = int(floor(float_j + radius))
J = J if J < self.cols else self.cols-1
#print
#print "update rows[%d:%d], cols[%d:%d]"%(i,I,j,J)
for row in xrange(i,I+1):
for col in xrange(j,J+1):
x = self.grid_left+(col*cellSize)
y = self.grid_lower+(row*cellSize)
#y = self.grid_upper-(row*cellSize)
d = ((x-X)**2 + (y-Y)**2) ** (0.5)
if d <= self.bw:
z = d/self.bw
if invert:
self._raster[row,col] -= self.kernel(z)
else:
#print "update[%d,%d]"%(row,col)
self._raster[row,col] += self.kernel(z)
@property
def raster(self):
return self._raster / (self._n*self.bw)
@property
def cols(self):
return int(ceil(self.extent.width / float(self.cellSize)))
@property
def rows(self):
return int(ceil(self.extent.height / self.cellSize))
def erdasImage(self, outfilename):
mpValue = self.mpValue
mpArray = self.mpArray
driver = gdal.GetDriverByName('HFA')
out = driver.Create(outfilename, self.cols, self.rows, 1, GDT_Float64)
if mpValue and hasattr(mpValue, 'value'):
mpValue.value = 1
mpArray.value = "The output image file is created."
        try:
            out.SetGeoTransform([self.extent.left, self.extent.width/self.cols, 0, self.extent.lower, 0, self.extent.height/self.rows])
            gdal_array.BandWriteArray(out.GetRasterBand(1), self.raster)
            if mpValue and hasattr(mpValue, 'value'):
                mpValue.value = 100
                mpArray.value = "The output image file is successfully written."
            return True
        except Exception:
            if mpValue and hasattr(mpValue, 'value'):
                mpValue.value = 100
                mpArray.value = "Image writing failed."
            return False
def asciiTable(self):
mpValue = self.mpValue
mpArray = self.mpArray
tot = float(self.rows)
s = "ncols %d\n"%self.cols
s+= "nrows %d\n"%self.rows
s+= "xllcorner %f\n"%self.extent.left
s+= "yllcorner %f\n"%self.extent.lower
s+= "cellsize %f\n"%self.cellSize
s+= "nodata_value -1\n"
c = 0
        for i in xrange(self.rows-1,-1,-1):
            for j in xrange(self.cols):
                s+="%f "%self.raster[i,j]
            s+="\n"
            c += 1
            if mpValue and hasattr(mpValue,'value'):
                mpValue.value = int(round((c/tot)*100))
            if mpArray and hasattr(mpArray,'value'):
                mpArray.value = "Saving... %d of %d rows remaining"%(i,tot)
            else:
                stdout.write('\r%f%% Complete.'%(100*(c/tot)))
                stdout.flush()
return s
if __name__=='__main__':
def draw(kd):
img = numpy.zeros((kd.rows,kd.cols,3),numpy.uint8)
raster = kd.raster
scaled = (raster-raster.min())/(raster.max()-raster.min())
img[:,:,0] = (scaled*255).astype("B") #red
img[:,:,2] = ((1+(scaled*-1))*255).astype("B") #blue
return Image.fromarray(img)
import time
import datetime
from PIL import Image,ImageDraw
t0 = time.clock()
#shp = pysal.open('/Users/charlie/Documents/data/pittsburgh/pitthom.shp','r')
shp = pysal.open('/Users/charlie/Documents/Work/NIJ/Target1/Mesa Data/Mesa_ResBurgAllYears_withGrids/Mesa_ResBurgAllYears_withGrids.shp','r')
dbf = pysal.open('/Users/charlie/Documents/Work/NIJ/Target1/Mesa Data/Mesa_ResBurgAllYears_withGrids/Mesa_ResBurgAllYears_withGrids.dbf','r')
dates = dbf.by_col("REPORT_DAT")
data = dict([(date,set()) for date in dates])
for date,point in zip(dates,shp):
data[date].add(point)
dates.sort()
extent = [shp.header.get(x) for x in ['BBOX Xmin', 'BBOX Ymin', 'BBOX Xmax', 'BBOX Ymax']]
kd = KernelDensity(extent,400,3500)
#open('kd_ascii.txt','w').write(kd.asciiTable())
start = dates[0]
cur = start
step = datetime.timedelta(days=1)
window = datetime.timedelta(days=120)
window = None
end = dates[-1]
#for i,date in enumerate(dates):
i = 0
while cur <= end:
if cur in data:
evts = data[cur]
if window:
clear = cur-window
if clear in data:
for rx,ry in data[clear]:
kd.update(rx,ry,True)
for x,y in evts:
kd.update(x,y)
img = draw(kd)
d = ImageDraw.Draw(img)
if window:
d.text((0,0),clear.isoformat()+" through "+cur.isoformat())
else:
d.text((0,0),cur.isoformat())
del d
img.save("kd/kd_%d.png"%i)
i+=1
cur+=step
print time.clock()-t0
| gpl-2.0 | 3,872,030,793,151,872,500 | 35.284264 | 145 | 0.559177 | false |
hayj/WorkspaceManager | workspacemanager/test/setuptest.py | 1 | 1453 | # coding: utf-8
import unittest
import doctest
import os
from workspacemanager import setup
from workspacemanager import generateSetup
from workspacemanager.utils import *
from shutil import *
from workspacemanager.test.utils import *
# These levels allow the unit test execution to run only the chosen test levels
min = 0
max = 1
assert min <= max
if min <= 0 <= max:
class DocTest(unittest.TestCase):
def testDoctests(self):
"""Run doctests"""
doctest.testmod(setup)
if min <= 1 <= max:
class Test1(unittest.TestCase):
def setUp(self):
pass
def test1(self):
# Create a fake project:
theProjectDirectory = createFakeDir()
# Check the fake project:
assert os.path.isdir(theProjectDirectory) is True
# Generate the setup and others:
generateSetup(theProjectDirectory=theProjectDirectory)
# Check things:
self.assertTrue("__DES" not in fileToStr(theProjectDirectory + "/setup.py"))
self.assertTrue("<year>" not in fileToStr(theProjectDirectory + "/LICENCE.txt"))
self.assertTrue("version" in fileToStr(theProjectDirectory + "/projecttest/__init__.py"))
if min <= 2 <= max:
pass
if min <= 3 <= max:
pass
if __name__ == '__main__':
unittest.main() # Or execute as Python unit-test in eclipse
| mit | -396,985,961,022,106,430 | 25.418182 | 101 | 0.613902 | false |
michaelkuty/feincms | feincms/module/extensions/ct_tracker.py | 1 | 6323 | # ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
#
# ct_tracker.py
# FeinCMS
#
# Created by Martin J. Laubach on 02.10.09.
# Copyright (c) 2009 Martin J. Laubach. All rights reserved.
# Updated in 2011 by Matthias Kestenholz for the 1.3 release.
#
# ------------------------------------------------------------------------
"""
Track the content types for pages. Instead of gathering the content
types present in each page at run time, save the current state at
saving time, thus saving at least one DB query on page delivery.
"""
from __future__ import absolute_import, unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import class_prepared, post_save, pre_save
from django.utils.translation import ugettext_lazy as _
from feincms import extensions
from feincms.contrib.fields import JSONField
from feincms.models import ContentProxy
INVENTORY_VERSION = 1
_translation_map_cache = {}
# ------------------------------------------------------------------------
class TrackerContentProxy(ContentProxy):
def _fetch_content_type_counts(self):
"""
If an object with an empty _ct_inventory is encountered, compute all
the content types currently used on that object and save the list in
the object itself. Further requests for that object can then access
that information and find out which content types are used without
resorting to multiple selects on different ct tables.
It is therefore important that even an "empty" object does not have an
empty _ct_inventory.
"""
if 'counts' not in self._cache:
if (self.item._ct_inventory
and self.item._ct_inventory.get('_version_', -1)
== INVENTORY_VERSION):
try:
self._cache['counts'] = self._from_inventory(
self.item._ct_inventory)
except KeyError:
# It's possible that the inventory does not fit together
# with the current models anymore, f.e. because a content
# type has been removed.
pass
if 'counts' not in self._cache:
super(TrackerContentProxy, self)._fetch_content_type_counts()
self.item._ct_inventory = self._to_inventory(
self._cache['counts'])
if hasattr(self.item, 'invalidate_cache'):
self.item.invalidate_cache()
self.item.__class__.objects.filter(id=self.item.id).update(
_ct_inventory=self.item._ct_inventory)
# Run post save handler by hand
if hasattr(self.item, 'get_descendants'):
self.item.get_descendants(include_self=False).update(
_ct_inventory=None)
return self._cache['counts']
def _translation_map(self):
cls = self.item.__class__
if cls not in _translation_map_cache:
            # Prime the translation map and cache it in the class. This needs
            # to be done late, as opposed to at class definition time, because
            # not all information is ready yet; in particular, during a
            # "syncdb" the ContentType table does not exist yet.
map = {}
for idx, fct in enumerate(self.item._feincms_content_types):
dct = ContentType.objects.get_for_model(fct)
# Rely on non-negative primary keys
map[-dct.id] = idx # From-inventory map
map[idx] = dct.id # To-inventory map
_translation_map_cache[cls] = map
return _translation_map_cache[cls]
def _from_inventory(self, inventory):
"""
Transforms the inventory from Django's content types to FeinCMS's
ContentProxy counts format.
"""
map = self._translation_map()
return dict((region, [
(pk, map[-ct]) for pk, ct in items
]) for region, items in inventory.items() if region != '_version_')
def _to_inventory(self, counts):
map = self._translation_map()
inventory = dict(
(
region,
[(pk, map[ct]) for pk, ct in items],
) for region, items in counts.items()
)
inventory['_version_'] = INVENTORY_VERSION
return inventory
# ------------------------------------------------------------------------
def class_prepared_handler(sender, **kwargs):
# It might happen under rare circumstances that not all model classes
# are fully loaded and initialized when the translation map is accessed.
# This leads to (lots of) crashes on the server. Better be safe and
# kill the translation map when any class_prepared signal is received.
_translation_map_cache.clear()
class_prepared.connect(class_prepared_handler)
# ------------------------------------------------------------------------
def tree_post_save_handler(sender, instance, **kwargs):
"""
Clobber the _ct_inventory attribute of this object and all sub-objects
on save.
"""
# TODO: Does not find everything it should when ContentProxy content
# inheritance has been customized.
instance.get_descendants(include_self=True).update(_ct_inventory=None)
# ------------------------------------------------------------------------
def single_pre_save_handler(sender, instance, **kwargs):
"""Clobber the _ct_inventory attribute of this object"""
instance._ct_inventory = None
# ------------------------------------------------------------------------
class Extension(extensions.Extension):
def handle_model(self):
self.model.add_to_class('_ct_inventory', JSONField(
_('content types'), editable=False, blank=True, null=True))
self.model.content_proxy_class = TrackerContentProxy
pre_save.connect(single_pre_save_handler, sender=self.model)
if hasattr(self.model, 'get_descendants'):
post_save.connect(tree_post_save_handler, sender=self.model)
# ------------------------------------------------------------------------
| bsd-3-clause | -4,565,303,736,663,644,700 | 38.030864 | 79 | 0.558754 | false |
mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/horovod/setup.py | 1 | 24873 | # Copyright 2017 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import print_function
import os
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from distutils.errors import CompileError, DistutilsError, DistutilsPlatformError, LinkError
import shlex
import subprocess
import sys
import textwrap
import traceback
from horovod import __version__
common_mpi_lib = Extension('horovod.common.mpi_lib', [])
tensorflow_mpi_lib = Extension('horovod.tensorflow.mpi_lib', [])
torch_mpi_lib = Extension('horovod.torch.mpi_lib', [])
torch_mpi_lib_impl = Extension('horovod.torch.mpi_lib_impl', [])
def is_build_action():
if len(sys.argv) <= 1:
return False
if sys.argv[1].startswith('build'):
return True
if sys.argv[1].startswith('bdist'):
return True
if sys.argv[1].startswith('install'):
return True
def check_tf_version():
try:
import tensorflow as tf
if tf.__version__ < '1.1.0':
raise DistutilsPlatformError(
'Your TensorFlow version %s is outdated. '
'Horovod requires tensorflow>=1.1.0' % tf.__version__)
except ImportError:
raise DistutilsPlatformError(
'import tensorflow failed, is it installed?\n\n%s' % traceback.format_exc())
except AttributeError:
# This means that tf.__version__ was not exposed, which makes it *REALLY* old.
raise DistutilsPlatformError(
'Your TensorFlow version is outdated. Horovod requires tensorflow>=1.1.0')
def get_cpp_flags(build_ext):
last_err = None
default_flags = ['-std=c++11', '-fPIC', '-O2']
if sys.platform == 'darwin':
# Darwin most likely will have Clang, which has libc++.
flags_to_try = [default_flags + ['-stdlib=libc++'], default_flags]
else:
flags_to_try = [default_flags, default_flags + ['-stdlib=libc++']]
for cpp_flags in flags_to_try:
try:
test_compile(build_ext, 'test_cpp_flags', extra_preargs=cpp_flags,
code=textwrap.dedent('''\
#include <unordered_map>
void test() {
}
'''))
return cpp_flags
except (CompileError, LinkError):
last_err = 'Unable to determine C++ compilation flags (see error above).'
except Exception:
last_err = 'Unable to determine C++ compilation flags. ' \
'Last error:\n\n%s' % traceback.format_exc()
raise DistutilsPlatformError(last_err)
def get_tf_include_dirs():
import tensorflow as tf
tf_inc = tf.sysconfig.get_include()
return [tf_inc, '%s/external/nsync/public' % tf_inc]
def get_tf_lib_dirs():
import tensorflow as tf
tf_lib = tf.sysconfig.get_lib()
return [tf_lib]
def get_tf_libs(build_ext, lib_dirs, cpp_flags):
last_err = None
for tf_libs in [['tensorflow_framework'], []]:
try:
lib_file = test_compile(build_ext, 'test_tensorflow_libs',
library_dirs=lib_dirs, libraries=tf_libs,
extra_preargs=cpp_flags,
code=textwrap.dedent('''\
void test() {
}
'''))
from tensorflow.python.framework import load_library
load_library.load_op_library(lib_file)
return tf_libs
except (CompileError, LinkError):
last_err = 'Unable to determine -l link flags to use with TensorFlow (see error above).'
except Exception:
last_err = 'Unable to determine -l link flags to use with TensorFlow. ' \
'Last error:\n\n%s' % traceback.format_exc()
raise DistutilsPlatformError(last_err)
def get_tf_abi(build_ext, include_dirs, lib_dirs, libs, cpp_flags):
last_err = None
cxx11_abi_macro = '_GLIBCXX_USE_CXX11_ABI'
for cxx11_abi in ['0', '1']:
try:
lib_file = test_compile(build_ext, 'test_tensorflow_abi',
macros=[(cxx11_abi_macro, cxx11_abi)],
include_dirs=include_dirs, library_dirs=lib_dirs,
libraries=libs, extra_preargs=cpp_flags,
code=textwrap.dedent('''\
#include <string>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
void test() {
auto ignore = tensorflow::strings::StrCat("a", "b");
}
'''))
from tensorflow.python.framework import load_library
load_library.load_op_library(lib_file)
return cxx11_abi_macro, cxx11_abi
except (CompileError, LinkError):
last_err = 'Unable to determine CXX11 ABI to use with TensorFlow (see error above).'
except Exception:
last_err = 'Unable to determine CXX11 ABI to use with TensorFlow. ' \
'Last error:\n\n%s' % traceback.format_exc()
raise DistutilsPlatformError(last_err)
def get_tf_flags(build_ext, cpp_flags):
import tensorflow as tf
try:
return tf.sysconfig.get_compile_flags(), tf.sysconfig.get_link_flags()
except AttributeError:
# fallback to the previous logic
tf_include_dirs = get_tf_include_dirs()
tf_lib_dirs = get_tf_lib_dirs()
tf_libs = get_tf_libs(build_ext, tf_lib_dirs, cpp_flags)
tf_abi = get_tf_abi(build_ext, tf_include_dirs,
tf_lib_dirs, tf_libs, cpp_flags)
compile_flags = []
for include_dir in tf_include_dirs:
compile_flags.append('-I%s' % include_dir)
if tf_abi:
compile_flags.append('-D%s=%s' % tf_abi)
link_flags = []
for lib_dir in tf_lib_dirs:
link_flags.append('-L%s' % lib_dir)
for lib in tf_libs:
link_flags.append('-l%s' % lib)
return compile_flags, link_flags
def get_mpi_flags():
show_command = os.environ.get('HOROVOD_MPICXX_SHOW', 'mpicxx -show')
try:
mpi_show_output = subprocess.check_output(
shlex.split(show_command), universal_newlines=True).strip()
mpi_show_args = shlex.split(mpi_show_output)
if not mpi_show_args[0].startswith('-'):
# Open MPI and MPICH print compiler name as a first word, skip it
mpi_show_args = mpi_show_args[1:]
# strip off compiler call portion and always escape each arg
return ' '.join(['"' + arg.replace('"', '"\'"\'"') + '"'
for arg in mpi_show_args])
except Exception:
raise DistutilsPlatformError(
'%s failed (see error below), is MPI in $PATH?\n'
'Note: If your version of MPI has a custom command to show compilation flags, '
'please specify it with the HOROVOD_MPICXX_SHOW environment variable.\n\n'
'%s' % (show_command, traceback.format_exc()))
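# For illustration only (the exact output depends on the MPI distribution):
# with Open MPI, ``mpicxx -show`` might print something like
#   g++ -I/usr/local/include -pthread -L/usr/local/lib -lmpi_cxx -lmpi
# in which case get_mpi_flags() drops the leading compiler name and returns
# the remaining arguments individually quoted, ready to be appended to the
# extensions' compile and link flags.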
def test_compile(build_ext, name, code, libraries=None, include_dirs=None, library_dirs=None, macros=None,
extra_preargs=None):
test_compile_dir = os.path.join(build_ext.build_temp, 'test_compile')
if not os.path.exists(test_compile_dir):
os.makedirs(test_compile_dir)
source_file = os.path.join(test_compile_dir, '%s.cc' % name)
with open(source_file, 'w') as f:
f.write(code)
compiler = build_ext.compiler
[object_file] = compiler.object_filenames([source_file])
shared_object_file = compiler.shared_object_filename(
name, output_dir=test_compile_dir)
compiler.compile([source_file], extra_preargs=extra_preargs,
include_dirs=include_dirs, macros=macros)
compiler.link_shared_object(
[object_file], shared_object_file, libraries=libraries, library_dirs=library_dirs)
return shared_object_file
def get_cuda_dirs(build_ext, cpp_flags):
cuda_include_dirs = []
cuda_lib_dirs = []
cuda_home = os.environ.get('HOROVOD_CUDA_HOME')
if cuda_home:
cuda_include_dirs += ['%s/include' % cuda_home]
cuda_lib_dirs += ['%s/lib' % cuda_home, '%s/lib64' % cuda_home]
cuda_include = os.environ.get('HOROVOD_CUDA_INCLUDE')
if cuda_include:
cuda_include_dirs += [cuda_include]
cuda_lib = os.environ.get('HOROVOD_CUDA_LIB')
if cuda_lib:
cuda_lib_dirs += [cuda_lib]
if not cuda_include_dirs and not cuda_lib_dirs:
# default to /usr/local/cuda
cuda_include_dirs += ['/usr/local/cuda/include']
cuda_lib_dirs += ['/usr/local/cuda/lib', '/usr/local/cuda/lib64']
try:
test_compile(build_ext, 'test_cuda', libraries=['cudart'], include_dirs=cuda_include_dirs,
library_dirs=cuda_lib_dirs, extra_preargs=cpp_flags, code=textwrap.dedent('''\
#include <cuda_runtime.h>
void test() {
cudaSetDevice(0);
}
'''))
except (CompileError, LinkError):
raise DistutilsPlatformError(
'CUDA library was not found (see error above).\n'
'Please specify correct CUDA location with the HOROVOD_CUDA_HOME '
'environment variable or combination of HOROVOD_CUDA_INCLUDE and '
'HOROVOD_CUDA_LIB environment variables.\n\n'
'HOROVOD_CUDA_HOME - path where CUDA include and lib directories can be found\n'
'HOROVOD_CUDA_INCLUDE - path to CUDA include directory\n'
'HOROVOD_CUDA_LIB - path to CUDA lib directory')
return cuda_include_dirs, cuda_lib_dirs
def get_nccl_vals(build_ext, cuda_include_dirs, cuda_lib_dirs, cpp_flags):
nccl_include_dirs = []
nccl_lib_dirs = []
nccl_libs = []
nccl_home = os.environ.get('HOROVOD_NCCL_HOME')
if nccl_home:
nccl_include_dirs += ['%s/include' % nccl_home]
nccl_lib_dirs += ['%s/lib' % nccl_home, '%s/lib64' % nccl_home]
nccl_include_dir = os.environ.get('HOROVOD_NCCL_INCLUDE')
if nccl_include_dir:
nccl_include_dirs += [nccl_include_dir]
nccl_lib_dir = os.environ.get('HOROVOD_NCCL_LIB')
if nccl_lib_dir:
nccl_lib_dirs += [nccl_lib_dir]
nccl_link_mode = os.environ.get('HOROVOD_NCCL_LINK', 'STATIC')
if nccl_link_mode.upper() == 'SHARED':
nccl_libs += ['nccl']
else:
nccl_libs += ['nccl_static']
try:
test_compile(build_ext, 'test_nccl', libraries=nccl_libs, include_dirs=nccl_include_dirs + cuda_include_dirs,
library_dirs=nccl_lib_dirs + cuda_lib_dirs, extra_preargs=cpp_flags, code=textwrap.dedent('''\
#include <nccl.h>
#if NCCL_MAJOR < 2
#error Horovod requires NCCL 2.0 or later version, please upgrade.
#endif
void test() {
ncclUniqueId nccl_id;
ncclGetUniqueId(&nccl_id);
}
'''))
except (CompileError, LinkError):
raise DistutilsPlatformError(
'NCCL 2.0 library or its later version was not found (see error above).\n'
'Please specify correct NCCL location with the HOROVOD_NCCL_HOME '
'environment variable or combination of HOROVOD_NCCL_INCLUDE and '
'HOROVOD_NCCL_LIB environment variables.\n\n'
'HOROVOD_NCCL_HOME - path where NCCL include and lib directories can be found\n'
'HOROVOD_NCCL_INCLUDE - path to NCCL include directory\n'
'HOROVOD_NCCL_LIB - path to NCCL lib directory')
return nccl_include_dirs, nccl_lib_dirs, nccl_libs
def get_ddl_dirs():
# Default DDL home
ddl_home = '/opt/DL/ddl'
ddl_include_dir = '%s/include' % ddl_home
ddl_lib_dir = '%s/lib' % ddl_home
if not os.path.exists(ddl_lib_dir):
raise DistutilsPlatformError('DDL lib was not found. Please, make sure \'ddl\' package is installed.')
if not os.path.exists(ddl_include_dir):
raise DistutilsPlatformError('DDL include was not found. Please, make sure \'ddl-dev\' package is installed.')
return [ddl_include_dir], [ddl_lib_dir]
def get_common_options(build_ext):
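    """Collect the macros, include dirs, libraries and compile/link flags
    shared by all extensions, based on the HOROVOD_GPU_ALLREDUCE,
    HOROVOD_GPU_ALLGATHER, HOROVOD_GPU_BROADCAST and related CUDA/NCCL/DDL
    environment variables inspected below."""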
cpp_flags = get_cpp_flags(build_ext)
mpi_flags = get_mpi_flags()
gpu_allreduce = os.environ.get('HOROVOD_GPU_ALLREDUCE')
if gpu_allreduce and gpu_allreduce != 'MPI' and gpu_allreduce != 'NCCL' and \
gpu_allreduce != 'DDL':
raise DistutilsError('HOROVOD_GPU_ALLREDUCE=%s is invalid, supported '
'values are "", "MPI", "NCCL", "DDL".' % gpu_allreduce)
gpu_allgather = os.environ.get('HOROVOD_GPU_ALLGATHER')
if gpu_allgather and gpu_allgather != 'MPI':
raise DistutilsError('HOROVOD_GPU_ALLGATHER=%s is invalid, supported '
'values are "", "MPI".' % gpu_allgather)
gpu_broadcast = os.environ.get('HOROVOD_GPU_BROADCAST')
if gpu_broadcast and gpu_broadcast != 'MPI':
raise DistutilsError('HOROVOD_GPU_BROADCAST=%s is invalid, supported '
'values are "", "MPI".' % gpu_broadcast)
if gpu_allreduce or gpu_allgather or gpu_broadcast:
have_cuda = True
cuda_include_dirs, cuda_lib_dirs = get_cuda_dirs(build_ext, cpp_flags)
else:
have_cuda = False
cuda_include_dirs = cuda_lib_dirs = []
if gpu_allreduce == 'NCCL':
have_nccl = True
nccl_include_dirs, nccl_lib_dirs, nccl_libs = get_nccl_vals(
build_ext, cuda_include_dirs, cuda_lib_dirs, cpp_flags)
else:
have_nccl = False
nccl_include_dirs = nccl_lib_dirs = nccl_libs = []
if gpu_allreduce == 'DDL':
have_ddl = True
ddl_include_dirs, ddl_lib_dirs = get_ddl_dirs()
else:
have_ddl = False
ddl_include_dirs = ddl_lib_dirs = []
MACROS = []
INCLUDES = []
SOURCES = []
COMPILE_FLAGS = cpp_flags + shlex.split(mpi_flags)
LINK_FLAGS = shlex.split(mpi_flags)
LIBRARY_DIRS = []
LIBRARIES = []
if have_cuda:
MACROS += [('HAVE_CUDA', '1')]
INCLUDES += cuda_include_dirs
LIBRARY_DIRS += cuda_lib_dirs
LIBRARIES += ['cudart']
if have_nccl:
MACROS += [('HAVE_NCCL', '1')]
INCLUDES += nccl_include_dirs
LINK_FLAGS += ['-Wl,--version-script=hide_nccl.lds']
LIBRARY_DIRS += nccl_lib_dirs
LIBRARIES += nccl_libs
if have_ddl:
MACROS += [('HAVE_DDL', '1')]
INCLUDES += ddl_include_dirs
LIBRARY_DIRS += ddl_lib_dirs
LIBRARIES += ['ddl', 'ddl_pack']
if gpu_allreduce:
MACROS += [('HOROVOD_GPU_ALLREDUCE', "'%s'" % gpu_allreduce[0])]
if gpu_allgather:
MACROS += [('HOROVOD_GPU_ALLGATHER', "'%s'" % gpu_allgather[0])]
if gpu_broadcast:
MACROS += [('HOROVOD_GPU_BROADCAST', "'%s'" % gpu_broadcast[0])]
return dict(MACROS=MACROS,
INCLUDES=INCLUDES,
SOURCES=SOURCES,
COMPILE_FLAGS=COMPILE_FLAGS,
LINK_FLAGS=LINK_FLAGS,
LIBRARY_DIRS=LIBRARY_DIRS,
LIBRARIES=LIBRARIES)
def build_common_extension(build_ext, options, abi_compile_flags):
common_mpi_lib.define_macros = options['MACROS']
common_mpi_lib.include_dirs = options['INCLUDES']
common_mpi_lib.sources = options['SOURCES'] + ['horovod/common/common.cc',
'horovod/common/mpi_message.cc',
'horovod/common/operations.cc',
'horovod/common/timeline.cc']
common_mpi_lib.extra_compile_args = options['COMPILE_FLAGS'] + \
abi_compile_flags
common_mpi_lib.extra_link_args = options['LINK_FLAGS']
common_mpi_lib.library_dirs = options['LIBRARY_DIRS']
common_mpi_lib.libraries = options['LIBRARIES']
build_ext.build_extension(common_mpi_lib)
def build_tf_extension(build_ext, options):
check_tf_version()
tf_compile_flags, tf_link_flags = get_tf_flags(
build_ext, options['COMPILE_FLAGS'])
tensorflow_mpi_lib.define_macros = options['MACROS']
tensorflow_mpi_lib.include_dirs = options['INCLUDES']
tensorflow_mpi_lib.sources = options['SOURCES'] + \
['horovod/tensorflow/mpi_ops.cc']
tensorflow_mpi_lib.extra_compile_args = options['COMPILE_FLAGS'] + \
tf_compile_flags
tensorflow_mpi_lib.extra_link_args = options['LINK_FLAGS'] + tf_link_flags
tensorflow_mpi_lib.library_dirs = options['LIBRARY_DIRS']
tensorflow_mpi_lib.libraries = options['LIBRARIES']
build_ext.build_extension(tensorflow_mpi_lib)
# Return ABI flags used for TensorFlow compilation. We will use this flag
# to compile all the libraries.
return [flag for flag in tf_compile_flags if '_GLIBCXX_USE_CXX11_ABI' in flag]
def dummy_import_torch():
try:
import torch
except:
pass
def check_torch_import():
try:
import torch
except ImportError:
raise DistutilsPlatformError(
'import torch failed, is it installed?\n\n%s' % traceback.format_exc())
def is_torch_cuda():
try:
from torch.utils.ffi import create_extension
cuda_test_ext = create_extension(
name='horovod.torch.test_cuda',
headers=['horovod/torch/dummy.h'],
sources=[],
with_cuda=True,
extra_compile_args=['-std=c11', '-fPIC', '-O2']
)
cuda_test_ext.build()
return True
except:
print('INFO: Above error indicates that this PyTorch installation does not support CUDA.')
return False
def check_macro(macros, key):
return any(k == key and v for k, v in macros)
def set_macro(macros, key, new_value):
if any(k == key for k, _ in macros):
return [(k, new_value if k == key else v) for k, v in macros]
else:
return macros + [(key, new_value)]
class protect_files(object):
def __init__(self, *files):
self.files = files
def __enter__(self):
for file in self.files:
os.rename(file, file + '.protected')
def __exit__(self, type, value, traceback):
for file in self.files:
os.rename(file + '.protected', file)
def build_torch_extension(build_ext, options, abi_compile_flags):
check_torch_import()
have_cuda = is_torch_cuda()
if not have_cuda and check_macro(options['MACROS'], 'HAVE_CUDA'):
raise DistutilsPlatformError(
'Horovod build with GPU support was requested, but this PyTorch '
'installation does not support CUDA.')
# Update HAVE_CUDA to mean that PyTorch supports CUDA. Internally, we will be checking
# HOROVOD_GPU_(ALLREDUCE|ALLGATHER|BROADCAST) to decide whether we should use GPU
# version or transfer tensors to CPU memory for those operations.
updated_macros = set_macro(
options['MACROS'], 'HAVE_CUDA', str(int(have_cuda)))
# Create_extension overwrites these files which are customized, we need to protect them.
with protect_files('horovod/torch/mpi_lib/__init__.py',
'horovod/torch/mpi_lib_impl/__init__.py'):
from torch.utils.ffi import create_extension
ffi_iface = create_extension(
name='horovod.torch.mpi_lib',
headers=['horovod/torch/interface.h'] +
(['horovod/torch/interface_cuda.h'] if have_cuda else []),
with_cuda=have_cuda,
language='c',
package=True,
sources=[],
extra_compile_args=['-std=c11', '-fPIC', '-O2']
)
ffi_impl = create_extension(
name='horovod.torch.mpi_lib_impl',
headers=[],
with_cuda=have_cuda,
language='c++',
package=True,
source_extension='.cc',
define_macros=updated_macros,
include_dirs=options['INCLUDES'],
sources=options['SOURCES'] + ['horovod/torch/mpi_ops.cc',
'horovod/torch/handle_manager.cc',
'horovod/torch/ready_event.cc',
'horovod/torch/tensor_util.cc',
'horovod/torch/cuda_util.cc',
'horovod/torch/adapter.cc'],
extra_compile_args=options['COMPILE_FLAGS'] + abi_compile_flags,
extra_link_args=options['LINK_FLAGS'],
library_dirs=options['LIBRARY_DIRS'],
libraries=options['LIBRARIES']
)
for ffi, setuptools_ext in [(ffi_iface, torch_mpi_lib),
(ffi_impl, torch_mpi_lib_impl)]:
ffi_ext = ffi.distutils_extension()
# ffi_ext is distutils Extension, not setuptools Extension
for k, v in ffi_ext.__dict__.items():
setuptools_ext.__dict__[k] = v
build_ext.build_extension(setuptools_ext)
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
options = get_common_options(self)
abi_compile_flags = []
built_plugins = []
# If PyTorch is installed, it must be imported before TensorFlow, otherwise
# we may get an error: dlopen: cannot load any more object with static TLS
dummy_import_torch()
if not os.environ.get('HOROVOD_WITHOUT_TENSORFLOW'):
try:
abi_compile_flags = build_tf_extension(self, options)
built_plugins.append(True)
except:
if not os.environ.get('HOROVOD_WITH_TENSORFLOW'):
print('INFO: Unable to build TensorFlow plugin, will skip it.\n\n'
'%s' % traceback.format_exc(), file=sys.stderr)
built_plugins.append(False)
else:
raise
if not os.environ.get('HOROVOD_WITHOUT_PYTORCH'):
try:
build_torch_extension(self, options, abi_compile_flags)
built_plugins.append(True)
except:
if not os.environ.get('HOROVOD_WITH_PYTORCH'):
print('INFO: Unable to build PyTorch plugin, will skip it.\n\n'
'%s' % traceback.format_exc(), file=sys.stderr)
built_plugins.append(False)
else:
raise
if not built_plugins:
raise DistutilsError(
'Both TensorFlow and PyTorch plugins were excluded from build. Aborting.')
if not any(built_plugins):
raise DistutilsError(
'Neither TensorFlow nor PyTorch plugins were built. See errors above.')
build_common_extension(self, options, abi_compile_flags)
setup(name='horovod',
version=__version__,
packages=find_packages(),
description='Distributed training framework for TensorFlow, Keras, and PyTorch.',
author='Uber Technologies, Inc.',
long_description=textwrap.dedent('''\
Horovod is a distributed training framework for TensorFlow, Keras, and PyTorch.
The goal of Horovod is to make distributed Deep Learning fast and easy to use.'''),
url='https://github.com/uber/horovod',
classifiers=[
'License :: OSI Approved :: Apache Software License'
],
ext_modules=[common_mpi_lib, tensorflow_mpi_lib,
torch_mpi_lib, torch_mpi_lib_impl],
cmdclass={'build_ext': custom_build_ext},
# cffi is required for PyTorch
# If cffi is specified in setup_requires, it will need libffi to be installed on the machine,
# which is undesirable. Luckily, `install` action will install cffi before executing build,
# so it's only necessary for `build*` or `bdist*` actions.
setup_requires=['cffi>=1.4.0'] if is_build_action() else [],
install_requires=['cffi>=1.4.0'],
zip_safe=False)
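# Illustrative build invocations (not executed by this file): the extension
# build above is driven by environment variables, e.g.
#   HOROVOD_GPU_ALLREDUCE=NCCL HOROVOD_CUDA_HOME=/usr/local/cuda \
#   HOROVOD_NCCL_HOME=/usr/local/nccl pip install --no-cache-dir .
# or HOROVOD_WITHOUT_PYTORCH=1 / HOROVOD_WITHOUT_TENSORFLOW=1 to skip a plugin.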
| apache-2.0 | -2,906,233,090,748,756,500 | 37.985893 | 118 | 0.590359 | false |
brsbilgic/django-quick-reports | docs/source/conf.py | 1 | 9285 | # -*- coding: utf-8 -*-
#
# django-quick-reports documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 12 09:08:52 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-quick-reports'
copyright = u'2015, Baris Bilgic'
author = u'Baris Bilgic'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-quick-reportsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'django-quick-reports.tex', u'django-quick-reports Documentation',
u'Baris Bilgic', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'django-quick-reports', u'django-quick-reports Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'django-quick-reports', u'django-quick-reports Documentation',
author, 'django-quick-reports', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -1,353,983,440,115,746,000 | 31.693662 | 81 | 0.709101 | false |
skirpichev/omg | diofant/tests/polys/test_polyoptions.py | 1 | 13603 | """Tests for options manager for :class:`Poly` and public API functions."""
import pytest
from diofant import (CC, EX, FF, GF, QQ, RR, ZZ, ComplexField, GeneratorsError,
I, Integer, OptionError, Options, RealField, Symbol, lex,
sqrt)
from diofant.abc import x, y, z
from diofant.polys.polyoptions import (All, Auto, BooleanOption, Domain,
Expand, Extension, Field, Formal, Frac,
Gaussian, Gen, Gens, Greedy, Include,
Method, Modulus, OptionType, Order,
Polys, Sort, Split, Strict, Symbols,
Wrt, allowed_flags, set_defaults)
__all__ = ()
def test_Options_clone():
opt = Options((x, y, z), {'domain': 'ZZ'})
assert opt.gens == (x, y, z)
assert opt.domain == ZZ
assert ('order' in opt) is False
assert opt.args == {'domain': ZZ}
# defaults:
assert opt.flags['all'] is False
assert opt.flags['include'] is False
assert opt.options['strict'] is True
new_opt = opt.clone({'gens': (x, y), 'order': 'lex'})
assert opt.gens == (x, y, z)
assert opt.domain == ZZ
assert ('order' in opt) is False
assert new_opt.gens == (x, y)
assert new_opt.domain == ZZ
assert ('order' in new_opt) is True
opt.spam = 'eggs'
assert opt.spam == 'eggs'
class SpamOpt(BooleanOption, metaclass=OptionType):
option = 'spam'
before = ['gens']
after = ['domain']
Options.__order__ = None
pytest.raises(RuntimeError, lambda: Options._init_dependencies_order())
delattr(Options, 'spam')
del Options.__options__['spam']
Options.__order__ = None
Options._init_dependencies_order()
Options._init_dependencies_order() # noop
pytest.raises(OptionError, lambda: Options((x,), {'gens': (x, y)}))
pytest.raises(OptionError, lambda: Options((x,), {'spam': 1}))
pytest.raises(OptionError, lambda: Options((x,), {'field': True,
'gaussian': True}))
pytest.raises(OptionError, lambda: Options((x,), {'gen': x}, strict=True))
def test_Expand_preprocess():
assert Expand.preprocess(False) is False
assert Expand.preprocess(True) is True
assert Expand.preprocess(0) is False
assert Expand.preprocess(1) is True
pytest.raises(OptionError, lambda: Expand.preprocess(x))
def test_Expand_postprocess():
opt = {'expand': True}
Expand.postprocess(opt)
assert opt == {'expand': True}
def test_Gens_preprocess():
assert Gens.preprocess((None,)) == ()
assert Gens.preprocess((x, y, z)) == (x, y, z)
assert Gens.preprocess(((x, y, z),)) == (x, y, z)
a = Symbol('a', commutative=False)
pytest.raises(GeneratorsError, lambda: Gens.preprocess((x, x, y)))
pytest.raises(GeneratorsError, lambda: Gens.preprocess((x, y, a)))
def test_Gens_postprocess():
opt = {'gens': (x, y)}
Gens.postprocess(opt)
assert opt == {'gens': (x, y)}
def test_Wrt_preprocess():
assert Wrt.preprocess(x) == ['x']
assert Wrt.preprocess('') == []
assert Wrt.preprocess(' ') == []
assert Wrt.preprocess('x,y') == ['x', 'y']
assert Wrt.preprocess('x y') == ['x', 'y']
assert Wrt.preprocess('x, y') == ['x', 'y']
assert Wrt.preprocess('x , y') == ['x', 'y']
assert Wrt.preprocess(' x, y') == ['x', 'y']
assert Wrt.preprocess(' x, y') == ['x', 'y']
assert Wrt.preprocess([x, y]) == ['x', 'y']
pytest.raises(OptionError, lambda: Wrt.preprocess(','))
pytest.raises(OptionError, lambda: Wrt.preprocess(0))
def test_Wrt_postprocess():
opt = {'wrt': ['x']}
Wrt.postprocess(opt)
assert opt == {'wrt': ['x']}
def test_Sort_preprocess():
assert Sort.preprocess([x, y, z]) == ['x', 'y', 'z']
assert Sort.preprocess((x, y, z)) == ['x', 'y', 'z']
assert Sort.preprocess('x > y > z') == ['x', 'y', 'z']
assert Sort.preprocess('x>y>z') == ['x', 'y', 'z']
pytest.raises(OptionError, lambda: Sort.preprocess(0))
pytest.raises(OptionError, lambda: Sort.preprocess({x, y, z}))
def test_Sort_postprocess():
opt = {'sort': 'x > y'}
Sort.postprocess(opt)
assert opt == {'sort': 'x > y'}
def test_Order_preprocess():
assert Order.preprocess('lex') == lex
def test_Order_postprocess():
opt = {'order': True}
Order.postprocess(opt)
assert opt == {'order': True}
def test_Field_preprocess():
assert Field.preprocess(False) is False
assert Field.preprocess(True) is True
assert Field.preprocess(0) is False
assert Field.preprocess(1) is True
pytest.raises(OptionError, lambda: Field.preprocess(x))
def test_Field_postprocess():
opt = {'field': True}
Field.postprocess(opt)
assert opt == {'field': True}
def test_Greedy_preprocess():
assert Greedy.preprocess(False) is False
assert Greedy.preprocess(True) is True
assert Greedy.preprocess(0) is False
assert Greedy.preprocess(1) is True
pytest.raises(OptionError, lambda: Greedy.preprocess(x))
def test_Greedy_postprocess():
opt = {'greedy': True}
Greedy.postprocess(opt)
assert opt == {'greedy': True}
def test_Domain_preprocess():
assert Domain.preprocess(ZZ) == ZZ
assert Domain.preprocess(QQ) == QQ
assert Domain.preprocess(EX) == EX
assert Domain.preprocess(FF(2)) == FF(2)
assert Domain.preprocess(ZZ.inject(x, y)) == ZZ.inject(x, y)
assert Domain.preprocess('Z') == ZZ
assert Domain.preprocess('Q') == QQ
assert Domain.preprocess('ZZ') == ZZ
assert Domain.preprocess('QQ') == QQ
assert Domain.preprocess('EX') == EX
assert Domain.preprocess('FF(23)') == FF(23)
assert Domain.preprocess('GF(23)') == GF(23)
pytest.raises(OptionError, lambda: Domain.preprocess('Z[]'))
assert Domain.preprocess('Z[x]') == ZZ.inject(x)
assert Domain.preprocess('Q[x]') == QQ.inject(x)
assert Domain.preprocess('ZZ[x]') == ZZ.inject(x)
assert Domain.preprocess('QQ[x]') == QQ.inject(x)
assert Domain.preprocess('Z[x,y]') == ZZ.inject(x, y)
assert Domain.preprocess('Q[x,y]') == QQ.inject(x, y)
assert Domain.preprocess('ZZ[x,y]') == ZZ.inject(x, y)
assert Domain.preprocess('QQ[x,y]') == QQ.inject(x, y)
pytest.raises(OptionError, lambda: Domain.preprocess('Z()'))
assert Domain.preprocess('Z(x)') == ZZ.inject(x).field
assert Domain.preprocess('Q(x)') == QQ.inject(x).field
assert Domain.preprocess('ZZ(x)') == ZZ.inject(x).field
assert Domain.preprocess('QQ(x)') == QQ.inject(x).field
assert Domain.preprocess('Z(x,y)') == ZZ.inject(x, y).field
assert Domain.preprocess('Q(x,y)') == QQ.inject(x, y).field
assert Domain.preprocess('ZZ(x,y)') == ZZ.inject(x, y).field
assert Domain.preprocess('QQ(x,y)') == QQ.inject(x, y).field
assert Domain.preprocess('Q<I>') == QQ.algebraic_field(I)
assert Domain.preprocess('QQ<I>') == QQ.algebraic_field(I)
assert Domain.preprocess('Q<sqrt(2), I>') == QQ.algebraic_field(sqrt(2), I)
assert Domain.preprocess(
'QQ<sqrt(2), I>') == QQ.algebraic_field(sqrt(2), I)
pytest.raises(OptionError, lambda: Domain.preprocess('abc'))
assert Domain.preprocess('RR') == RR
assert Domain.preprocess('RR_5') == RealField(prec=5)
assert Domain.preprocess('CC') == CC
assert Domain.preprocess('CC_5') == ComplexField(prec=5)
pytest.raises(OptionError, lambda: Domain.preprocess(()))
def test_Domain_postprocess():
pytest.raises(GeneratorsError, lambda: Domain.postprocess({'gens': (x, y),
'domain': ZZ.inject(y, z)}))
pytest.raises(GeneratorsError, lambda: Domain.postprocess({'gens': (),
'domain': EX}))
pytest.raises(GeneratorsError, lambda: Domain.postprocess({'domain': EX}))
def test_Split_preprocess():
assert Split.preprocess(False) is False
assert Split.preprocess(True) is True
assert Split.preprocess(0) is False
assert Split.preprocess(1) is True
pytest.raises(OptionError, lambda: Split.preprocess(x))
def test_Split_postprocess():
pytest.raises(NotImplementedError, lambda: Split.postprocess({'split': True}))
def test_Gaussian_preprocess():
assert Gaussian.preprocess(False) is False
assert Gaussian.preprocess(True) is True
assert Gaussian.preprocess(0) is False
assert Gaussian.preprocess(1) is True
pytest.raises(OptionError, lambda: Gaussian.preprocess(x))
def test_Gaussian_postprocess():
opt = {'gaussian': True}
Gaussian.postprocess(opt)
assert opt == {
'gaussian': True,
'extension': {I},
'domain': QQ.algebraic_field(I),
}
def test_Extension_preprocess():
assert Extension.preprocess(True) is True
assert Extension.preprocess(1) is True
assert Extension.preprocess(False) is False
assert Extension.preprocess([]) is None
assert Extension.preprocess(sqrt(2)) == {sqrt(2)}
assert Extension.preprocess([sqrt(2)]) == {sqrt(2)}
assert Extension.preprocess([sqrt(2), I]) == {sqrt(2), I}
def test_Extension_postprocess():
opt = {'extension': {sqrt(2)}}
Extension.postprocess(opt)
assert opt == {
'extension': {sqrt(2)},
'domain': QQ.algebraic_field(sqrt(2)),
}
opt = {'extension': True}
Extension.postprocess(opt)
assert opt == {'extension': True}
def test_Modulus_preprocess():
assert Modulus.preprocess(23) == 23
assert Modulus.preprocess(Integer(23)) == 23
pytest.raises(OptionError, lambda: Modulus.preprocess(0))
pytest.raises(OptionError, lambda: Modulus.preprocess(x))
def test_Modulus_postprocess():
opt = {'modulus': 5}
Modulus.postprocess(opt)
assert opt == {
'modulus': 5,
'domain': FF(5),
}
opt = {'modulus': 5}
Modulus.postprocess(opt)
assert opt == {
'modulus': 5,
'domain': FF(5),
}
def test_Strict_preprocess():
assert Strict.preprocess(False) is False
assert Strict.preprocess(True) is True
assert Strict.preprocess(0) is False
assert Strict.preprocess(1) is True
pytest.raises(OptionError, lambda: Strict.preprocess(x))
def test_Strict_postprocess():
opt = {'strict': True}
Strict.postprocess(opt)
assert opt == {'strict': True}
def test_Auto_preprocess():
assert Auto.preprocess(False) is False
assert Auto.preprocess(True) is True
assert Auto.preprocess(0) is False
assert Auto.preprocess(1) is True
pytest.raises(OptionError, lambda: Auto.preprocess(x))
def test_Auto_postprocess():
opt = {'auto': True}
Auto.postprocess(opt)
assert opt == {'auto': True}
def test_Frac_preprocess():
assert Frac.preprocess(False) is False
assert Frac.preprocess(True) is True
assert Frac.preprocess(0) is False
assert Frac.preprocess(1) is True
pytest.raises(OptionError, lambda: Frac.preprocess(x))
def test_Frac_postprocess():
opt = {'frac': True}
Frac.postprocess(opt)
assert opt == {'frac': True}
def test_Formal_preprocess():
assert Formal.preprocess(False) is False
assert Formal.preprocess(True) is True
assert Formal.preprocess(0) is False
assert Formal.preprocess(1) is True
pytest.raises(OptionError, lambda: Formal.preprocess(x))
def test_Formal_postprocess():
opt = {'formal': True}
Formal.postprocess(opt)
assert opt == {'formal': True}
def test_Polys_preprocess():
assert Polys.preprocess(False) is False
assert Polys.preprocess(True) is True
assert Polys.preprocess(0) is False
assert Polys.preprocess(1) is True
pytest.raises(OptionError, lambda: Polys.preprocess(x))
def test_Polys_postprocess():
opt = {'polys': True}
Polys.postprocess(opt)
assert opt == {'polys': True}
def test_Include_preprocess():
assert Include.preprocess(False) is False
assert Include.preprocess(True) is True
assert Include.preprocess(0) is False
assert Include.preprocess(1) is True
pytest.raises(OptionError, lambda: Include.preprocess(x))
def test_Include_postprocess():
opt = {'include': True}
Include.postprocess(opt)
assert opt == {'include': True}
def test_All_preprocess():
assert All.preprocess(False) is False
assert All.preprocess(True) is True
assert All.preprocess(0) is False
assert All.preprocess(1) is True
pytest.raises(OptionError, lambda: All.preprocess(x))
def test_All_postprocess():
opt = {'all': True}
All.postprocess(opt)
assert opt == {'all': True}
def test_Gen_preprocess():
opt = {'gen': 'spam'}
pytest.raises(OptionError, lambda: Gen.preprocess(opt))
def test_Gen_postprocess():
opt = {'gen': x}
Gen.postprocess(opt)
assert opt == {'gen': x}
def test_Symbols_preprocess():
pytest.raises(OptionError, lambda: Symbols.preprocess(x))
def test_Symbols_postprocess():
opt = {'symbols': [x, y, z]}
Symbols.postprocess(opt)
assert opt == {'symbols': [x, y, z]}
def test_Method_preprocess():
pytest.raises(OptionError, lambda: Method.preprocess(10))
def test_Method_postprocess():
opt = {'method': 'f5b'}
Method.postprocess(opt)
assert opt == {'method': 'f5b'}
def test_allowed_flags():
pytest.raises(OptionError, lambda: allowed_flags({'spam': True}, []))
def test_set_defaults():
assert set_defaults({'defaults': None}) == {'defaults': None}
| bsd-3-clause | 5,180,508,136,568,610,000 | 25.830375 | 91 | 0.625524 | false |
avalentino/gsdview | exectools/tests/gtkshell.py | 1 | 9226 | #!/usr/bin/env python
# GSDView - Geo-Spatial Data Viewer
# Copyright (C) 2008-2021 Antonio Valentino <[email protected]>
#
# This module is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this module if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 US
"""Simple interactive shell implementation using exectools and GTK+."""
import time
import logging
from gi.repository import Gtk, Gdk
import exectools
from exectools.gtk import (GtkOutputPane, GtkOutputHandler, GtkToolController,
GtkDialogLoggingHandler, GtkLoggingHandler)
class GtkShell:
"""GTK+ interactive shell using tool controller."""
historyfile = 'history.txt'
def __init__(self, debug=False):
# Command box
cmdlabel = Gtk.Label(label='cmd >')
cmdlabel.set_padding(5, 0)
self.cmdbox = Gtk.ComboBoxText.new_with_entry()
self.cmdbox.set_active(0)
self.cmdbox.set_focus_on_click(False)
self.cmdbox.connect('changed', self.on_item_selected)
completion = Gtk.EntryCompletion()
completion.set_model(self.cmdbox.get_model())
completion.set_text_column(0)
self.entry = self.cmdbox.get_child()
self.entry.set_completion(completion)
self.entry.connect('activate', self.on_entry_activate)
self.entry.connect('key-press-event', self.on_key_pressed)
self.entry.connect('populate-popup', self.on_populate_popup)
# self.cmdbutton = Gtk.Button.new_with_mnemonic('_Execute')
self.cmdbutton = Gtk.Button(stock=Gtk.STOCK_EXECUTE)
self.cmdbutton.connect('clicked', self.on_cmdbutton_clicked)
# Note: set_always_show_image is new in Gtk 3.6
self.cmdbutton.set_always_show_image(True)
hbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, spacing=3)
hbox.pack_start(cmdlabel, expand=False, fill=False, padding=0)
hbox.pack_start(self.cmdbox, expand=True, fill=True, padding=0)
hbox.pack_start(self.cmdbutton, expand=False, fill=False, padding=0)
# Output pane
outputpane = GtkOutputPane(hide_button=False)
outputpane.set_editable(False)
scrolledwin = Gtk.ScrolledWindow()
scrolledwin.set_policy(Gtk.PolicyType.AUTOMATIC,
Gtk.PolicyType.AUTOMATIC)
scrolledwin.add(outputpane)
# Status bar
self.statusbar = Gtk.Statusbar()
id_ = self.statusbar.get_context_id('ready')
self.statusbar.push(id_, 'Ready.')
# Main window
vbox = Gtk.Box.new(Gtk.Orientation.VERTICAL, spacing=3)
vbox.set_border_width(3)
vbox.pack_start(hbox, expand=False, fill=True, padding=0)
vbox.pack_start(scrolledwin, expand=True, fill=True, padding=0)
vbox.pack_start(self.statusbar, expand=False, fill=True, padding=0)
accelgroup = Gtk.AccelGroup()
accelgroup.connect(ord('d'), Gdk.ModifierType.CONTROL_MASK,
Gtk.AccelFlags.VISIBLE, self.quit)
self.mainwin = Gtk.Window()
self.mainwin.set_title('GTK Shell')
theme = Gtk.IconTheme.get_default()
icon = theme.load_icon(Gtk.STOCK_EXECUTE, Gtk.IconSize.LARGE_TOOLBAR,
Gtk.IconLookupFlags(0))
self.mainwin.set_icon(icon)
self.mainwin.add(vbox)
self.mainwin.set_default_size(650, 500)
self.mainwin.add_accel_group(accelgroup)
self.mainwin.connect('destroy', self.quit)
self.mainwin.show_all()
# Setup the log system
if debug:
level = logging.DEBUG
logging.basicConfig(level=level)
else:
level = logging.INFO
self.logger = logging.getLogger()
formatter = logging.Formatter('%(levelname)s: %(message)s')
handler = GtkLoggingHandler(outputpane)
handler.setLevel(level)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
formatter = logging.Formatter('%(message)s')
handler = GtkDialogLoggingHandler(parent=self.mainwin, dialog=None)
handler.setLevel(logging.WARNING)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.setLevel(level)
# Setup high level components and initialize the parent classes
handler = GtkOutputHandler(self.logger, self.statusbar)
self.tool = exectools.ToolDescriptor('', stdout_handler=handler)
self.controller = GtkToolController(logger=self.logger)
self.controller.connect('finished', self.on_finished)
# Final setup
self._state = 'ready' # or maybe __state
self.logger.debug('gtkshell session started at %s.' % time.asctime())
self.load_history()
def main(self):
Gtk.main()
def quit(self, *data):
try:
self.save_history()
finally:
self.logger.debug(
'gtkshell session stopped at %s.' % time.asctime())
Gtk.main_quit()
def load_history(self):
try:
for cmd in open(self.historyfile):
self.cmdbox.append_text(cmd.rstrip())
self.logger.debug('history file "%s" loaded.' % self.historyfile)
except OSError as e:
self.logger.debug('unable to read the history file "%s": %s.' %
(self.historyfile, e))
def save_history(self):
try:
liststore = self.cmdbox.get_model()
history = '\n'.join([item[0] for item in liststore])
f = open(self.historyfile, 'w')
f.write(history)
f.close()
self.logger.debug('history saved in %s' % self.historyfile)
except OSError as e:
self.logger.warning('unable to save the history file "%s": %s' %
(self.historyfile, e))
def _reset(self):
self.controller._reset()
self.cmdbutton.set_label(Gtk.STOCK_EXECUTE)
self.cmdbox.set_sensitive(True)
self.entry.grab_focus()
def reset(self):
self._reset()
self.state = 'ready'
@property
def state(self):
return self._state
@state.setter
def state(self, state):
if state == 'ready':
self._reset()
id_ = self.statusbar.get_context_id('running')
self.statusbar.pop(id_)
elif state == 'running':
self.cmdbox.set_sensitive(False)
id_ = self.statusbar.get_context_id('running')
self.statusbar.push(id_, 'Running ...')
self.cmdbutton.set_label(Gtk.STOCK_STOP)
else:
raise ValueError('invalid status: "%s".' % state)
self._state = state
def execute(self):
cmd = self.entry.get_text()
if cmd:
self.entry.set_text('')
self.cmdbox.append_text(cmd)
cmd = cmd.split()
try:
self.state = 'running'
self.controller.run_tool(self.tool, *cmd)
# raise RuntimeError('simulated runtime error')
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
self.logger.error(e, exc_info=True)
self.state = 'ready'
def on_key_pressed(self, widget, event):
key = Gdk.keyval_name(event.keyval)
if key in ('Up', 'Down', 'Page_Up', 'Page_Down'):
self.cmdbox.popup()
return True
def on_cmdbutton_clicked(self, widget=None):
if self.state == 'ready':
self.execute()
elif self.state == 'running':
self.controller.stop_tool()
def on_entry_activate(self, widget=None):
if self.state == 'running':
return
self.execute()
def on_item_selected(self, widget):
self.entry.set_position(-1)
def on_populate_popup(self, widget, menu):
# separator
item = Gtk.SeparatorMenuItem()
item.show()
menu.append(item)
# Clear history
item = Gtk.ImageMenuItem(Gtk.STOCK_CLEAR)
item.set_name('clear_history')
item.connect('activate', self.on_clear_history, None)
item.connect('activate', self.on_clear_entry, None)
item.show()
menu.append(item)
def on_clear_history(self, widget=None):
liststore = self.cmdbox.get_model()
liststore.clear()
def on_clear_entry(self, widget=None):
self.entry.set_text('')
def on_finished(self, widget=None, returncode=0):
self.reset()
if __name__ == '__main__':
GtkShell(debug=True).main()
| gpl-2.0 | -1,603,177,992,602,668,000 | 33.94697 | 78 | 0.609365 | false |
helixyte/TheLMA | thelma/resources/experiment.py | 1 | 12060 | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Experiment resources.
"""
from datetime import datetime
import logging
from pyramid.httpexceptions import HTTPBadRequest
from everest.querying.specifications import AscendingOrderSpecification
from everest.querying.specifications import DescendingOrderSpecification
from everest.querying.specifications import cntd
from everest.representers.dataelements import DataElementAttributeProxy
from everest.representers.interfaces import IDataElement
from everest.resources.base import Collection
from everest.resources.base import Member
from everest.resources.descriptors import attribute_alias
from everest.resources.descriptors import collection_attribute
from everest.resources.descriptors import member_attribute
from everest.resources.descriptors import terminal_attribute
from everest.resources.utils import get_root_collection
from everest.resources.utils import url_to_resource
from thelma.entities.racklayout import RackLayout
from thelma.entities.utils import get_current_user
from thelma.interfaces import IExperiment
from thelma.interfaces import IExperimentDesign
from thelma.interfaces import IExperimentDesignRack
from thelma.interfaces import IExperimentJob
from thelma.interfaces import IExperimentMetadata
from thelma.interfaces import IExperimentMetadataType
from thelma.interfaces import IExperimentRack
from thelma.interfaces import ILabIsoRequest
from thelma.interfaces import IMoleculeDesignPoolSet
from thelma.interfaces import IPlate
from thelma.interfaces import IRack
from thelma.interfaces import IRackLayout
from thelma.interfaces import IRackShape
from thelma.interfaces import ISubproject
from thelma.interfaces import ITag
from thelma.resources.base import RELATION_BASE_URL
from thelma.tools.experiment import get_writer
from thelma.tools.metadata.ticket \
import IsoRequestTicketDescriptionUpdater
from thelma.tools.metadata.ticket import IsoRequestTicketActivator
from thelma.tools.metadata.ticket import IsoRequestTicketCreator
from thelma.tools.metadata.ticket import IsoRequestTicketDescriptionRemover
from thelma.tools.semiconstants import get_experiment_metadata_type
from thelma.tools.stock.base import STOCKMANAGEMENT_USER
__docformat__ = 'reStructuredText en'
__all__ = ['ExperimentMetadataTypeMember',
'ExperimentCollection',
'ExperimentDesignCollection',
'ExperimentDesignMember',
'ExperimentDesignRackCollection',
'ExperimentDesignRackMember',
'ExperimentMember',
'ExperimentMetadataCollection',
'ExperimentMetadataMember',
'ExperimentRackCollection',
'ExperimentRackMember',
]
class ExperimentMetadataTypeMember(Member):
relation = '%s/experiment-metadata-type' % RELATION_BASE_URL
title = attribute_alias('display_name')
display_name = terminal_attribute(str, 'display_name')
class ExperimentDesignRackMember(Member):
relation = "%s/experiment-design-rack" % RELATION_BASE_URL
title = attribute_alias('label')
label = terminal_attribute(str, 'label')
rack_shape = member_attribute(IRackShape, 'rack_layout.shape')
rack_layout = member_attribute(IRackLayout, 'rack_layout')
tags = collection_attribute(ITag, 'tags')
class ExperimentDesignRackCollection(Collection):
title = 'Experiment Design Racks'
root_name = 'experiment-design-racks'
description = 'Manage experiment design racks'
# default_order = asc('label')
class ExperimentDesignMember(Member):
relation = "%s/experiment-design" % RELATION_BASE_URL
title = terminal_attribute(str, 'slug')
rack_shape = member_attribute(IRackShape, 'rack_shape')
experiment_design_racks = collection_attribute(IExperimentDesignRack,
'experiment_design_racks')
experiments = collection_attribute(IExperiment, 'experiments')
experiment_metadata = member_attribute(IExperimentMetadata,
'experiment_metadata')
class ExperimentDesignCollection(Collection):
title = 'Experiment Designs'
root_name = 'experiment-designs'
description = 'Manage experiment designs'
default_order = AscendingOrderSpecification('label')
class ExperimentMember(Member):
relation = '%s/experiment' % RELATION_BASE_URL
title = attribute_alias('label')
label = terminal_attribute(str, 'label')
source_rack = member_attribute(IRack, 'source_rack')
experiment_design = member_attribute(IExperimentDesign,
'experiment_design')
experiment_racks = collection_attribute(IExperimentRack,
'experiment_racks')
experiment_job = member_attribute(IExperimentJob, 'job')
experiment_metadata_type = \
member_attribute(IExperimentMetadataType,
'experiment_design.experiment_metadata.experiment_metadata_type')
def get_writer(self):
return get_writer(self.get_entity())
class ExperimentCollection(Collection):
title = 'Experiments'
root_name = 'experiments'
description = 'Manage experiments'
default_order = AscendingOrderSpecification('label')
class ExperimentMetadataMember(Member):
relation = '%s/experiment-metadata' % RELATION_BASE_URL
label = terminal_attribute(str, 'label')
title = attribute_alias('label')
ticket_number = terminal_attribute(int, 'ticket_number')
subproject = member_attribute(ISubproject, 'subproject')
number_replicates = terminal_attribute(int, 'number_replicates')
molecule_design_pool_set = member_attribute(IMoleculeDesignPoolSet,
'molecule_design_pool_set')
experiment_design = member_attribute(IExperimentDesign,
'experiment_design')
iso_request = member_attribute(ILabIsoRequest, 'lab_iso_request')
creation_date = terminal_attribute(datetime, 'creation_date')
experiment_metadata_type = member_attribute(IExperimentMetadataType,
'experiment_metadata_type')
def __getitem__(self, name):
if name == 'tags':
tags_dict = {}
design_racks = self.__get_design_racks()
for rack in design_racks:
for tp in rack.rack_layout.tagged_rack_position_sets:
for tag in tp.tags:
tags_dict[tag.get_entity().slug] = tag
tag_coll = get_root_collection(ITag)
tag_coll.filter = cntd(id=[tag.id for tag in tags_dict.values()])
result = tag_coll
elif name == 'experiment-design-racks':
result = self.__get_design_racks()
else:
result = Member.__getitem__(self, name)
return result
@classmethod
def create_from_entity(cls, entity):
if entity.ticket_number is None:
# Create a new ticket and attach the ticket number.
user = get_current_user()
ticket_creator = \
IsoRequestTicketCreator(requester=user,
experiment_metadata=entity)
entity.ticket_number = \
cls.__run_trac_tool(ticket_creator,
'Could not update the ticket: %s.')
return cls(entity)
def update(self, data):
if IDataElement.providedBy(data): # pylint: disable=E1101
# FIXME: This really should be a PATCH operation.
prx = DataElementAttributeProxy(data)
self_entity = self.get_entity()
changed_num_reps = prx.number_replicates != self.number_replicates
emt_id = prx.experiment_metadata_type.get('id')
changed_em_type = emt_id != self.experiment_metadata_type.id
if changed_em_type or changed_num_reps:
if changed_num_reps:
self_entity.number_replicates = prx.number_replicates
if changed_em_type:
self_entity.experiment_metadata_type = \
get_experiment_metadata_type(emt_id)
if not self_entity.experiment_design is None:
# invalidate data to force a fresh upload of the XLS file
self_entity.experiment_design.experiment_design_racks = []
self_entity.experiment_design.worklist_series = None
if not self_entity.lab_iso_request is None:
shape = self_entity.lab_iso_request.rack_layout.shape
new_layout = RackLayout(shape=shape)
self_entity.lab_iso_request.rack_layout = new_layout
self_entity.lab_iso_request.owner = ''
changed_sp = self_entity.subproject.id != prx.subproject.get('id')
if changed_sp:
new_sp = \
url_to_resource(prx.subproject.get('href')).get_entity()
self_entity.subproject = new_sp
self_entity.label = prx.label
# Perform appropriate Trac updates.
if not self_entity.lab_iso_request is None:
if self.iso_request.owner == STOCKMANAGEMENT_USER:
ticket_activator = IsoRequestTicketActivator(
experiment_metadata=self_entity)
self.__run_trac_tool(ticket_activator,
'Could not update the ticket: %s.')
else:
if changed_em_type or changed_num_reps:
trac_updater = IsoRequestTicketDescriptionRemover(
experiment_metadata=self_entity,
changed_num_replicates=changed_num_reps,
changed_em_type=changed_em_type)
else:
url = 'http://thelma/public//LOUICe.html#' \
+ self.path
iso_url = 'http://thelma/public//LOUICe.html#' \
+ self.iso_request.path
trac_updater = IsoRequestTicketDescriptionUpdater(
experiment_metadata=self_entity,
experiment_metadata_link=url,
iso_request_link=iso_url)
self.__run_trac_tool(trac_updater,
'Could not update the ticket: %s.')
else:
Member.update(self, data)
@classmethod
def __run_trac_tool(cls, tool, error_msg_text):
tool.run()
if not tool.transaction_completed():
exc_msg = str(tool.get_messages(logging_level=logging.ERROR))
raise HTTPBadRequest(error_msg_text % exc_msg).exception
return tool.return_value
def __get_design_racks(self):
if self.experiment_design is not None:
design_racks = self.experiment_design.experiment_design_racks
else: # order only type
design_racks = []
return design_racks
class ExperimentMetadataCollection(Collection):
title = 'Experiment Metadata'
root_name = 'experiment-metadatas'
description = 'Manage the experiment metadata'
default_order = DescendingOrderSpecification('creation_date')
class ExperimentRackMember(Member):
relation = '%s/experiment-rack' % RELATION_BASE_URL
experiment = member_attribute(IExperiment, 'experiment')
design_rack = member_attribute(IExperimentDesignRack, 'design_rack')
plate = member_attribute(IPlate, 'rack')
source_rack = member_attribute(IRack, 'source_rack')
class ExperimentRackCollection(Collection):
title = 'Cell Plates'
root_name = 'experiment-racks'
description = 'Manage cell plates'
| mit | -6,446,421,184,529,573,000 | 43.832714 | 80 | 0.646186 | false |
ArcasProject/Arcas | src/arcas/PLOS/main.py | 1 | 3663 | from arcas.tools import Api
import xml.etree.ElementTree as etree
from xml.etree import ElementTree
class Plos(Api):
def __init__(self):
self.standard = 'http://api.plos.org/search?q='
def create_url_search(self, parameters):
"""Creates the search url, combining the standard url and various
search parameters."""
url = self.standard
url += parameters[0]
for i in parameters[1:]:
if 'rows=' in i or 'start=' in i:
url += '&{}'.format(i)
else:
url += '+AND+{}'.format(i)
return url
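# Hedged illustration, not part of the original file: combining
# create_url_search with the parameters_fix helper defined further down,
# an author query with a row limit would be assembled roughly like this
# (the author name "Smith" and the count 10 are made-up inputs):
#   params = Plos.parameters_fix(author='Smith', records=10)
#   # -> ['author:"Smith"', 'rows=10']
#   Plos().create_url_search(params)
#   # -> 'http://api.plos.org/search?q=author:"Smith"&rows=10'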
def to_dataframe(self, raw_article):
"""A function which takes a dictionary with structure of the PLOS
results and transform it to a standardized format.
"""
raw_article['author'] = raw_article.get('author_display', None)
raw_article['abstract'] = raw_article.get('abstract', [None])
raw_article['date'] = int(raw_article.get('publication_date', '0').split('-')[0])
raw_article['journal'] = raw_article.get('journal', None)
raw_article['provenance'] = 'PLOS'
raw_article['score'] = raw_article.get('score', None)
if raw_article['score'] is not None:
raw_article['score'] = int(raw_article['score'])
raw_article['doi'] = raw_article.get('id', None)
raw_article['url'] = 'https://doi.org/' + raw_article['id']
raw_article['title'] = raw_article.get('title_display', None)
raw_article['key'], raw_article['unique_key'] = self.create_keys(raw_article)
raw_article['category'] = 'Not available'
raw_article['open_access'] = 'Not available'
return self.dict_to_dataframe(raw_article)
@staticmethod
def xml_to_dict(record):
"""Xml response with information on article to dictionary"""
d = {}
for key, value in record:
if key is not None:
if value is not None:
d[key] = value
else:
d[key] = []
current_key = key
else:
if value is not None:
d[current_key].append(value)
return d
def parse(self, root):
"""Parsing the xml file"""
if root['response']['numFound'] == 0:
return False
return root['response']['docs']
@staticmethod
def parameters_fix(author=None, title=None, abstract=None, year=None,
records=None, start=None, category=None, journal=None,
keyword=None):
parameters = []
if author is not None:
parameters.append('author:"{}"'.format(author))
if title is not None:
parameters.append('title:"{}"'.format(title))
if abstract is not None:
parameters.append('abstract:"{}"'.format(abstract))
if year is not None:
parameters.append('publication_date:[{0}-01-01T00:00:00Z TO '
'{0}-12-30T23:59:59Z]'.format(year))
if journal is not None:
parameters.append('journal:"{}"'.format(journal))
if category is not None:
parameters.append('subject:"{}"'.format(category))
if keyword is not None:
parameters.append('everything:"{}"'.format(keyword))
if records is not None:
parameters.append('rows={}'.format(records))
if start is not None:
parameters.append('start={}'.format(start))
return parameters
@staticmethod
def get_root(response):
root = response.json()
return root
| mit | 3,411,965,802,518,887,400 | 36.762887 | 89 | 0.555283 | false |
b1-systems/kiwi | kiwi/repository/base.py | 1 | 3962 | # Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
class RepositoryBase(object):
"""
Implements base class for package manager repository handling
Attributes
:param object root_bind: instance of :class:`RootBind`
:param str root_dir: root directory path name
:param str shared_location: shared directory between image root
and build system root
"""
def __init__(self, root_bind, custom_args=None):
self.root_bind = root_bind
self.root_dir = root_bind.root_dir
self.shared_location = root_bind.shared_location
self.post_init(custom_args)
def post_init(self, custom_args):
"""
Post initialization method
Implementation in specialized repository class
:param list custom_args: unused
"""
pass
def use_default_location(self):
"""
Call repository operations with default repository manager setup
Implementation in specialized repository class
"""
raise NotImplementedError
def runtime_config(self):
"""
Repository runtime configuration and environment
Implementation in specialized repository class
"""
raise NotImplementedError
def add_repo(
self, name, uri, repo_type, prio, dist, components,
user, secret, credentials_file, repo_gpgcheck, pkg_gpgcheck
):
"""
Add repository
Implementation in specialized repository class
:param str name: unused
:param str uri: unused
:param repo_type: unused
:param int prio: unused
:param dist: unused
:param components: unused
:param user: unused
:param secret: unused
:param credentials_file: unused
:param repo_gpgcheck: unused
:param pkg_gpgcheck: unused
"""
raise NotImplementedError
def setup_package_database_configuration(self):
"""
Setup package database configuration
Implementation in specialized repository class
"""
raise NotImplementedError
def import_trusted_keys(self, signing_keys):
"""
Imports trusted keys into the image
Implementation in specialized repository class
:param list signing_keys: list of the key files to import
"""
raise NotImplementedError
def cleanup_unused_repos(self):
"""
Cleanup/Delete unused repositories
Only configured repositories according to the image configuration
are allowed to be active when building
Implementation in specialized repository class
"""
raise NotImplementedError
def delete_repo(self, name):
"""
Delete repository
Implementation in specialized repository class
:param str name: unused
"""
raise NotImplementedError
def delete_all_repos(self):
"""
Delete all repositories
Implementation in specialized repository class
"""
raise NotImplementedError
def delete_repo_cache(self, name):
"""
Delete repository cache
Implementation in specialized repository class
:param str name: unused
"""
raise NotImplementedError
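# --- Hedged example, not part of the original kiwi sources ----------------
# Minimal sketch of how a concrete backend is expected to fill in the
# interface above. The class name and the trivial bodies are invented for
# illustration only; a real backend (zypper, dnf, apt, ...) would manage
# repository files below self.shared_location / self.root_dir, and the
# remaining abstract methods would be overridden as well.
class _ExampleRepository(RepositoryBase):
    def post_init(self, custom_args):
        # keep backend specific arguments around, if any were given
        self.custom_args = custom_args or []

    def use_default_location(self):
        # nothing to relocate in this toy backend
        pass

    def runtime_config(self):
        # a real backend returns its runtime environment/config here
        return {}

    def add_repo(
        self, name, uri, repo_type=None, prio=None, dist=None,
        components=None, user=None, secret=None, credentials_file=None,
        repo_gpgcheck=None, pkg_gpgcheck=None
    ):
        # a real backend would persist a repo file pointing at `uri`
        pass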
| gpl-3.0 | 7,048,106,492,177,465,000 | 26.901408 | 73 | 0.650681 | false |
cangencer/hazelcast-python-client | tests/address_test.py | 1 | 1917 | import unittest
from hazelcast.core import Address, Member
from hazelcast.util import get_possible_addresses
class AddressTest(unittest.TestCase):
def test_no_given_address(self):
addresses = get_possible_addresses([])
self.assertItemsEqual(addresses,
[Address("127.0.0.1", 5701), Address("127.0.0.1", 5702), Address("127.0.0.1", 5703)])
def test_single_given_address_with_no_port(self):
addresses = ["127.0.0.1"]
addresses = get_possible_addresses(addresses)
self.assertItemsEqual(addresses,
[Address("127.0.0.1", 5701), Address("127.0.0.1", 5702), Address("127.0.0.1", 5703)])
def test_single_address_and_port(self):
addresses = ["127.0.0.1:5701"]
addresses = get_possible_addresses(addresses)
self.assertItemsEqual(addresses, [Address("127.0.0.1", 5701)])
def test_multiple_addresses(self):
addresses = ["127.0.0.1:5701", "10.0.0.1"]
addresses = get_possible_addresses(addresses)
self.assertItemsEqual(addresses,
[Address("127.0.0.1", 5701), Address("10.0.0.1", 5701), Address("10.0.0.1", 5702),
Address("10.0.0.1", 5703)])
def test_multiple_addresses_non_unique(self):
addresses = ["127.0.0.1:5701", "127.0.0.1:5701"]
addresses = get_possible_addresses(addresses)
self.assertItemsEqual(addresses, [Address("127.0.0.1", 5701)])
def test_addresses_and_members(self):
addresses = ["127.0.0.1:5701"]
member_list = [Member(Address("10.0.0.1", 5703), "uuid1"), Member(Address("10.0.0.2", 5701), "uuid2")]
addresses = get_possible_addresses(addresses, member_list)
self.assertItemsEqual(addresses,
[Address("127.0.0.1", 5701), Address("10.0.0.1", 5703), Address("10.0.0.2", 5701)])
| apache-2.0 | -2,888,269,739,710,410,000 | 35.865385 | 115 | 0.593636 | false |
flavour/ifrc_qa | modules/s3db/ocr.py | 1 | 7350 | # -*- coding: utf-8 -*-
""" OCR Utility Functions
@copyright: 2009-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("OCRDataModel",
"ocr_buttons",
)
import os
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class OCRDataModel(S3Model):
"""
"""
names = ("ocr_meta",
"ocr_payload",
"ocr_form_status",
"ocr_field_crops",
"ocr_data_xml",
)
def model(self):
#T = current.T
#messages = current.messages
#UNKNOWN_OPT = messages.UNKNOWN_OPT
#NONE = messages["NONE"]
define_table = self.define_table
# Upload folders
folder = current.request.folder
metadata_folder = os.path.join(folder, "uploads", "ocr_meta")
payload_folder = os.path.join(folder, "uploads", "ocr_payload")
# =====================================================================
# OCR Meta Data
#
tablename = "ocr_meta"
define_table(tablename,
Field("form_uuid",
notnull=True,
length=128,
unique=True),
Field("resource_name",
notnull=True),
Field("s3ocrxml_file", "upload",
length = current.MAX_FILENAME_LENGTH,
uploadfolder = metadata_folder,
),
Field("layout_file", "upload",
length = current.MAX_FILENAME_LENGTH,
uploadfolder = metadata_folder,
),
Field("revision",
notnull=True,
length=128,
unique=True),
Field("pages", "integer"),
*s3_meta_fields())
#======================================================================
# OCR Payload
#
tablename = "ocr_payload"
define_table(tablename,
# a set of images = one complete form
Field("image_set_uuid",
notnull=True),
Field("image_file", "upload",
length = current.MAX_FILENAME_LENGTH,
notnull = True,
uploadfolder = payload_folder,
),
Field("page_number", "integer",
notnull=True),
*s3_meta_fields())
#======================================================================
# OCR Form Status
#
tablename = "ocr_form_status"
define_table(tablename,
Field("image_set_uuid",
notnull=True,
length=128,
unique=True),
Field("form_uuid",
notnull=True),
Field("review_status", "integer",
notnull=True,
default=0),
Field("job_uuid",
length=128,
unique=True),
Field("job_has_errors", "integer"),
*s3_meta_fields())
#======================================================================
# OCR Field Crops
#
tablename = "ocr_field_crops"
define_table(tablename,
Field("image_set_uuid",
notnull=True),
Field("resource_table",
notnull=True),
Field("field_name",
notnull=True),
Field("image_file", "upload",
length = current.MAX_FILENAME_LENGTH,
notnull = True,
uploadfolder = payload_folder,
),
Field("value"),
Field("sequence", "integer"),
*s3_meta_fields())
#======================================================================
# OCR XML Data
#
tablename = "ocr_data_xml"
define_table(tablename,
Field("image_set_uuid",
length=128,
unique=True,
notnull=True),
Field("data_file", "upload",
length = current.MAX_FILENAME_LENGTH,
notnull = True,
uploadfolder = payload_folder,
),
Field("form_uuid",
notnull=True,
default=""),
*s3_meta_fields())
# =============================================================================
def ocr_buttons(r):
""" Generate 'Print PDF' button in the view """
if not current.deployment_settings.has_module("ocr"):
return ""
if r.component:
urlargs = [r.id, r.component_name]
else:
urlargs = []
f = r.function
c = r.controller
a = r.application
T = current.T
UPLOAD = T("Upload Scanned OCR Form")
DOWNLOAD = T("Download OCR-able PDF Form")
_style = "height:10px;float:right;padding:3px;"
output = DIV(
A(IMG(_src="/%s/static/img/upload-ocr.png" % a, _alt=UPLOAD),
_id="upload-pdf-btn",
_href=URL(c=c, f=f, args=urlargs + ["import.pdf"]),
_title=UPLOAD,
_style=_style),
A(IMG(_src="/%s/static/img/download-ocr.png" % a, _alt=DOWNLOAD),
_id="download-pdf-btn",
_href=URL(c=c, f=f, args=urlargs + ["create.pdf"]),
_title=DOWNLOAD,
_style=_style),
)
return output
# END =========================================================================
| mit | 1,902,842,553,426,639,000 | 34.167464 | 79 | 0.428027 | false |
libo/Enigma2 | lib/python/Screens/TimerEdit.py | 1 | 14240 | from Components.ActionMap import ActionMap
from Components.Button import Button
from Components.config import config
from Components.MenuList import MenuList
from Components.TimerList import TimerList
from Components.TimerSanityCheck import TimerSanityCheck
from Components.UsageConfig import preferredTimerPath
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT
from Screen import Screen
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from ServiceReference import ServiceReference
from TimerEntry import TimerEntry, TimerLog
from Tools.BoundFunction import boundFunction
from time import time
class TimerEditList(Screen):
EMPTY = 0
ENABLE = 1
DISABLE = 2
CLEANUP = 3
DELETE = 4
def __init__(self, session):
Screen.__init__(self, session)
list = [ ]
self.list = list
self.fillTimerList()
self["timerlist"] = TimerList(list)
self.key_red_choice = self.EMPTY
self.key_yellow_choice = self.EMPTY
self.key_blue_choice = self.EMPTY
self["key_red"] = Button(" ")
self["key_green"] = Button(_("Add"))
self["key_yellow"] = Button(" ")
self["key_blue"] = Button(" ")
print "key_red_choice:",self.key_red_choice
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"],
{
"ok": self.openEdit,
"cancel": self.leave,
"green": self.addCurrentTimer,
"log": self.showLog,
"left": self.left,
"right": self.right,
"up": self.up,
"down": self.down
}, -1)
self.session.nav.RecordTimer.on_state_change.append(self.onStateChange)
self.onShown.append(self.updateState)
def up(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveUp)
self.updateState()
def down(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveDown)
self.updateState()
def left(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageUp)
self.updateState()
def right(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageDown)
self.updateState()
def toggleDisabledState(self):
cur=self["timerlist"].getCurrent()
if cur:
t = cur
if t.disabled:
print "try to ENABLE timer"
t.enable()
timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, cur)
if not timersanitycheck.check():
t.disable()
print "Sanity check failed"
self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, timersanitycheck.getSimulTimerList())
else:
print "Sanity check passed"
if timersanitycheck.doubleCheck():
t.disable()
else:
if t.isRunning():
if t.repeated:
list = (
(_("Stop current event but not coming events"), "stoponlycurrent"),
(_("Stop current event and disable coming events"), "stopall"),
(_("Don't stop current event but disable coming events"), "stoponlycoming")
)
self.session.openWithCallback(boundFunction(self.runningEventCallback, t), ChoiceBox, title=_("Repeating event currently recording... What do you want to do?"), list = list)
else:
t.disable()
self.session.nav.RecordTimer.timeChanged(t)
self.refill()
self.updateState()
def runningEventCallback(self, t, result):
if result is not None:
if result[1] == "stoponlycurrent" or result[1] == "stopall":
t.enable()
t.processRepeated(findRunningEvent = False)
self.session.nav.RecordTimer.doActivate(t)
if result[1] == "stoponlycoming" or result[1] == "stopall":
t.disable()
self.session.nav.RecordTimer.timeChanged(t)
self.refill()
self.updateState()
def removeAction(self, descr):
actions = self["actions"].actions
if descr in actions:
del actions[descr]
def updateState(self):
cur = self["timerlist"].getCurrent()
if cur:
if self.key_red_choice != self.DELETE:
self["actions"].actions.update({"red":self.removeTimerQuestion})
self["key_red"].setText(_("Delete"))
self.key_red_choice = self.DELETE
if cur.disabled and (self.key_yellow_choice != self.ENABLE):
self["actions"].actions.update({"yellow":self.toggleDisabledState})
self["key_yellow"].setText(_("Enable"))
self.key_yellow_choice = self.ENABLE
elif cur.isRunning() and not cur.repeated and (self.key_yellow_choice != self.EMPTY):
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
elif ((not cur.isRunning())or cur.repeated ) and (not cur.disabled) and (self.key_yellow_choice != self.DISABLE):
self["actions"].actions.update({"yellow":self.toggleDisabledState})
self["key_yellow"].setText(_("Disable"))
self.key_yellow_choice = self.DISABLE
else:
if self.key_red_choice != self.EMPTY:
self.removeAction("red")
self["key_red"].setText(" ")
self.key_red_choice = self.EMPTY
if self.key_yellow_choice != self.EMPTY:
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
showCleanup = True
for x in self.list:
if (not x[0].disabled) and (x[1] == True):
break
else:
showCleanup = False
if showCleanup and (self.key_blue_choice != self.CLEANUP):
self["actions"].actions.update({"blue":self.cleanupQuestion})
self["key_blue"].setText(_("Cleanup"))
self.key_blue_choice = self.CLEANUP
elif (not showCleanup) and (self.key_blue_choice != self.EMPTY):
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
def fillTimerList(self):
list = self.list
del list[:]
list.extend([(timer, False) for timer in self.session.nav.RecordTimer.timer_list])
list.extend([(timer, True) for timer in self.session.nav.RecordTimer.processed_timers])
list.sort(key=lambda x: x[0].begin)
def showLog(self):
cur=self["timerlist"].getCurrent()
if cur:
self.session.openWithCallback(self.finishedEdit, TimerLog, cur)
def openEdit(self):
cur=self["timerlist"].getCurrent()
if cur:
self.session.openWithCallback(self.finishedEdit, TimerEntry, cur)
def cleanupQuestion(self):
self.session.openWithCallback(self.cleanupTimer, MessageBox, _("Really delete done timers?"))
def cleanupTimer(self, delete):
if delete:
self.session.nav.RecordTimer.cleanup()
self.refill()
self.updateState()
def removeTimerQuestion(self):
cur = self["timerlist"].getCurrent()
if not cur:
return
self.session.openWithCallback(self.removeTimer, MessageBox, _("Do you really want to delete %s?") % (cur.name))
def removeTimer(self, result):
if not result:
return
list = self["timerlist"]
cur = list.getCurrent()
if cur:
timer = cur
timer.afterEvent = AFTEREVENT.NONE
self.session.nav.RecordTimer.removeEntry(timer)
self.refill()
self.updateState()
def refill(self):
oldsize = len(self.list)
self.fillTimerList()
lst = self["timerlist"]
newsize = len(self.list)
if oldsize and oldsize != newsize:
idx = lst.getCurrentIndex()
lst.entryRemoved(idx)
else:
lst.invalidate()
def addCurrentTimer(self):
event = None
service = self.session.nav.getCurrentService()
if service is not None:
info = service.info()
if info is not None:
event = info.getEvent(0)
# FIXME only works if already playing a service
serviceref = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference())
if event is None:
data = (int(time()), int(time() + 60), "", "", None)
else:
data = parseEvent(event, description = False)
self.addTimer(RecordTimerEntry(serviceref, checkOldTimers = True, dirname = preferredTimerPath(), *data))
def addTimer(self, timer):
self.session.openWithCallback(self.finishedAdd, TimerEntry, timer)
def finishedEdit(self, answer):
print "finished edit"
if answer[0]:
print "Edited timer"
entry = answer[1]
timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, entry)
success = False
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, timersanitycheck.getSimulTimerList())
else:
success = True
else:
success = True
if success:
print "Sanity check passed"
self.session.nav.RecordTimer.timeChanged(entry)
self.fillTimerList()
self.updateState()
else:
print "Timeredit aborted"
def finishedAdd(self, answer):
print "finished add"
if answer[0]:
entry = answer[1]
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
self.fillTimerList()
self.updateState()
else:
print "Timeredit aborted"
def finishSanityCorrection(self, answer):
self.finishedAdd(answer)
def leave(self):
self.session.nav.RecordTimer.on_state_change.remove(self.onStateChange)
self.close()
def onStateChange(self, entry):
self.refill()
self.updateState()
class TimerSanityConflict(Screen):
EMPTY = 0
ENABLE = 1
DISABLE = 2
EDIT = 3
def __init__(self, session, timer):
Screen.__init__(self, session)
self.timer = timer
print "TimerSanityConflict"
self["timer1"] = TimerList(self.getTimerList(timer[0]))
self.list = []
self.list2 = []
count = 0
for x in timer:
if count != 0:
self.list.append((_("Conflicting timer") + " " + str(count), x))
self.list2.append((timer[count], False))
count += 1
if count == 1:
self.list.append((_("Channel not in services list")))
self["list"] = MenuList(self.list)
self["timer2"] = TimerList(self.list2)
self["key_red"] = Button("Edit")
self["key_green"] = Button(" ")
self["key_yellow"] = Button(" ")
self["key_blue"] = Button(" ")
self.key_green_choice = self.EMPTY
self.key_yellow_choice = self.EMPTY
self.key_blue_choice = self.EMPTY
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"],
{
"ok": self.leave_ok,
"cancel": self.leave_cancel,
"red": self.editTimer1,
"up": self.up,
"down": self.down
}, -1)
self.onShown.append(self.updateState)
def getTimerList(self, timer):
return [(timer, False)]
def editTimer1(self):
self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer1"].getCurrent())
def toggleTimer1(self):
if self.timer[0].disabled:
self.timer[0].disabled = False
else:
if not self.timer[0].isRunning():
self.timer[0].disabled = True
self.finishedEdit((True, self.timer[0]))
def editTimer2(self):
self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer2"].getCurrent())
def toggleTimer2(self):
x = self["list"].getSelectedIndex() + 1 # the first is the new timer so we do +1 here
if self.timer[x].disabled:
self.timer[x].disabled = False
elif not self.timer[x].isRunning():
self.timer[x].disabled = True
self.finishedEdit((True, self.timer[0]))
def finishedEdit(self, answer):
self.leave_ok()
def leave_ok(self):
self.close((True, self.timer[0]))
def leave_cancel(self):
self.close((False, self.timer[0]))
def up(self):
self["list"].instance.moveSelection(self["list"].instance.moveUp)
self["timer2"].moveToIndex(self["list"].getSelectedIndex())
def down(self):
self["list"].instance.moveSelection(self["list"].instance.moveDown)
self["timer2"].moveToIndex(self["list"].getSelectedIndex())
def removeAction(self, descr):
actions = self["actions"].actions
if descr in actions:
del actions[descr]
def updateState(self):
if self.timer[0] is not None:
if self.timer[0].disabled and self.key_green_choice != self.ENABLE:
self["actions"].actions.update({"green":self.toggleTimer1})
self["key_green"].setText(_("Enable"))
self.key_green_choice = self.ENABLE
elif self.timer[0].isRunning() and not self.timer[0].repeated and self.key_green_choice != self.EMPTY:
self.removeAction("green")
self["key_green"].setText(" ")
self.key_green_choice = self.EMPTY
elif (not self.timer[0].isRunning() or self.timer[0].repeated ) and self.key_green_choice != self.DISABLE:
self["actions"].actions.update({"green":self.toggleTimer1})
self["key_green"].setText(_("Disable"))
self.key_green_choice = self.DISABLE
if len(self.timer) > 1:
x = self["list"].getSelectedIndex()
if self.timer[x] is not None:
if self.key_yellow_choice == self.EMPTY:
self["actions"].actions.update({"yellow":self.editTimer2})
self["key_yellow"].setText(_("Edit"))
self.key_yellow_choice = self.EDIT
if self.timer[x].disabled and self.key_blue_choice != self.ENABLE:
self["actions"].actions.update({"blue":self.toggleTimer2})
self["key_blue"].setText(_("Enable"))
self.key_blue_choice = self.ENABLE
elif self.timer[x].isRunning() and not self.timer[x].repeated and self.key_blue_choice != self.EMPTY:
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
elif (not self.timer[x].isRunning() or self.timer[x].repeated ) and self.key_blue_choice != self.DISABLE:
self["actions"].actions.update({"blue":self.toggleTimer2})
self["key_blue"].setText(_("Disable"))
self.key_blue_choice = self.DISABLE
else:
#FIXME.... this doesn't hide the buttons themselves.... just the text
if self.key_yellow_choice != self.EMPTY:
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
if self.key_blue_choice != self.EMPTY:
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
| gpl-2.0 | 2,415,394,993,166,281,700 | 31.290249 | 179 | 0.692275 | false |
treww/counters | server/performance_server.py | 1 | 4745 | __author__ = 'treww'
import json
import MySQLdb as mysql
import http.client
import tornado.ioloop
import tornado.web
class _DatabaseRows:
_cursor = None
def __init__(self, cursor):
self._cursor = cursor
def __iter__(self):
return self
def __next__(self):
row = self._cursor.fetchone()
if row:
return row
raise StopIteration
class _CountersDB:
_db = None
def __init__(self):
host = 'rykovanov.com'
user ='counters'
password = 'qwerty'
database='counters'
self._db = mysql.connect(host=host, user=user, passwd=password, db=database)
def select_group(self, group_id):
c = self._db.cursor()
if group_id:
c.execute('select * from groups where groupid = {}'.format(group_id))
else:
c.execute('select * from groups where groupid = parentid')
row = c.fetchone()
return row
def select_sub_groups(self, parent_id):
c = self._db.cursor()
if parent_id:
c.execute('select * from groups where parentid = {} and not groupid = parentid'.format(parent_id))
else:
c.execute('select * from groups where not groupid = parentid and parentid in '
'(select groupid from groups where groupid = parentid)')
return _DatabaseRows(c)
def add_group(self, parent_id, name):
c = self._db.cursor()
c.execute('insert into groups (parentid, name) values ({}, "{}")'.format(parent_id, name))
self._db.commit()
c.execute("select last_insert_id()")
row = c.fetchone()
return row[0]
def delete_group(self, group_id):
c = self._db.cursor()
c.execute('delete from groups where groupid={}'.format(group_id))
self._db.commit()
class _CountersRequest(tornado.web.RequestHandler):
def _convert_to_group(self, row):
group = {
"id" : int(row[0]),
"parent_id" : int(row[1]),
"name": str(row[2])
}
return group
class _GroupInfo(_CountersRequest):
def get(self, group_id=None):
db = _CountersDB()
row = db.select_group(group_id)
if not row:
self.set_status(http.client.NOT_FOUND)
return
self.set_header('Content-Type', 'application/json; charset=utf-8')
groups = [ self._convert_to_group(row) ]
self.write(json.dumps(groups))
def post(self, group_id):
group_id = int(group_id) #ensure passed parameter is a number
if len(self.request.body) > 512:
self.send_error(http.client.BAD_REQUEST)
return
group = json.loads(self.request.body.decode('utf-8'))
if not isinstance(group, list) or len(group) != 1:
self.send_error(http.client.BAD_REQUEST)
return
db = _CountersDB()
new_group_id = db.add_group(group_id, group[0]['name'])
self.set_header("Location", '/api/v1/groups/{}'.format(new_group_id))
self.set_status(http.client.CREATED)
def delete(self, group_id):
group_id = int(group_id) #ensure passed parameter is a number
db = _CountersDB()
db.delete_group(group_id)
#TODO Support deletion of multiple groups inside group specified in the request.
#TODO HTTP/1.1 DELETE /api/v1/groups/6
#TODO [
#TODO {"id": 12}
#TODO {"id": 14}
#TODO ]
class _BrowseGroup(_CountersRequest):
def get(self, group_id=None):
self.set_header('Content-Type', 'application/json; charset=utf-8')
db = _CountersDB()
rows = db.select_sub_groups(int(group_id) if group_id else None)
self.write("[")
first = True
for row in rows:
group = self._convert_to_group(row)
if first == False:
self.write(",")
self.write(json.dumps(group))
first = False
self.write("]")
class _ListCounters(_CountersRequest):
def get(self):
self.set_header('Content-Type', 'application/json; charset=utf-8')
self.write(json.dumps({ "name" : "hello"}))
class _MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
class PerformanceCountersServer:
def run(self):
application = tornado.web.Application([
(r'/', _MainHandler),
(r'/api/v1/groups$', _GroupInfo),
(r'/api/v1/groups[/]?$', _BrowseGroup),
(r'/api/v1/groups/([0-9]+)$', _GroupInfo),
(r'/api/v1/groups/([0-9]+)/$', _BrowseGroup),
])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
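# Hedged usage sketch, not part of the original module. With the server
# above listening on port 8888, the group tree can be exercised like this
# (the group id 1 and the name "cpu" are invented example values):
#   import http.client, json
#   conn = http.client.HTTPConnection('localhost', 8888)
#   conn.request('GET', '/api/v1/groups')        # root group as a JSON list
#   print(conn.getresponse().read())
#   conn.request('POST', '/api/v1/groups/1',
#                body=json.dumps([{'name': 'cpu'}]))
#   print(conn.getresponse().status)             # 201, Location header holds the new id
#   conn.request('DELETE', '/api/v1/groups/12')  # drop an existing group
#   conn.getresponse()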
| mit | 6,965,204,526,205,543,000 | 29.22293 | 110 | 0.567545 | false |
nkmk/python-snippets | notebook/numpy_nan_replace.py | 1 | 1889 | import numpy as np
a = np.genfromtxt('data/src/sample_nan.csv', delimiter=',')
print(a)
# [[11. 12. nan 14.]
# [21. nan nan 24.]
# [31. 32. 33. 34.]]
a_nan = np.array([0, 1, np.nan, float('nan')])
print(a_nan)
# [ 0. 1. nan nan]
print(np.nan == np.nan)
# False
print(np.isnan(np.nan))
# True
print(a_nan == np.nan)
# [False False False False]
print(np.isnan(a_nan))
# [False False True True]
a_fill = np.genfromtxt('data/src/sample_nan.csv', delimiter=',', filling_values=0)
print(a_fill)
# [[11. 12. 0. 14.]
# [21. 0. 0. 24.]
# [31. 32. 33. 34.]]
a = np.genfromtxt('data/src/sample_nan.csv', delimiter=',')
print(np.nan_to_num(a))
# [[11. 12. 0. 14.]
# [21. 0. 0. 24.]
# [31. 32. 33. 34.]]
print(a)
# [[11. 12. nan 14.]
# [21. nan nan 24.]
# [31. 32. 33. 34.]]
print(np.nan_to_num(a, copy=False))
# [[11. 12. 0. 14.]
# [21. 0. 0. 24.]
# [31. 32. 33. 34.]]
print(a)
# [[11. 12. 0. 14.]
# [21. 0. 0. 24.]
# [31. 32. 33. 34.]]
a = np.genfromtxt('data/src/sample_nan.csv', delimiter=',')
print(np.nan_to_num(a, nan=-1))
# [[11. 12. -1. 14.]
# [21. -1. -1. 24.]
# [31. 32. 33. 34.]]
print(np.nanmean(a))
# 23.555555555555557
print(np.nan_to_num(a, nan=np.nanmean(a)))
# [[11. 12. 23.55555556 14. ]
# [21. 23.55555556 23.55555556 24. ]
# [31. 32. 33. 34. ]]
a = np.genfromtxt('data/src/sample_nan.csv', delimiter=',')
print(np.isnan(a))
# [[False False True False]
# [False True True False]
# [False False False False]]
a[np.isnan(a)] = 0
print(a)
# [[11. 12. 0. 14.]
# [21. 0. 0. 24.]
# [31. 32. 33. 34.]]
a = np.genfromtxt('data/src/sample_nan.csv', delimiter=',')
a[np.isnan(a)] = np.nanmean(a)
print(a)
# [[11. 12. 23.55555556 14. ]
# [21. 23.55555556 23.55555556 24. ]
# [31. 32. 33. 34. ]]
| mit | 8,838,509,263,677,381,000 | 21.759036 | 82 | 0.51244 | false |
carlos-ferras/Sequence-ToolKit | controller/genrep/dialogs/apply_this_to.py | 1 | 5418 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from functools import partial
from model.handle_config import ConfigHandler
from view.genrep.dialogs.ui_apply_this_to import Ui_apply_to
from view.dialogs.base_dialog import BaseDialog
class ApplyThisTo(BaseDialog, Ui_apply_to):
def __init__(self, profile_parameters, parent=None):
BaseDialog.__init__(self, parent)
self.setupUi(self)
self.config_handler = ConfigHandler()
self.push_button_accept.clicked.connect(self.accept)
self.push_button_cancel.clicked.connect(self.close)
for parameter in self.getConfiguration('parameters', 'GENREP'):
self.criterion_1.addItem(profile_parameters[parameter])
self.criterion_2.addItem(profile_parameters[parameter])
self.criterion_3.addItem(profile_parameters[parameter])
self.criterion_4.addItem(profile_parameters[parameter])
self.criterion_1.currentIndexChanged.connect(partial(self.criterionChange, self.criterion_1))
self.criterion_2.currentIndexChanged.connect(partial(self.criterionChange, self.criterion_2))
self.criterion_3.currentIndexChanged.connect(partial(self.criterionChange, self.criterion_3))
self.criterion_4.currentIndexChanged.connect(partial(self.criterionChange, self.criterion_4))
self.condition_1.currentIndexChanged.connect(partial(self.conditionChange, self.condition_1))
self.condition_2.currentIndexChanged.connect(partial(self.conditionChange, self.condition_2))
self.condition_3.currentIndexChanged.connect(partial(self.conditionChange, self.condition_3))
self.condition_4.currentIndexChanged.connect(partial(self.conditionChange, self.condition_4))
self.value_1.textChanged.connect(partial(self.valueChange, self.value_1))
self.value_2.textChanged.connect(partial(self.valueChange, self.value_2))
self.value_3.textChanged.connect(partial(self.valueChange, self.value_3))
def criterionChange(self, criterion):
enabled = False
if criterion.currentIndex() > 0:
enabled = True
if criterion == self.criterion_1:
condition = self.condition_1
elif criterion == self.criterion_2:
condition = self.condition_2
elif criterion == self.criterion_3:
condition = self.condition_3
else:
condition = self.condition_4
condition.setEnabled(enabled)
if not enabled:
condition.setCurrentIndex(0)
def conditionChange(self, condition):
enabled_value = False
criterion = False
enabled_criterion = False
if 0 < condition.currentIndex() < 3:
enabled_criterion = True
elif condition.currentIndex() == 3:
enabled_value = True
if condition == self.condition_1:
value = self.value_1
criterion = self.criterion_2
elif condition == self.condition_2:
value = self.value_2
criterion = self.criterion_3
elif condition == self.condition_3:
value = self.value_3
criterion = self.criterion_4
else:
value = self.value_4
value.setEnabled(enabled_value)
if not enabled_value:
value.setText('')
if criterion:
criterion.setEnabled(enabled_criterion)
if not enabled_criterion:
criterion.setCurrentIndex(0)
def valueChange(self, value):
criterion = False
enabled = False
if value.text() and value.text() is not None:
enabled = True
if value == self.value_1:
criterion = self.criterion_2
elif value == self.value_2:
criterion = self.criterion_3
elif value == self.value_3:
criterion = self.criterion_4
if criterion:
criterion.setEnabled(enabled)
if not enabled:
criterion.setCurrentIndex(0)
def getData(self):
filters = []
levels = (
(self.criterion_1, self.condition_1, self.value_1),
(self.criterion_2, self.condition_2, self.value_2),
(self.criterion_3, self.condition_3, self.value_3),
(self.criterion_4, self.condition_4, self.value_4),
)
for level in levels:
criterion = level[0]
condition = level[1]
value = level[2]
if criterion.isEnabled():
if 0 < condition.currentIndex() < 3:
filters.append((
criterion.currentIndex() - 1,
condition.currentIndex(),
None
))
elif condition.currentIndex() == 3:
if value.text() and value.text() is not None:
filters.append((
criterion.currentIndex() - 1,
condition.currentIndex(),
value.text()
))
return tuple(filters)
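# Hedged illustration, not part of the original file: getData() returns one
# (parameter index, condition index, value) tuple per active filter level.
# With an invented selection -- second parameter, the condition at index 3
# (the only one that takes free text) and the text "42" -- it would return
# ((1, 3, '42'),); for conditions at index 1 or 2 the value slot is None.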
def getConfiguration(self, key, file):
return self.config_handler.configurations[file][key]
def setConfiguration(self, key, value, file):
self.config_handler.configurations[file][key] = value
self.config_handler.save(file)
| gpl-3.0 | -4,717,723,878,453,943,000 | 38.26087 | 101 | 0.607973 | false |
NotBobTheBuilder/robogrid | robogrid/robot.py | 1 | 2297 | from .grids import Simple_Grid
class Robot(object):
def __init__(self, name, grid=None):
self.name = name
if grid == None:
grid = Simple_Grid(20)
self.grid = grid
start_pos = self.grid.free_position()
if start_pos == None:
raise ValueError("No space in proposed grid")
self._heading = 0
self._x, self._y = start_pos
def __repr__(self):
summary = {
"name": self.name,
"grid": repr(self.grid)
}
return 'Robot("{name}", {grid})'.format(**summary)
def __str__(self):
arrow = "^>v<"[self.heading]
result = ""
for row_i, row in enumerate(self.grid):
for col_i, cell in enumerate(row):
if (col_i, row_i) == self.pos:
result += arrow
else:
result += self.grid.char(cell)
result += "\n"
return result
def forward(self):
if not self.can_move_forward():
return
if self.heading == 0:
self._y -= 1
elif self.heading == 1:
self._x += 1
elif self.heading == 2:
self._y += 1
elif self.heading == 3:
self._x -= 1
def can_move_forward(self):
return not self.cell_at_heading_blocked()
def cell_at_heading_blocked(self, heading=None):
return {
0: self.grid[self.x, self.y-1],
1: self.grid[self.x+1, self.y],
2: self.grid[self.x, self.y+1],
3: self.grid[self.x-1, self.y],
}[self.heading if heading is None else heading]
def backward(self):
self.right()
self.right()
self.forward()
self.right()
self.right()
def right(self):
self.heading += 1
def left(self):
self.heading -= 1
@property
def heading(self):
return self._heading
@heading.setter
def heading(self, val):
self._heading = val % 4
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def pos(self):
return self.x, self.y
def is_finished(self):
# compare the full (x, y) position against the goal cell as a tuple
return (self.x, self.y) == (self.grid.width - 2, self.grid.height - 2)
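# --- Hedged usage sketch, not part of the original module -----------------
# Drives a robot on the default 20x20 grid with a naive "turn right when
# blocked" policy; the 1000-step cap is an invented safety bound and whether
# the goal is reached depends on the maze Simple_Grid produces. Run it as
# `python -m robogrid.robot` so the relative import above resolves.
if __name__ == '__main__':
    bot = Robot("demo")
    for _ in range(1000):
        if bot.is_finished():
            break
        if bot.can_move_forward():
            bot.forward()
        else:
            bot.right()
    print(bot)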
| mit | 6,702,044,922,090,397,000 | 23.178947 | 74 | 0.491946 | false |
ppyordanov/HCI_4_Future_Cities | Server/src/virtualenv/Lib/encodings/iso8859_16.py | 1 | 12859 | """ Python Character Mapping Codec iso8859_16 generated from 'MAPPINGS/ISO8859/8859-16.TXT' with gencodec.py.
""" # "
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-16',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0105' # 0xA2 -> LATIN SMALL LETTER A WITH OGONEK
u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
u'\u20ac' # 0xA4 -> EURO SIGN
u'\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK
u'\u0160' # 0xA6 -> LATIN CAPITAL LETTER S WITH CARON
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u0161' # 0xA8 -> LATIN SMALL LETTER S WITH CARON
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u0218' # 0xAA -> LATIN CAPITAL LETTER S WITH COMMA BELOW
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u0179' # 0xAC -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u017a' # 0xAE -> LATIN SMALL LETTER Z WITH ACUTE
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u010c' # 0xB2 -> LATIN CAPITAL LETTER C WITH CARON
u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
u'\u017d' # 0xB4 -> LATIN CAPITAL LETTER Z WITH CARON
u'\u201d' # 0xB5 -> RIGHT DOUBLE QUOTATION MARK
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u017e' # 0xB8 -> LATIN SMALL LETTER Z WITH CARON
u'\u010d' # 0xB9 -> LATIN SMALL LETTER C WITH CARON
u'\u0219' # 0xBA -> LATIN SMALL LETTER S WITH COMMA BELOW
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u0152' # 0xBC -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xBD -> LATIN SMALL LIGATURE OE
u'\u0178' # 0xBE -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0106' # 0xC5 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u015a' # 0xD7 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u0170' # 0xD8 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0118' # 0xDD -> LATIN CAPITAL LETTER E WITH OGONEK
u'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u0107' # 0xE5 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u015b' # 0xF7 -> LATIN SMALL LETTER S WITH ACUTE
u'\u0171' # 0xF8 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u0119' # 0xFD -> LATIN SMALL LETTER E WITH OGONEK
u'\u021b' # 0xFE -> LATIN SMALL LETTER T WITH COMMA BELOW
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
| mit | -2,447,403,365,782,937,600 | 40.347267 | 109 | 0.578505 | false |
alsoicode/django-maintenancemode-2 | testproject/testproject/urls.py | 1 | 1160 | """testproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from distutils.version import StrictVersion
from django.conf.urls import include
from django.contrib import admin
from maintenancemode.utils.settings import DJANGO_VERSION
if DJANGO_VERSION >= StrictVersion('2.0'):
from django.urls import path
urlpatterns = [
path(r'admin/', admin.site.urls),
path(r'', include('app.urls')),
]
else:
from django.conf.urls import url
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('app.urls', namespace='app')),
]
| apache-2.0 | 1,782,432,068,853,554,000 | 34.151515 | 77 | 0.67931 | false |
Grumbel/scatterbackup | scatterbackup/format.py | 1 | 7129 | # ScatterBackup - A chaotic backup solution
# Copyright (C) 2016 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import stat
import datetime
from pwd import getpwuid
from grp import getgrgid
from scatterbackup.units import bytes2human_decimal, bytes2human_binary, units
from scatterbackup.time import format_time
class Bytes:
def __init__(self, count):
self.count = count
def __format__(self, spec):
r = spec.rsplit(":", maxsplit=1)
str_spec, unit = r if len(r) == 2 else (r[0], "h")
return format(self.as_str(unit), str_spec)
def as_str(self, unit):
if unit == "h":
return bytes2human_decimal(self.count)
elif unit == "H":
return bytes2human_binary(self.count)
elif unit == "r":
return "{}".format(self.count)
elif unit == "B":
return "{}{}".format(self.count, unit)
elif unit in units:
return "{:.2f}{}".format(self.count / units[unit], unit)
else:
raise Exception("unknown unit: {}".format(unit))
class Checksum:
def __init__(self, checksum):
self.checksum = checksum
def __format__(self, spec):
r = spec.rsplit(":", maxsplit=1)
if len(r) == 2:
str_spec = r[0]
cut = int(r[1])
else:
str_spec = r[0]
cut = None
return format(self.checksum[0:cut], str_spec)
class Time:
def __init__(self, time):
self.time = time
def __format__(self, spec):
r = spec.rsplit(":", maxsplit=1)
str_spec, time_spec = r if len(r) == 2 else (r[0], "h")
return format(self.as_str(time_spec), str_spec)
def as_str(self, spec):
if spec == 'r':
return str(self.time)
elif spec == 'iso' or spec == 'i':
return format_time(self.time)
elif spec == 'h':
if self.time is None:
return " <unknown> "
else:
dt = datetime.datetime.fromtimestamp(self.time / 1000**3)
return dt.strftime("%F %T")
else:
if self.time is None:
return "<unknown>"
else:
dt = datetime.datetime.fromtimestamp(self.time / 1000**3)
return dt.strftime(spec)
class Mode:
def __init__(self, mode):
self.mode = mode
def __format__(self, spec):
r = spec.rsplit(":", maxsplit=1)
str_spec, spec = r if len(r) == 2 else (r[0], "h")
return format(self.as_str(spec), str_spec)
def as_str(self, spec):
if spec == 'h':
return self.as_str_human()
else:
return str(self.mode)
def as_str_human(self):
mode = self.mode
s = ""
if stat.S_ISDIR(mode):
s += "d"
elif stat.S_ISCHR(mode):
s += "c"
elif stat.S_ISBLK(mode):
s += "b"
elif stat.S_ISREG(mode):
s += "-"
elif stat.S_ISFIFO(mode):
s += "p"
elif stat.S_ISLNK(mode):
s += "l"
elif stat.S_ISSOCK(mode):
s += "s"
else:
s += "?"
if mode & stat.S_IRUSR:
s += "r"
else:
s += "-"
if mode & stat.S_IWUSR:
s += "w"
else:
s += "-"
        if mode & stat.S_IXUSR:
            s += "s" if mode & stat.S_ISUID else "x"
        else:
            s += "S" if mode & stat.S_ISUID else "-"
if mode & stat.S_IRGRP:
s += "r"
else:
s += "-"
if mode & stat.S_IWGRP:
s += "w"
else:
s += "-"
if mode & stat.S_IXGRP:
s += "s" if mode & stat.S_ISGID else "x"
else:
s += "S" if mode & stat.S_ISGID else "-"
if mode & stat.S_IROTH:
s += "r"
else:
s += "-"
if mode & stat.S_IWOTH:
s += "w"
else:
s += "-"
        if mode & stat.S_IXOTH:
            s += "t" if mode & stat.S_ISVTX else "x"
        else:
            s += "T" if mode & stat.S_ISVTX else "-"
return s
class RelPath:
def __init__(self, path):
self.path = path
def __format__(self, spec):
r = spec.rsplit(":", maxsplit=1)
str_spec, spec = r if len(r) == 2 else (r[0], "")
return format(self.as_str(spec), str_spec)
def as_str(self, spec):
return os.path.relpath(self.path, spec)
class FileInfoFormatter:
def __init__(self, fileinfo):
self.fileinfo = fileinfo
def __getitem__(self, key):
# FIXME: potential security hole
return self.__getattribute__(key)()
def path(self):
return self.fileinfo.path
def relpath(self):
return RelPath(self.fileinfo.path)
def dev(self):
return self.fileinfo.dev
def ino(self):
return self.fileinfo.ino
def mode(self):
return Mode(self.fileinfo.mode)
def nlink(self):
return self.fileinfo.nlink
def uid(self):
return self.fileinfo.uid
def gid(self):
return self.fileinfo.gid
def owner(self):
try:
return getpwuid(self.fileinfo.uid).pw_name # FIXME: maybe cache this?
except KeyError as err:
return str(self.fileinfo.uid)
def group(self):
try:
return getgrgid(self.fileinfo.gid).gr_name # FIXME: maybe cache this?
except KeyError as err:
return str(self.fileinfo.gid)
def rdev(self):
return self.fileinfo.rdev
def size(self):
return Bytes(self.fileinfo.size)
def blksize(self):
return self.fileinfo.blksize
def blocks(self):
return self.fileinfo.blocks
def atime(self):
return Time(self.fileinfo.atime)
def ctime(self):
return Time(self.fileinfo.ctime)
def mtime(self):
return Time(self.fileinfo.mtime)
    def time(self):
        return Time(self.fileinfo.time)
def birth(self):
return self.fileinfo.birth
def death(self):
return self.fileinfo.death
def sha1(self):
return Checksum(self.fileinfo.blob.sha1 if self.fileinfo.blob else "<sha1:unknown>")
def md5(self):
return Checksum(self.fileinfo.blob.md5 if self.fileinfo.blob else "<md5:unknown>")
def target(self):
return self.fileinfo.target
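
# Illustrative use of FileInfoFormatter (editor addition): it exposes the wrapper
# classes above through __getitem__, so a FileInfo-like object can be rendered with
# str.format_map using per-field specs, e.g.
#
#     fmt = FileInfoFormatter(fileinfo)   # `fileinfo` is assumed to be a FileInfo
#     line = "{mode} {owner} {group} {size:>10:h} {mtime} {path}".format_map(fmt)
#
# Each ":spec"/":unit" suffix is then interpreted by Bytes, Time, Mode or RelPath.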
# EOF #
| gpl-3.0 | -175,609,349,884,586,240 | 24.280142 | 92 | 0.534016 | false |
carefree0910/MachineLearning | _Dist/NeuralNetworks/_Tests/_UnitTests/b_Advanced.py | 1 | 2339 | import os
import sys
root_path = os.path.abspath("../../../../")
if root_path not in sys.path:
sys.path.append(root_path)
import unittest
import numpy as np
from Util.Util import DataUtil
from _Dist.NeuralNetworks.e_AdvancedNN.NN import Advanced
from _Dist.NeuralNetworks._Tests._UnitTests.UnitTestUtil import clear_cache
base_params = {
"name": "UnitTest",
"data_info": {
"numerical_idx": [True] * 6 + [False],
"categorical_columns": []
},
"model_param_settings": {"n_epoch": 3, "max_epoch": 5}
}
nn = Advanced(**base_params)
train_set, cv_set, test_set = DataUtil.gen_special_linear(1000, 2, 2, 2, one_hot=False)
class TestAdvancedNN(unittest.TestCase):
def test_00_train(self):
self.assertIsInstance(
nn.fit(*train_set, *cv_set, verbose=0), Advanced,
msg="Train failed"
)
def test_01_predict(self):
self.assertIs(nn.predict(train_set[0]).dtype, np.dtype("float32"), "Predict failed")
self.assertIs(nn.predict_classes(cv_set[0]).dtype, np.dtype("int32"), "Predict classes failed")
self.assertIs(nn.predict_classes(test_set[0]).dtype, np.dtype("int32"), "Predict classes failed")
def test_02_evaluate(self):
self.assertEqual(len(nn.evaluate(*train_set, *cv_set, *test_set)), 3, "Evaluation failed")
def test_03_save(self):
self.assertIsInstance(nn.save(), Advanced, msg="Save failed")
def test_04_load(self):
global nn
nn = Advanced(**base_params).load()
self.assertIsInstance(nn, Advanced, "Load failed")
def test_05_re_predict(self):
self.assertIs(nn.predict(train_set[0]).dtype, np.dtype("float32"), "Re-Predict failed")
self.assertIs(nn.predict_classes(cv_set[0]).dtype, np.dtype("int32"), "Re-Predict classes failed")
self.assertIs(nn.predict_classes(test_set[0]).dtype, np.dtype("int32"), "Re-Predict classes failed")
def test_06_re_evaluate(self):
self.assertEqual(len(nn.evaluate(*train_set, *cv_set, *test_set)), 3, "Re-Evaluation failed")
def test_07_re_train(self):
self.assertIsInstance(
nn.fit(*train_set, *cv_set, verbose=0), Advanced,
msg="Re-Train failed"
)
def test_99_clear_cache(self):
clear_cache()
if __name__ == '__main__':
unittest.main()
| mit | -1,695,884,148,561,220,400 | 32.898551 | 108 | 0.637024 | false |
paiser/component-management | t.py | 1 | 11560 | #!/usr/bin/env python
from storm.locals import *
from IMSmotorcfg import *
from IMSmotor import *
import sys
class Instrument(object):
__storm_table__ = "instrument"
id = Int(primary=True)
name = Unicode()
#locations = Unicode()
def __init__(self, name):
self.name = name
class Loc(object):
__storm_table__ = "location"
#instrument_id = Int(primary=True)
id = Int(primary=True)
instrument_id = Int()
instrument = Reference(instrument_id, Instrument.id)
name = Unicode()
###components = Unicode()
#locations = Reference(Instrument.locations, name)
class Location(Loc):
def __init__(self, name):
self.name = name
#self.initialize()
class Component(Location):
__storm_table__ = "component"
id = Int(primary=True)
location_id = Int()
location = Reference(location_id, Location.id)
def __init__(self, name):
self.name = name
class Device(Component):
__storm_table__ = "device"
id = Int(primary=True)
component_id = Int()
component = Reference(component_id, Component.id)
def __init__(self, name):
self.name = name
class MotorManager(object):
def __init__(self, scheme, user, passwd, hostname, port, dbname, parent=None):
dbpars = (scheme, user, passwd, hostname, port, dbname)
database = create_database("%s://%s:%s@%s:%s/%s" % dbpars)
self.store = Store(database)
def create_alltables(self):
try:
self.store.execute("CREATE TABLE instrument "
"(id INTEGER PRIMARY KEY, name VARCHAR)", noresult=True)
self.store.execute("CREATE TABLE location "
"(id INTEGER PRIMARY KEY, instrument_id INTEGER, name VARCHAR)", noresult=True)
self.store.execute("CREATE TABLE component "
"(id INTEGER PRIMARY KEY, instrument_id INTEGER, location_id INTEGER, name VARCHAR)", noresult=True)
self.store.execute("CREATE TABLE device "
"(id INTEGER PRIMARY KEY, instrument_id INTEGER, location_id INTEGER, component_id INTEGER, name VARCHAR)", noresult=True)
self.store.execute("CREATE TABLE cfgparameter "
"(name VARCHAR(20) NOT NULL DEFAULT '', cfg_id INTEGER(10) NOT NULL DEFAULT '0',value VARCHAR(45) NOT NULL DEFAULT '', PRIMARY KEY (name, cfg_id))", noresult=True)
self.store.execute("CREATE TABLE cfgparameterinfo "
"(name VARCHAR(20) NOT NULL DEFAULT '', info VARCHAR(255) NOT NULL DEFAULT '', PRIMARY KEY (name))", noresult=True)
self.store.execute("CREATE TABLE imsmotorcfg "
"(id INTEGER PRIMARY KEY NOT NULL, component_name VARCHAR(25), motor_name INTEGER(10), name VARCHAR(40) NOT NULL DEFAULT '', description VARCHAR(255), signature VARCHAR(40), date datetime NOT NULL DEFAULT '')", noresult=True)
self.store.execute("CREATE TABLE imsmotor "
"(component_name VARCHAR(25) NOT NULL DEFAULT '', name VARCHAR(25) NOT NULL DEFAULT '', alias VARCHAR(30), PRIMARY KEY (component_name, name))", noresult=True)
except:
pass
# References:
Instrument.locations = ReferenceSet(Instrument.id, Location.instrument_id)
Location.components = ReferenceSet(Location.id, Component.location_id)
def commit(self):
self.store.commit()
    def rollback(self):
self.store.rollback()
def addinstrument(self, instrument_name=None):
# CHECK IF INSTRUMENT ALREADY EXISTS:
instrument = self.getinstrument(instrument_name)
if instrument:
if instrument.name == instrument_name:
print 'Instrument %s already exists' % instrument.name
return instrument
newinstrument = self.store.add(Instrument(unicode(instrument_name)))
print 'Creating New Instrument', newinstrument.name
self.commit()
return newinstrument
def addlocation(self, location_name=None, instrument=None):
if instrument:
location = self.getlocation(location_name)
if location:
if location.name == location_name:
print 'Location %s already exists in' % location.name, location.instrument.name
return location
newlocation = self.store.add(Location(unicode(location_name)))
newlocation.instrument = instrument
print 'Creating New Location', newlocation.name, 'in instrument', instrument.name
self.commit()
return newlocation
return None
def addcomponent(self, component_name=None, location=None):
if location:
component = self.getcomponent(component_name)
if component:
if component.name == component_name:
print 'Component %s already exists in' % component.name, component.location.name
return component
newcomponent = self.store.add(Component(unicode(component_name)))
newcomponent.location = location
newcomponent.instrument = location.instrument
print 'Creating New Component', newcomponent.name, 'in location', location.name, 'in instrument', newcomponent.instrument.name
self.commit()
return newcomponent
return None
def adddevice(self, device_name=None, component=None):
if component:
device = self.getdevice(device_name)
if device:
if device.name == device_name:
print 'Device %s already exists in' % device.name, device.component.name
return device
newdevice = self.store.add(Device(unicode(device_name)))
newdevice.component = component
newdevice.location = component.location
newdevice.instrument = component.instrument
print 'Creating New Device', newdevice.name, 'in component', component.name, 'in location', component.location.name, 'in instrument', newdevice.instrument.name
self.commit()
return newdevice
return None
def getinstrument(self, instrument_name):
return self.store.find(Instrument, Instrument.name == unicode(instrument_name)).one()
def getlocation(self, location_name):
return self.store.find(Location, Location.name == unicode(location_name)).one()
def getcomponent(self, component_name):
return self.store.find(Component, Component.name == unicode(component_name)).one()
def getdevice(self, device_name):
return self.store.find(Device, Device.name == unicode(device_name)).one()
def getinstruments(self):
result = self.store.find(Instrument, Instrument.name==unicode('*'))
result.order_by(Instrument.name)
return result.count()
def getlocations(self, instrument):
locations = list()
for location in instrument.locations:
locations.append(str(location.name))
return locations
def getcomponents(self, location):
components = list()
for component in location.components:
components.append(str(component.name))
return components
def getmotors(self, component):
motors = list()
for motor in component.motors:
motors.append(str(motor.name))
return motors
def delinstrument(self, instrument):
self.store.remove(instrument)
del instrument
self.commit()
def dellocation(self, location):
self.store.remove(location)
del location
self.commit()
def delcomponent(self, component):
self.store.remove(component)
del component
self.commit()
def deldevice(self, device):
self.store.remove(device)
del device
self.commit()
def addIMSconfiguration(self, motor, config=None):
if config:
newconfig = self.store.add(IMSMotorCfg(motor.name))
print 'Creating New Configuration', newconfig.name
self.commit()
return newconfig
return None
def addIMSmotor(self, motor, component=None):
if component:
newimsmotor = self.store.add(IMSMotor(motor.name, component.name))
print 'Creating New IMSmotor', newimsmotor.name
self.commit()
return newimsmotor
return None
if __name__ == '__main__':
import platform
scheme = 'sqlite'
user = 'pcds'
passwd = 'pcds2014'
hostname = 'localhost' # to change
port = ''
mypc = platform.system()
if mypc == 'Linux':
dbdirectory = '/reg/neh/home1/paiser'
elif mypc == 'Osx':
dbdirectory = '/Users/paiser'
else:
dbdirectory = '/Users/paiser'
dbname = '%s/aaaadb' % dbdirectory
manager = MotorManager(scheme, user, passwd, hostname, port, dbname)
manager.create_alltables()
amo = manager.addinstrument('AMO')
cxi = manager.addinstrument('CXI')
lamp = manager.addlocation('LAMP', amo)
#lamp1 = manager.addlocation('LAMP1', amo)
#sl02 = manager.addcomponent('slits02', lamp)
sb2 = manager.addlocation('SB2', cxi)
sb3 = manager.addlocation('SB3', cxi)
sl00 = manager.addcomponent('slits00', lamp)
sl01 = manager.addcomponent('slits01', sb2)
sl02 = manager.addcomponent('slits02', sb2)
sl03 = manager.addcomponent('slits03', sb3)
sl04 = manager.addcomponent('slits04', sb3)
sl05 = manager.addcomponent('slits05', sb3)
print '\nTHIS IS OK'
print 'sb2.components.count()', sb2.components.count()
print 'sb3.components.count()', sb3.components.count()
print '\nTHIS IS OK'
print 'amo.locations.count()', amo.locations.count()
print 'cxi.locations.count()', cxi.locations.count()
print '\nTHIS IS OK'
print manager.getlocations(amo)
print manager.getlocations(cxi)
print '\nTHIS IS OK'
print manager.getcomponents(sb2)
print manager.getcomponents(sb3)
print manager.getcomponents(lamp)
print'\nDEL COMPONENT, LOCATION, INSTRUMENT'
print manager.getcomponents(sb3)
print 'Deleting', sl05.name
manager.delcomponent(sl05)
print manager.getcomponents(sb3)
print 'Adding Motors to Components'
mot00 = manager.adddevice('AMO:TST:MMS:01', sl00)
mot01 = manager.adddevice('CXI:TST:MMS:01', sl03)
mot02 = manager.adddevice('CXI:TST:MMS:02', sl01)
mot03 = manager.adddevice('CXI:TST:MMS:03', sl03)
print 'mot00', mot00.name, 'added'
print 'mot01', mot01.name, 'added'
print 'mot02', mot02.name, 'added'
print 'mot03', mot03.name, 'added'
print 'ONGOING: Now the configuration setup and storage...'
config01 = ('rc=0','el=1')
config02 = ('rc=1','el=1')
cfg01 = manager.addIMSconfiguration(mot01, config01)
cfg02 = manager.addIMSconfiguration(mot02, config02)
print 'cfg01', cfg01
print 'cfg02', cfg02
print sl01.name
m01 = manager.addIMSmotor(mot01, sl01)
#m02 = manager.addIMSmotor(mot02, sl02)
#print 'm01', m01
| gpl-2.0 | 5,738,453,091,251,084,000 | 38.453925 | 256 | 0.609689 | false |
SasView/sasmodels | sasmodels/models/polymer_micelle.py | 1 | 5987 | r"""
This model provides the form factor, $P(q)$, for a micelle with a spherical
core and Gaussian polymer chains attached to the surface, thus may be applied
to block copolymer micelles. To work well the Gaussian chains must be much
smaller than the core, which is often not the case. Please study the
reference carefully.
Definition
----------
The 1D scattering intensity for this model is calculated according to
the equations given by Pedersen (Pedersen, 2000), summarised briefly here.
The micelle core is imagined as $N$ = *n_aggreg* polymer heads, each of volume
$V_\text{core}$, which then defines a micelle core of radius $r$ = *r_core*,
which is a separate parameter even though it could be directly determined.
The Gaussian random coil tails, of gyration radius $R_g$, are imagined
uniformly distributed around the spherical core, centred at a distance
$r + d \cdot R_g$ from the micelle centre, where $d$ = *d_penetration* is
of order unity. A volume $V_\text{corona}$ is defined for each coil. The
model in detail seems to separately parameterize the terms for the shape
of $I(Q)$ and the relative intensity of each term, so use with caution
and check parameters for consistency. The spherical core is monodisperse,
so its intensity and the cross terms may have sharp oscillations (use $q$
resolution smearing if need be to help remove them).
.. math::
P(q) &= N^2\beta^2_s\Phi(qr)^2 + N\beta^2_cP_c(q)
+ 2N^2\beta_s\beta_cS_{sc}(q) + N(N-1)\beta_c^2S_{cc}(q) \\
\beta_s &= V_\text{core}(\rho_\text{core} - \rho_\text{solvent}) \\
\beta_c &= V_\text{corona}(\rho_\text{corona} - \rho_\text{solvent})
where $\rho_\text{core}$, $\rho_\text{corona}$ and $\rho_\text{solvent}$ are
the scattering length densities *sld_core*, *sld_corona* and *sld_solvent*.
For the spherical core of radius $r$
.. math::
\Phi(qr)= \frac{\sin(qr) - qr\cos(qr)}{(qr)^3}
whilst for the Gaussian coils
.. math::
P_c(q) &= 2 [\exp(-Z) + Z - 1] / Z^2 \\
Z &= (q R_g)^2
The sphere to coil (core to corona) and coil to coil (corona to corona) cross
terms are approximated by:
.. math::
S_{sc}(q) &= \Phi(qr)\psi(Z)
\frac{\sin(q(r+d \cdot R_g))}{q(r+d \cdot R_g)} \\
S_{cc}(q) &= \psi(Z)^2
\left[\frac{\sin(q(r+d \cdot R_g))}{q(r+d \cdot R_g)} \right]^2 \\
\psi(Z) &= \frac{[1-\exp^{-Z}]}{Z}
Validation
----------
$P(q)$ above is multiplied by *ndensity*, and a units conversion of $10^{-13}$,
so *scale* is likely 1.0 if the scattering data is in absolute units. This
model has not yet been independently validated.
References
----------
#. J Pedersen, *J. Appl. Cryst.*, 33 (2000) 637-640
Authorship and Verification
----------------------------
* **Translated by :** Richard Heenan **Date:** March 20, 2016
* **Last modified by:** Paul Kienzle **Date:** November 29, 2017
* **Last reviewed by:** Steve King **Date:** November 30, 2017
"""
import numpy as np
from numpy import inf, pi
name = "polymer_micelle"
title = "Polymer micelle model"
description = """
This model provides the form factor, $P(q)$, for a micelle with a spherical
core and Gaussian polymer chains attached to the surface, thus may be applied
to block copolymer micelles. To work well the Gaussian chains must be much
smaller than the core, which is often not the case. Please study the
reference to Pedersen and full documentation carefully.
"""
category = "shape:sphere"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type","description"],
parameters = [
["ndensity", "1e15/cm^3", 8.94, [0.0, inf], "", "Number density of micelles"],
["v_core", "Ang^3", 62624.0, [0.0, inf], "", "Core volume "],
["v_corona", "Ang^3", 61940.0, [0.0, inf], "", "Corona volume"],
["sld_solvent", "1e-6/Ang^2", 6.4, [0.0, inf], "sld", "Solvent scattering length density"],
["sld_core", "1e-6/Ang^2", 0.34, [0.0, inf], "sld", "Core scattering length density"],
["sld_corona", "1e-6/Ang^2", 0.8, [0.0, inf], "sld", "Corona scattering length density"],
["radius_core", "Ang", 45.0, [0.0, inf], "", "Radius of core ( must be >> rg )"],
["rg", "Ang", 20.0, [0.0, inf], "", "Radius of gyration of chains in corona"],
["d_penetration", "", 1.0, [-inf, inf], "", "Factor to mimic non-penetration of Gaussian chains"],
["n_aggreg", "", 6.0, [-inf, inf], "", "Aggregation number of the micelle"],
]
# pylint: enable=bad-whitespace, line-too-long
single = False
source = ["lib/sas_3j1x_x.c", "polymer_micelle.c"]
def random():
"""Return a random parameter set for the model."""
radius_core = 10**np.random.uniform(1, 3)
rg = radius_core * 10**np.random.uniform(-2, -0.3)
d_penetration = np.random.randn()*0.05 + 1
n_aggreg = np.random.randint(3, 30)
# volume of head groups is the core volume over the number of groups,
# with a correction for packing fraction of the head groups.
v_core = 4*pi/3*radius_core**3/n_aggreg * 0.68
# Rg^2 for gaussian coil is a^2n/6 => a^2 = 6 Rg^2/n
# a=2r => r = Rg sqrt(3/2n)
# v = 4/3 pi r^3 n => v = 4/3 pi Rg^3 (3/2n)^(3/2) n = pi Rg^3 sqrt(6/n)
tail_segments = np.random.randint(6, 30)
v_corona = pi * rg**3 * np.sqrt(6/tail_segments)
V = 4*pi/3*(radius_core + rg)**3
pars = dict(
background=0,
scale=1e7/V,
ndensity=8.94,
v_core=v_core,
v_corona=v_corona,
radius_core=radius_core,
rg=rg,
d_penetration=d_penetration,
n_aggreg=n_aggreg,
)
return pars
tests = [
[{}, 0.01, 15.3532],
]
# RKH 20Mar2016 - need to check whether the core & corona volumes are per
# monomer ??? and how aggregation number works!
# renamed from micelle_spherical_core to polymer_micelle,
# moved from shape-independent to spheres section.
# Ought to be able to add polydisp to core? And add ability to x by S(Q) ?
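
# Illustrative pure-Python sketch (editor addition) of the two building blocks in
# the docstring above; the model itself is evaluated from the C code listed in
# `source`, so these helpers are for reference only.
def _sphere_amplitude(qr):
    """Phi(qr) = [sin(qr) - qr cos(qr)] / (qr)^3 for the monodisperse core."""
    return (np.sin(qr) - qr*np.cos(qr)) / qr**3


def _gaussian_coil(q, rg):
    """Debye form factor P_c(q) = 2[exp(-Z) + Z - 1] / Z^2 with Z = (q Rg)^2."""
    Z = (q*rg)**2
    return 2.0*(np.exp(-Z) + Z - 1.0) / Z**2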
| bsd-3-clause | 991,960,999,830,574,600 | 38.649007 | 113 | 0.632036 | false |
oldm/OldMan | oldman/resource/manager.py | 1 | 7765 | from oldman.resource.resource import ClientResource
import logging

from oldman.store.selector import DataStoreSelector
from oldman.model.manager import ClientModelManager
DEFAULT_MODEL_NAME = "Default_Client"
class ClientResourceManager:
"""
TODO: describe
"""
def __init__(self, data_stores, schema_graph=None, attr_extractor=None, oper_extractor=None,
declare_default_operation_functions=True):
self._model_manager = ClientModelManager(self, schema_graph=schema_graph, attr_extractor=attr_extractor,
oper_extractor=oper_extractor,
declare_default_operation_functions=declare_default_operation_functions)
        self._store_selector = DataStoreSelector(data_stores)
        self._logger = logging.getLogger(__name__)
# Default model
self._model_manager.create_model(DEFAULT_MODEL_NAME, {u"@context": {}}, self, untyped=True,
iri_prefix=u"http://localhost/.well-known/genid/client/",
is_default=True)
@property
def model_manager(self):
return self._model_manager
def declare_method(self, method, name, class_iri):
"""Attaches a method to the :class:`~oldman.resource.Resource` objects that are instances of a given RDFS class.
Like in Object-Oriented Programming, this method can be overwritten by attaching a homonymous
method to a class that has a higher inheritance priority (such as a sub-class).
To benefit from this method (or an overwritten one), :class:`~oldman.resource.Resource` objects
must be associated to a :class:`~oldman.model.Model` that corresponds to the RDFS class or to one of its
subclasses.
:param method: Python function that takes as first argument a :class:`~oldman.resource.Resource` object.
:param name: Name assigned to this method.
:param class_iri: Targeted RDFS class. If not overwritten, all the instances
(:class:`~oldman.resource.Resource` objects) should inherit this method.
"""
models = self._model_manager.find_descendant_models(class_iri)
for model in models:
if model.class_iri is None:
continue
model.declare_method(method, name, class_iri)
def new(self, id=None, types=None, hashless_iri=None, collection_iri=None, **kwargs):
"""Creates a new :class:`~oldman.resource.Resource` object **without saving it** in the `data_store`.
The `kwargs` dict can contains regular attribute key-values that will be assigned to
:class:`~oldman.attribute.OMAttribute` objects.
:param id: IRI of the new resource. Defaults to `None`.
If not given, the IRI is generated by the IRI generator of the main model.
:param types: IRIs of RDFS classes the resource is instance of. Defaults to `None`.
Note that these IRIs are used to find the models of the resource
(see :func:`~oldman.resource.manager.ResourceManager.find_models_and_types` for more details).
:param hashless_iri: hash-less IRI that MAY be considered when generating an IRI for the new resource.
Defaults to `None`. Ignored if `id` is given. Must be `None` if `collection_iri` is given.
:param collection_iri: IRI of the controller to which this resource belongs. This information
is used to generate a new IRI if no `id` is given. The IRI generator may ignore it.
Defaults to `None`. Must be `None` if `hashless_iri` is given.
:return: A new :class:`~oldman.resource.Resource` object.
"""
if (types is None or len(types) == 0) and len(kwargs) == 0:
name = id if id is not None else ""
self._logger.info(u"""New resource %s has no type nor attribute.
As such, nothing is stored in the data graph.""" % name)
# Store of the resource
store = self._store_selector.select_store(id=id, types=types, hashless_iri=hashless_iri,
collection_iri=collection_iri, **kwargs)
return ClientResource(self, self._model_manager, store, id=id, types=types, hashless_iri=hashless_iri,
collection_iri=collection_iri, **kwargs)
def create(self, id=None, types=None, hashless_iri=None, collection_iri=None, **kwargs):
"""Creates a new resource and save it in the `data_store`.
See :func:`~oldman.resource.manager.ResourceManager.new` for more details.
"""
return self.new(id=id, types=types, hashless_iri=hashless_iri,
collection_iri=collection_iri, **kwargs).save()
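
    # Illustrative usage (editor addition; the resource type and attribute names
    # below are hypothetical):
    #
    #     manager = ClientResourceManager(data_stores, schema_graph=schema_graph)
    #     manager.import_store_models()
    #     doc = manager.create(types=["http://example.org/vocab#Report"], title="Demo")
    #
    # create() is new() followed by save(), so the resource is persisted immediately.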
def get(self, id=None, types=None, hashless_iri=None, eager_with_reversed_attributes=True, **kwargs):
"""See :func:`oldman.store.datastore.DataStore.get`."""
#TODO: consider parallelism
store_resources = [store.get(id=id, types=types, hashless_iri=hashless_iri,
eager_with_reversed_attributes=eager_with_reversed_attributes, **kwargs)
for store in self._store_selector.select_stores(id=id, types=types,
hashless_iri=hashless_iri, **kwargs)]
returned_store_resources = filter(lambda x: x, store_resources)
resources = self._model_manager.convert_store_resources(returned_store_resources)
resource_count = len(resources)
if resource_count == 1:
return resources[0]
elif resource_count == 0:
return None
#TODO: find a better exception and explain better
#TODO: see if relevant
raise Exception("Non unique object")
def filter(self, types=None, hashless_iri=None, limit=None, eager=False, pre_cache_properties=None, **kwargs):
"""See :func:`oldman.store.datastore.DataStore.filter`."""
#TODO: support again generator. Find a way to aggregate them.
resources = [r for store in self._store_selector.select_stores(types=types, hashless_iri=hashless_iri,
pre_cache_properties=pre_cache_properties,
**kwargs)
for r in store.filter(types=types, hashless_iri=hashless_iri, limit=limit, eager=eager,
pre_cache_properties=pre_cache_properties, **kwargs)]
return self._model_manager.convert_store_resources(resources)
def sparql_filter(self, query):
"""See :func:`oldman.store.datastore.DataStore.sparql_filter`."""
#TODO: support again generator. Find a way to aggregate them.
resources = [r for store in self._store_selector.select_sparql_stores(query)
for r in store.sparql_filter(query)]
return self._model_manager.convert_store_resources(resources)
def use_store_model(self, class_iri, data_store=None):
raise NotImplementedError("TODO: implement me here")
def import_store_models(self):
"""TODO: check possible conflicts with local models."""
for store in self._store_selector.data_stores:
for store_model in store.model_manager.models:
is_default = (store_model.class_iri is None)
self._model_manager.import_model(store_model, store,
is_default=is_default)
def get_model(self, class_name_or_iri):
return self._model_manager.get_model(class_name_or_iri)
| bsd-3-clause | 4,425,027,235,086,407,700 | 55.678832 | 121 | 0.616098 | false |
erinspace/modular-file-renderer | mfr_code_pygments/__init__.py | 1 | 1672 | # -*- coding: utf-8 -*-
from mfr import FileHandler, get_file_extension
try: # requires pygments
from mfr_code_pygments.render import render_html
renderers = {
'html': render_html,
}
except ImportError:
renderers = {}
EXTENSIONS = [
'.rb',
'.c',
'.cs',
'.ahk',
'.rs',
'.c9search_results',
'.scm',
'.vhd',
'.vbs',
'.twig',
'.jack',
'.jl',
'.js',
'.matlab',
'.tcl',
'.dot',
'.plg',
'.clj',
'.rd',
'.pl',
'.ejs',
'.scad',
'.lisp',
'.py',
'.cpp',
'.snippets',
'.css',
'.vm',
'.groovy',
'.liquid',
'.xq',
'.proto',
'.php',
'.asm',
'.sh',
'.curly',
'.hs',
'.hx',
'.tex',
'.sjs',
'.mysql',
'.html',
'.space',
'.haml',
'.cbl',
'.styl',
'.ada',
'.lucene',
'.pas',
'.tmsnippet',
'.ps1',
'.yaml',
'.soy',
'.sass',
'.scala',
'.scss',
'.ini',
'.bat',
'.glsl',
'.diff',
'.frt',
'.less',
'.erl',
'.erb',
'.toml',
'.hbs',
'.m',
'.sql',
'.json',
'.d',
'.lua',
'.as',
'.nix',
'.txt',
'.r',
'.v',
'.jade',
'.go',
'.ts',
'.md',
'.jq',
'.mc',
'.xml',
'.rhtml',
'.ml',
'.dart',
'.pgsql',
'.coffee',
'.lp',
'.ls',
'.jsx',
'.asciidoc',
'.jsp',
'.logic',
'.properties',
'.textile',
'.lsl',
'.abap',
'.ftl',
'.java',
'.cfm'
]
class Handler(FileHandler):
renderers = renderers
def detect(self, fp):
return get_file_extension(fp.name) in EXTENSIONS
| apache-2.0 | -7,561,032,911,051,015,000 | 12.704918 | 56 | 0.373804 | false |
decodio/l10n_hr | l10n_hr_vat/wizard/wizard_pdv_knjiga.py | 1 | 11523 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Author:
# mail:
# Copyright:
# Contributions:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import os
import uuid
from lxml import etree, objectify
from openerp.osv import orm, fields
from openerp.tools.translate import _
from openerp.exceptions import Warning
class pdv_knjiga(orm.TransientModel):
_name = 'pdv.knjiga'
_inherit = 'account.common.report'
_columns = {
'chart_tax_id': fields.many2one('account.tax.code', 'Chart of Tax',
help='Select Charts of Taxes', required=True,
domain=[('parent_id', '=', False)]),
'knjiga_id': fields.many2one('l10n_hr_pdv.knjiga', 'Porezna knjiga',
help='Odaberite poreznu knjigu za ispis', required=True),
'date_start': fields.date('Od datuma'),
'date_stop': fields.date('Do datuma'),
'journal_ids': fields.many2many('account.journal', 'pdv_knjiga_journal_rel', 'pdv_knjiga_id', 'journal_id',
'Journals'),
'data': fields.binary('File', readonly=True),
'name': fields.char('Filename', size=128, readonly=True),
'state': fields.selection((('choose', 'choose'), ('get', 'get'),)),
}
def _get_tax(self, cr, uid, context=None):
taxes = self.pool.get('account.tax.code').search(cr, uid, [('parent_id', '=', False)], limit=1)
return taxes and taxes[0] or False
_defaults = {
'chart_tax_id': _get_tax,
'journal_ids': [],
'state': 'choose',
}
# def export_vat(self, cr, uid, ids, context=None):
# """
    #         Copy the parser's logic here without invoking the parser
# """
# if context is None:
# context = {}
def create_vat(self, cr, uid, ids, context=None):
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
datas['form'] = self.read(cr, uid, ids)[0]
if not datas['form'].get('journal_ids', False):
sql = """SELECT id FROM account_journal"""
cr.execute(sql)
datas['form']['journal_ids'] = [a for (a,) in cr.fetchall()]
for field in datas['form'].keys():
if isinstance(datas['form'][field], tuple):
datas['form'][field] = datas['form'][field][0]
if datas['form']['knjiga_id']:
knjiga_type = self.pool.get('l10n_hr_pdv.knjiga').browse(cr, uid, datas['form']['knjiga_id']).type
else:
raise orm.except_orm(_('Knjiga nije upisana!'),
_("Knjiga je obavezan podatak kod ovog ispisa!"))
# if (datas['form']['period_from'] and not datas['form']['period_to']) or \
# (not datas['form']['period_from'] and datas['form']['period_to']):
# raise orm.except_orm(_('Krivi periodi!'),_("Potrebno je upisati oba perioda za ispis po periodima!"))
#
# if (datas['form']['date_start'] and not datas['form']['date_stop']) or \
# (not datas['form']['date_start'] and datas['form']['date_stop']):
# raise orm.except_orm(_('Krivo razdoblje!'),_("Potrebno je upisati oba datuma za ispis po datumima!"))
report_name = None
if knjiga_type == 'ira':
# report_name = 'knjiga.ira'
# report_name = 'knjiga.ira.eu.2014'
report_name = 'knjiga_ira_ods'
elif knjiga_type in ('ura', 'ura_uvoz'):
# report_name = 'knjiga.ura'
# report_name = 'knjiga.ura.eu.2014'
report_name = 'knjiga_ura_ods'
elif knjiga_type in ('ura_tu', 'ura_st', 'ura_nerezident'):
report_name = 'knjiga_ura_prijenos'
if context.get('xml'):
return self.create_xml(cr, uid, ids, context, datas, report_name)
return {
'type': 'ir.actions.report.xml',
'report_name': report_name,
'datas': datas,
}
def create_xml(self, cr, uid, ids, context=None, datas=False, report_name=False):
form = self.browse(cr, uid, ids)[0]
if not form.company_id.podrucje_djelatnosti:
raise Warning(_('Warning'),
_('Please set company data : Area of activity'))
if form.knjiga_id.type != 'ura':
raise Warning(_('Warning'),
_('Only URA is for XML export!'))
try:
from ..report import knjiga_ura as URA
from ..report.vat_book_report_common import get_vat_book_report_common
from . import xml_common
except:
raise Warning(_('Warning'),
_('Important librarys missing!'))
def decimal_num(num):
        # Because the incoming data is already rounded to 3 decimal places...
num = str(round(num, 2))
dec = num.split('.')[1]
if dec == '0':
num += '0'
elif len(dec) > 2:
num = num[:-1]
return num
parser = URA.Parser(cr, uid, report_name, context=context)
parser.set_context(objects=[], data=datas, ids=[])
parser_ctx = parser.localcontext
lines = parser_ctx['get_lines'](datas)
total = parser_ctx['get_totals']()
total = total and total[0] or False
metadata, identifier = xml_common.create_xml_metadata(self, {
'naslov': u'Knjiga primljenih (ulaznih) računa',
'autor': ' '.join((
form.company_id.responsible_fname,
form.company_id.responsible_lname)),
'format': 'text/xml',
'jezik': 'hr-HR',
'uskladjenost': 'ObrazacURA-v1-0',
'tip': u'Elektronički obrazac',
'adresant': 'Ministarstvo Financija, Porezna uprava, Zagreb'
})
EM = objectify.ElementMaker(annotate=False)
date_start = form.date_start and form.date_start or \
form.period_from.date_start
date_stop = form.date_stop and form.date_stop or \
form.period_to.date_stop
zaglavlje = EM.Zaglavlje(
EM.Razdoblje(
EM.DatumOd(date_start),
EM.DatumDo(date_stop)),
EM.Obveznik(
EM.OIB(form.company_id.vat[2:]),
EM.Naziv(form.company_id.name),
EM.Adresa(
EM.Mjesto(form.company_id.city),
EM.Ulica(form.company_id.ulica),
EM.Broj(form.company_id.kbr),
EM.DodatakKucnomBroju(form.company_id.kbr_dodatak and \
form.company_id.kbr_dodatak or '')
),
EM.PodrucjeDjelatnosti(form.company_id.podrucje_djelatnosti),
EM.SifraDjelatnosti(form.company_id.l10n_hr_base_nkd_id.code),),
EM.ObracunSastavio(
EM.Ime(form.company_id.responsible_fname),
EM.Prezime(form.company_id.responsible_lname),
),
)
racuni = []
errors = []
for line in lines:
partner = line['partner_name'].split(', ')
partner_r4 = partner[0]
partner_r5 = ', '.join((partner[1], partner[2]))
if line['partner_oib'] == '':
errors.append(line)
continue
racuni.append(EM.R(
EM.R1(line['rbr'].replace('.', '')),
EM.R2(line['invoice_number']),
EM.R3(line['invoice_date']),
EM.R4(partner_r4),
EM.R5(partner_r5),
EM.R6(line['vat_type']),
EM.R7(line['partner_oib'].lstrip().rstrip()),
EM.R8(decimal_num(line['stupac6'])),
EM.R9(decimal_num(line['stupac7'])),
EM.R10(decimal_num(line['stupac8'])),
EM.R11(decimal_num(line['stupac9'])),
EM.R12(decimal_num(line['stupac10'])),
EM.R13(decimal_num(line['stupac11'])),
EM.R14(decimal_num(line['stupac12'])),
EM.R15(decimal_num(line['stupac13'])),
EM.R16(decimal_num(line['stupac14'])),
EM.R17(decimal_num(line['stupac15'])),
EM.R18(decimal_num(line['stupac16'])),
))
Racuni = EM.Racuni(EM.R)
Racuni.R = racuni
Ukupno = EM.Ukupno(
EM.U8(decimal_num(total['stupac6'])),
EM.U9(decimal_num(total['stupac7'])),
EM.U10(decimal_num(total['stupac8'])),
EM.U11(decimal_num(total['stupac9'])),
EM.U12(decimal_num(total['stupac10'])),
EM.U13(decimal_num(total['stupac11'])),
EM.U14(decimal_num(total['stupac12'])),
EM.U15(decimal_num(total['stupac13'])),
EM.U16(decimal_num(total['stupac14'])),
EM.U17(decimal_num(total['stupac15'])),
EM.U18(decimal_num(total['stupac16'])),
)
tijelo = EM.Tijelo(Racuni, Ukupno)
PDV = objectify.ElementMaker(
namespace='http://e-porezna.porezna-uprava.hr/sheme/zahtjevi/ObrazacURA/v1-0',
)
obrazac = PDV.ObrazacURA(metadata, zaglavlje, tijelo, verzijaSheme='1.0')
pdv_xml = xml_common.etree_tostring(self, obrazac)
pdv_xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + pdv_xml
# print pdv_xml
# TODO: validate xml
# xml_common.validate_xml
file_path = os.path.dirname(os.path.abspath(__file__))
xml = {
'path': file_path,
'xsd_path': 'shema/URA',
'xsd_name': 'ObrazacURA-v1-0.xsd',
'xml': pdv_xml
}
valid = xml_common.validate_xml(self, xml)
data64 = base64.encodestring(pdv_xml)
xml_name = 'PDV_Obrazac_%s_%s.XML' % (date_start.replace('-', ''),
date_stop.replace('-',''))
form.write({'state': 'get',
'data': data64,
'name': xml_name
})
if errors:
msg= "Errors\n"
for e in errors:
msg += "%s - %s\n" % (e['rbr'], e['partner_name'])
raise Warning('Nedostaje OIB', msg)
return {
'type': 'ir.actions.act_window',
'res_model': 'pdv.knjiga',
'view_mode': 'form',
'view_type': 'form',
'res_id': ids[0],
'views': [(False, 'form')],
'target': 'new',
}
| agpl-3.0 | 8,604,639,130,252,183,000 | 39.566901 | 115 | 0.512369 | false |
dreadrel/UWF_2014_spring_COP3990C-2507 | notebooks/scripts/book_code/code/getattribute-person.py | 1 | 1434 | class Person: # Portable: 2.X or 3.X
def __init__(self, name): # On [Person()]
self._name = name # Triggers __setattr__!
def __getattribute__(self, attr): # On [obj.any]
print('get: ' + attr)
if attr == 'name': # Intercept all names
attr = '_name' # Map to internal name
return object.__getattribute__(self, attr) # Avoid looping here
def __setattr__(self, attr, value): # On [obj.any = value]
print('set: ' + attr)
if attr == 'name':
attr = '_name' # Set internal name
self.__dict__[attr] = value # Avoid looping here
def __delattr__(self, attr): # On [del obj.any]
print('del: ' + attr)
if attr == 'name':
attr = '_name' # Avoid looping here too
del self.__dict__[attr] # but much less common
bob = Person('Bob Smith') # bob has a managed attribute
print(bob.name) # Runs __getattr__
bob.name = 'Robert Smith' # Runs __setattr__
print(bob.name)
del bob.name # Runs __delattr__
print('-'*20)
sue = Person('Sue Jones') # sue inherits property too
print(sue.name)
#print(Person.name.__doc__) # No equivalent here
| apache-2.0 | 6,289,622,435,151,963,000 | 43.8125 | 76 | 0.459554 | false |
kissgyorgy/Womanager | mainwindow.py | 1 | 47297 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Thu Oct 10 19:57:03 2013
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(756, 515)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.groupBox = QtGui.QGroupBox(self.centralwidget)
self.groupBox.setEnabled(True)
self.groupBox.setMinimumSize(QtCore.QSize(300, 75))
self.groupBox.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.groupBox.setVisible(False)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.label_4 = QtGui.QLabel(self.groupBox)
self.label_4.setGeometry(QtCore.QRect(30, 30, 62, 16))
self.label_4.setMargin(0)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.label_5 = QtGui.QLabel(self.groupBox)
self.label_5.setGeometry(QtCore.QRect(30, 50, 151, 16))
self.label_5.setStyleSheet(_fromUtf8(""))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setGeometry(QtCore.QRect(10, 30, 16, 16))
self.label.setStyleSheet(_fromUtf8("background-color: rgb(255, 155, 103);"))
self.label.setText(_fromUtf8(""))
self.label.setObjectName(_fromUtf8("label"))
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setGeometry(QtCore.QRect(10, 50, 16, 16))
self.label_2.setStyleSheet(_fromUtf8("background-color: rgb(168, 255, 171);"))
self.label_2.setText(_fromUtf8(""))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setGeometry(QtCore.QRect(90, 30, 16, 16))
self.label_3.setStyleSheet(_fromUtf8("background-color: rgb(255,255, 255);"))
self.label_3.setText(_fromUtf8(""))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.label_7 = QtGui.QLabel(self.groupBox)
self.label_7.setGeometry(QtCore.QRect(110, 30, 71, 16))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.label_8 = QtGui.QLabel(self.groupBox)
self.label_8.setGeometry(QtCore.QRect(200, 30, 16, 16))
self.label_8.setAutoFillBackground(True)
self.label_8.setAlignment(QtCore.Qt.AlignCenter)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.label_9 = QtGui.QLabel(self.groupBox)
self.label_9.setGeometry(QtCore.QRect(220, 30, 71, 16))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.horizontalLayout.addWidget(self.groupBox)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.dateEdit = QtGui.QDateEdit(self.centralwidget)
self.dateEdit.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.dateEdit.setAlignment(QtCore.Qt.AlignCenter)
self.dateEdit.setDate(QtCore.QDate(2013, 8, 1))
self.dateEdit.setMinimumDate(QtCore.QDate(1900, 8, 1))
self.dateEdit.setCurrentSection(QtGui.QDateTimeEdit.MonthSection)
self.dateEdit.setCalendarPopup(False)
self.dateEdit.setCurrentSectionIndex(1)
self.dateEdit.setObjectName(_fromUtf8("dateEdit"))
self.gridLayout.addWidget(self.dateEdit, 1, 1, 1, 1)
self.label_6 = QtGui.QLabel(self.centralwidget)
self.label_6.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout.addWidget(self.label_6, 0, 1, 1, 1)
self.horizontalLayout.addLayout(self.gridLayout)
self.pushButton = QtGui.QPushButton(self.centralwidget)
self.pushButton.setMinimumSize(QtCore.QSize(0, 75))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.horizontalLayout.addWidget(self.pushButton)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.first_month_label = QtGui.QLabel(self.centralwidget)
self.first_month_label.setText(_fromUtf8(""))
self.first_month_label.setAlignment(QtCore.Qt.AlignCenter)
self.first_month_label.setObjectName(_fromUtf8("first_month_label"))
self.verticalLayout_2.addWidget(self.first_month_label)
self.first_month_table = QtGui.QTableWidget(self.centralwidget)
self.first_month_table.setMinimumSize(QtCore.QSize(0, 0))
self.first_month_table.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.first_month_table.setObjectName(_fromUtf8("first_month_table"))
self.first_month_table.setColumnCount(31)
self.first_month_table.setRowCount(3)
item = QtGui.QTableWidgetItem()
self.first_month_table.setVerticalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setVerticalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setVerticalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(13)
item.setFont(font)
self.first_month_table.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(5, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(6, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(7, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(8, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(9, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(10, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(11, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(12, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(13, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(14, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(15, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(16, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(17, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(18, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(19, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(20, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(21, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(22, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(23, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(24, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(25, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(26, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(27, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(28, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(29, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(30, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 0, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 1, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 2, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 3, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 4, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 5, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 6, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 7, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 8, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 9, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 10, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 11, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 12, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 0, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 1, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 2, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 3, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 4, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 5, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 6, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 7, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 8, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 9, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 10, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 11, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 12, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 13, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 14, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 15, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 16, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 17, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 18, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 19, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 20, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 21, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 22, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 23, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 24, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 25, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 26, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 27, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 28, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 29, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 30, item)
self.first_month_table.horizontalHeader().setDefaultSectionSize(22)
self.first_month_table.horizontalHeader().setMinimumSectionSize(5)
self.first_month_table.verticalHeader().setDefaultSectionSize(22)
self.first_month_table.verticalHeader().setMinimumSectionSize(5)
self.verticalLayout_2.addWidget(self.first_month_table)
self.second_month_label = QtGui.QLabel(self.centralwidget)
self.second_month_label.setText(_fromUtf8(""))
self.second_month_label.setAlignment(QtCore.Qt.AlignCenter)
self.second_month_label.setObjectName(_fromUtf8("second_month_label"))
self.verticalLayout_2.addWidget(self.second_month_label)
self.second_month_table = QtGui.QTableWidget(self.centralwidget)
self.second_month_table.setMinimumSize(QtCore.QSize(0, 0))
self.second_month_table.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.second_month_table.setLayoutDirection(QtCore.Qt.LeftToRight)
self.second_month_table.setObjectName(_fromUtf8("second_month_table"))
self.second_month_table.setColumnCount(31)
self.second_month_table.setRowCount(3)
item = QtGui.QTableWidgetItem()
self.second_month_table.setVerticalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setVerticalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setVerticalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(13)
item.setFont(font)
self.second_month_table.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(5, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(6, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(7, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(8, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(9, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(10, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(11, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(12, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(13, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(14, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(15, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(16, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(17, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(18, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(19, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(20, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(21, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(22, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(23, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(24, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(25, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(26, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(27, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(28, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(29, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(30, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 0, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 1, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 2, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 3, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 4, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 5, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 6, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 7, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 8, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 9, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 10, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 11, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 12, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 13, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 0, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 1, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 2, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 3, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 4, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 5, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 6, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 7, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 8, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 9, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 10, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 11, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 12, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 13, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 14, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 15, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 16, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 17, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 18, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 19, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 20, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 21, item)
self.second_month_table.horizontalHeader().setDefaultSectionSize(22)
self.second_month_table.horizontalHeader().setMinimumSectionSize(5)
self.second_month_table.verticalHeader().setDefaultSectionSize(22)
self.second_month_table.verticalHeader().setMinimumSectionSize(5)
self.verticalLayout_2.addWidget(self.second_month_table)
self.third_month_label = QtGui.QLabel(self.centralwidget)
self.third_month_label.setText(_fromUtf8(""))
self.third_month_label.setAlignment(QtCore.Qt.AlignCenter)
self.third_month_label.setObjectName(_fromUtf8("third_month_label"))
self.verticalLayout_2.addWidget(self.third_month_label)
self.third_month_table = QtGui.QTableWidget(self.centralwidget)
self.third_month_table.setMinimumSize(QtCore.QSize(0, 0))
self.third_month_table.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.third_month_table.setObjectName(_fromUtf8("third_month_table"))
self.third_month_table.setColumnCount(31)
self.third_month_table.setRowCount(3)
item = QtGui.QTableWidgetItem()
self.third_month_table.setVerticalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setVerticalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setVerticalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(13)
item.setFont(font)
self.third_month_table.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(5, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(6, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(7, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(8, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(9, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(10, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(11, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(12, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(13, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(14, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(15, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(16, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(17, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(18, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(19, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(20, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(21, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(22, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(23, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(24, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(25, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(26, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(27, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(28, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(29, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(30, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 0, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 1, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 2, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 3, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 4, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 5, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 6, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 7, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 8, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 9, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 10, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 11, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 12, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 13, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 14, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 15, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 16, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 17, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 18, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 0, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 1, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 2, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 3, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 4, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 5, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 6, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 7, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 8, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 9, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 10, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 11, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 12, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 13, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 14, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 15, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 16, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 17, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 18, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 19, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 20, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 21, item)
self.third_month_table.horizontalHeader().setDefaultSectionSize(22)
self.third_month_table.horizontalHeader().setMinimumSectionSize(5)
self.third_month_table.verticalHeader().setDefaultSectionSize(22)
self.third_month_table.verticalHeader().setMinimumSectionSize(5)
self.verticalLayout_2.addWidget(self.third_month_table)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 756, 22))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.fileMenu = QtGui.QMenu(self.menubar)
self.fileMenu.setObjectName(_fromUtf8("fileMenu"))
self.helpMenu = QtGui.QMenu(self.menubar)
self.helpMenu.setObjectName(_fromUtf8("helpMenu"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionSave = QtGui.QAction(MainWindow)
self.actionSave.setObjectName(_fromUtf8("actionSave"))
self.actionQuit = QtGui.QAction(MainWindow)
self.actionQuit.setObjectName(_fromUtf8("actionQuit"))
self.actionLoad = QtGui.QAction(MainWindow)
self.actionLoad.setObjectName(_fromUtf8("actionLoad"))
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionHelp = QtGui.QAction(MainWindow)
self.actionHelp.setObjectName(_fromUtf8("actionHelp"))
self.actionExport = QtGui.QAction(MainWindow)
self.actionExport.setObjectName(_fromUtf8("actionExport"))
self.fileMenu.addAction(self.actionSave)
self.fileMenu.addAction(self.actionLoad)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.actionExport)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.actionQuit)
self.helpMenu.addAction(self.actionHelp)
self.helpMenu.addSeparator()
self.helpMenu.addAction(self.actionAbout)
self.menubar.addAction(self.fileMenu.menuAction())
self.menubar.addAction(self.helpMenu.menuAction())
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.groupBox.show)
QtCore.QObject.connect(self.actionQuit, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.close)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.pushButton, self.dateEdit)
MainWindow.setTabOrder(self.dateEdit, self.first_month_table)
MainWindow.setTabOrder(self.first_month_table, self.second_month_table)
MainWindow.setTabOrder(self.second_month_table, self.third_month_table)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Womanager", None))
self.groupBox.setTitle(_translate("MainWindow", "Legend", None))
self.label_4.setText(_translate("MainWindow", "Friday", None))
self.label_5.setText(_translate("MainWindow", "3rd week free weekday", None))
self.label_7.setText(_translate("MainWindow", "free day", None))
self.label_8.setText(_translate("MainWindow", "X", None))
self.label_9.setText(_translate("MainWindow", "workday", None))
self.dateEdit.setDisplayFormat(_translate("MainWindow", "yyyy.MM.", None))
self.label_6.setText(_translate("MainWindow", "first month:", None))
self.pushButton.setText(_translate("MainWindow", "Manage!", None))
item = self.first_month_table.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "Jack", None))
item = self.first_month_table.verticalHeaderItem(1)
item.setText(_translate("MainWindow", "Jane", None))
item = self.first_month_table.verticalHeaderItem(2)
item.setText(_translate("MainWindow", "Joe", None))
item = self.first_month_table.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "1", None))
item = self.first_month_table.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "2", None))
item = self.first_month_table.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "3", None))
item = self.first_month_table.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "4", None))
item = self.first_month_table.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "5", None))
item = self.first_month_table.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "6", None))
item = self.first_month_table.horizontalHeaderItem(6)
item.setText(_translate("MainWindow", "7", None))
item = self.first_month_table.horizontalHeaderItem(7)
item.setText(_translate("MainWindow", "8", None))
item = self.first_month_table.horizontalHeaderItem(8)
item.setText(_translate("MainWindow", "9", None))
item = self.first_month_table.horizontalHeaderItem(9)
item.setText(_translate("MainWindow", "10", None))
item = self.first_month_table.horizontalHeaderItem(10)
item.setText(_translate("MainWindow", "11", None))
item = self.first_month_table.horizontalHeaderItem(11)
item.setText(_translate("MainWindow", "12", None))
item = self.first_month_table.horizontalHeaderItem(12)
item.setText(_translate("MainWindow", "13", None))
item = self.first_month_table.horizontalHeaderItem(13)
item.setText(_translate("MainWindow", "14", None))
item = self.first_month_table.horizontalHeaderItem(14)
item.setText(_translate("MainWindow", "15", None))
item = self.first_month_table.horizontalHeaderItem(15)
item.setText(_translate("MainWindow", "16", None))
item = self.first_month_table.horizontalHeaderItem(16)
item.setText(_translate("MainWindow", "17", None))
item = self.first_month_table.horizontalHeaderItem(17)
item.setText(_translate("MainWindow", "18", None))
item = self.first_month_table.horizontalHeaderItem(18)
item.setText(_translate("MainWindow", "19", None))
item = self.first_month_table.horizontalHeaderItem(19)
item.setText(_translate("MainWindow", "20", None))
item = self.first_month_table.horizontalHeaderItem(20)
item.setText(_translate("MainWindow", "21", None))
item = self.first_month_table.horizontalHeaderItem(21)
item.setText(_translate("MainWindow", "22", None))
item = self.first_month_table.horizontalHeaderItem(22)
item.setText(_translate("MainWindow", "23", None))
item = self.first_month_table.horizontalHeaderItem(23)
item.setText(_translate("MainWindow", "24", None))
item = self.first_month_table.horizontalHeaderItem(24)
item.setText(_translate("MainWindow", "25", None))
item = self.first_month_table.horizontalHeaderItem(25)
item.setText(_translate("MainWindow", "26", None))
item = self.first_month_table.horizontalHeaderItem(26)
item.setText(_translate("MainWindow", "27", None))
item = self.first_month_table.horizontalHeaderItem(27)
item.setText(_translate("MainWindow", "28", None))
item = self.first_month_table.horizontalHeaderItem(28)
item.setText(_translate("MainWindow", "29", None))
item = self.first_month_table.horizontalHeaderItem(29)
item.setText(_translate("MainWindow", "30", None))
item = self.first_month_table.horizontalHeaderItem(30)
item.setText(_translate("MainWindow", "31", None))
__sortingEnabled = self.first_month_table.isSortingEnabled()
self.first_month_table.setSortingEnabled(False)
self.first_month_table.setSortingEnabled(__sortingEnabled)
item = self.second_month_table.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "Jack", None))
item = self.second_month_table.verticalHeaderItem(1)
item.setText(_translate("MainWindow", "Jane", None))
item = self.second_month_table.verticalHeaderItem(2)
item.setText(_translate("MainWindow", "Joe", None))
item = self.second_month_table.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "1", None))
item = self.second_month_table.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "2", None))
item = self.second_month_table.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "3", None))
item = self.second_month_table.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "4", None))
item = self.second_month_table.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "5", None))
item = self.second_month_table.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "6", None))
item = self.second_month_table.horizontalHeaderItem(6)
item.setText(_translate("MainWindow", "7", None))
item = self.second_month_table.horizontalHeaderItem(7)
item.setText(_translate("MainWindow", "8", None))
item = self.second_month_table.horizontalHeaderItem(8)
item.setText(_translate("MainWindow", "9", None))
item = self.second_month_table.horizontalHeaderItem(9)
item.setText(_translate("MainWindow", "10", None))
item = self.second_month_table.horizontalHeaderItem(10)
item.setText(_translate("MainWindow", "11", None))
item = self.second_month_table.horizontalHeaderItem(11)
item.setText(_translate("MainWindow", "12", None))
item = self.second_month_table.horizontalHeaderItem(12)
item.setText(_translate("MainWindow", "13", None))
item = self.second_month_table.horizontalHeaderItem(13)
item.setText(_translate("MainWindow", "14", None))
item = self.second_month_table.horizontalHeaderItem(14)
item.setText(_translate("MainWindow", "15", None))
item = self.second_month_table.horizontalHeaderItem(15)
item.setText(_translate("MainWindow", "16", None))
item = self.second_month_table.horizontalHeaderItem(16)
item.setText(_translate("MainWindow", "17", None))
item = self.second_month_table.horizontalHeaderItem(17)
item.setText(_translate("MainWindow", "18", None))
item = self.second_month_table.horizontalHeaderItem(18)
item.setText(_translate("MainWindow", "19", None))
item = self.second_month_table.horizontalHeaderItem(19)
item.setText(_translate("MainWindow", "20", None))
item = self.second_month_table.horizontalHeaderItem(20)
item.setText(_translate("MainWindow", "21", None))
item = self.second_month_table.horizontalHeaderItem(21)
item.setText(_translate("MainWindow", "22", None))
item = self.second_month_table.horizontalHeaderItem(22)
item.setText(_translate("MainWindow", "23", None))
item = self.second_month_table.horizontalHeaderItem(23)
item.setText(_translate("MainWindow", "24", None))
item = self.second_month_table.horizontalHeaderItem(24)
item.setText(_translate("MainWindow", "25", None))
item = self.second_month_table.horizontalHeaderItem(25)
item.setText(_translate("MainWindow", "26", None))
item = self.second_month_table.horizontalHeaderItem(26)
item.setText(_translate("MainWindow", "27", None))
item = self.second_month_table.horizontalHeaderItem(27)
item.setText(_translate("MainWindow", "28", None))
item = self.second_month_table.horizontalHeaderItem(28)
item.setText(_translate("MainWindow", "29", None))
item = self.second_month_table.horizontalHeaderItem(29)
item.setText(_translate("MainWindow", "30", None))
item = self.second_month_table.horizontalHeaderItem(30)
item.setText(_translate("MainWindow", "31", None))
__sortingEnabled = self.second_month_table.isSortingEnabled()
self.second_month_table.setSortingEnabled(False)
self.second_month_table.setSortingEnabled(__sortingEnabled)
item = self.third_month_table.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "Jack", None))
item = self.third_month_table.verticalHeaderItem(1)
item.setText(_translate("MainWindow", "Jane", None))
item = self.third_month_table.verticalHeaderItem(2)
item.setText(_translate("MainWindow", "Joe", None))
item = self.third_month_table.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "1", None))
item = self.third_month_table.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "2", None))
item = self.third_month_table.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "3", None))
item = self.third_month_table.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "4", None))
item = self.third_month_table.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "5", None))
item = self.third_month_table.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "6", None))
item = self.third_month_table.horizontalHeaderItem(6)
item.setText(_translate("MainWindow", "7", None))
item = self.third_month_table.horizontalHeaderItem(7)
item.setText(_translate("MainWindow", "8", None))
item = self.third_month_table.horizontalHeaderItem(8)
item.setText(_translate("MainWindow", "9", None))
item = self.third_month_table.horizontalHeaderItem(9)
item.setText(_translate("MainWindow", "10", None))
item = self.third_month_table.horizontalHeaderItem(10)
item.setText(_translate("MainWindow", "11", None))
item = self.third_month_table.horizontalHeaderItem(11)
item.setText(_translate("MainWindow", "12", None))
item = self.third_month_table.horizontalHeaderItem(12)
item.setText(_translate("MainWindow", "13", None))
item = self.third_month_table.horizontalHeaderItem(13)
item.setText(_translate("MainWindow", "14", None))
item = self.third_month_table.horizontalHeaderItem(14)
item.setText(_translate("MainWindow", "15", None))
item = self.third_month_table.horizontalHeaderItem(15)
item.setText(_translate("MainWindow", "16", None))
item = self.third_month_table.horizontalHeaderItem(16)
item.setText(_translate("MainWindow", "17", None))
item = self.third_month_table.horizontalHeaderItem(17)
item.setText(_translate("MainWindow", "18", None))
item = self.third_month_table.horizontalHeaderItem(18)
item.setText(_translate("MainWindow", "19", None))
item = self.third_month_table.horizontalHeaderItem(19)
item.setText(_translate("MainWindow", "20", None))
item = self.third_month_table.horizontalHeaderItem(20)
item.setText(_translate("MainWindow", "21", None))
item = self.third_month_table.horizontalHeaderItem(21)
item.setText(_translate("MainWindow", "22", None))
item = self.third_month_table.horizontalHeaderItem(22)
item.setText(_translate("MainWindow", "23", None))
item = self.third_month_table.horizontalHeaderItem(23)
item.setText(_translate("MainWindow", "24", None))
item = self.third_month_table.horizontalHeaderItem(24)
item.setText(_translate("MainWindow", "25", None))
item = self.third_month_table.horizontalHeaderItem(25)
item.setText(_translate("MainWindow", "26", None))
item = self.third_month_table.horizontalHeaderItem(26)
item.setText(_translate("MainWindow", "27", None))
item = self.third_month_table.horizontalHeaderItem(27)
item.setText(_translate("MainWindow", "28", None))
item = self.third_month_table.horizontalHeaderItem(28)
item.setText(_translate("MainWindow", "29", None))
item = self.third_month_table.horizontalHeaderItem(29)
item.setText(_translate("MainWindow", "30", None))
item = self.third_month_table.horizontalHeaderItem(30)
item.setText(_translate("MainWindow", "31", None))
__sortingEnabled = self.third_month_table.isSortingEnabled()
self.third_month_table.setSortingEnabled(False)
self.third_month_table.setSortingEnabled(__sortingEnabled)
self.fileMenu.setTitle(_translate("MainWindow", "File", None))
self.helpMenu.setTitle(_translate("MainWindow", "Help", None))
self.actionSave.setText(_translate("MainWindow", "Save", None))
self.actionQuit.setText(_translate("MainWindow", "Quit", None))
self.actionLoad.setText(_translate("MainWindow", "Load", None))
self.actionAbout.setText(_translate("MainWindow", "About", None))
self.actionHelp.setText(_translate("MainWindow", "Help", None))
self.actionExport.setText(_translate("MainWindow", "Export...", None))
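
# Illustrative usage sketch (added; not part of the generated file). pyuic4
# output for a Designer form named "MainWindow" conventionally wraps setupUi()
# and retranslateUi() in a class called Ui_MainWindow; that class name is an
# assumption here, since the class statement lies above this excerpt.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    window = QtGui.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())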
| gpl-2.0 | -7,005,826,944,289,512,000 | 52.624717 | 106 | 0.672495 | false |
figshare/Total-Impact | plugins/aliases/url2id/Plugin.py | 1 | 5545 | #!/usr/bin/env python
import simplejson
import json
import urllib
import time
import re
import nose
from nose.tools import assert_equals
import sys
import os
# This hack is to add current path when running script from command line
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import BasePlugin
from BasePlugin.BasePlugin import BasePluginClass
from BasePlugin.BasePlugin import TestBasePluginClass
# Permissions: RWX for owner, RX for group and others. Set this here so that .pyc files are created with these permissions
os.umask(022)
# Conforms to API specified here: https://github.com/mhahnel/Total-Impact/wiki/Plugin-requirements
# To run automated tests with nosy:
# nosy Plugin.py -A 'not skip'
def skip(f):
f.skip = True
return f
class PluginClass(BasePluginClass):
# each plugin needs to customize this stuff
SOURCE_NAME = "url2id"
SOURCE_DESCRIPTION = ""
SOURCE_URL = ""
SOURCE_ICON = ""
SOURCE_METRICS = {}
DEBUG = False
def __init__(self):
pass
# each plugin needs to write one of these
def derive_synonymns(self, url):
match = re.search("http://www.ncbi.nlm.nih.gov/pubmed/(?P<id>\d+)", url, re.IGNORECASE)
if (match):
return({"pmid":match.group("id")})
match = re.search("http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=(?P<id>\d+)", url, re.IGNORECASE)
if (match):
return({"pmcid":match.group("id")})
match = re.search(r"(?P<id>http://hdl.handle.net/.*)", url, re.IGNORECASE)
if (match):
return({"hdl":match.group("id")})
match = re.search("(?P<id>10\.\d+/\S+)", url, re.IGNORECASE)
if (match):
doi = match.group("id")
doi = doi.replace("/abstract", "")
return({"doi":doi})
return(None)
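    # Illustrative behaviour (added note): "http://www.ncbi.nlm.nih.gov/pubmed/16023720"
    # yields {"pmid": "16023720"}, while a DOI-bearing URL such as
    # "http://dx.doi.org/10.1371/journal.pmed.0040215" falls through to the last
    # pattern and yields {"doi": "10.1371/journal.pmed.0040215"}.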
# each plugin needs to write relevant versions of this
def artifact_type_recognized(self, id):
if id:
is_recognized = self.is_url(id)
else:
is_recognized = False
return(is_recognized)
    # list of possible ids should be in order of preference, most preferred first
# returns the first valid one, or None if none are valid
def get_valid_id(self, list_of_possible_ids):
for id in list_of_possible_ids:
if (self.artifact_type_recognized(id)):
return(id)
return(None)
## this changes for every plugin
def build_artifact_response(self, id):
if not id:
return(None)
metrics_response = self.derive_synonymns(id)
if not metrics_response:
return(None)
response = dict(type="article", url=id)
response.update(metrics_response)
return(response)
    ## this may need to be customized by plugins to support varied id types etc
## every plugin should check API limitations and make sure they are respected here
## check Mendeley requirements!
def get_artifacts_metrics(self, query):
response_dict = dict()
error_msg = None
time_started = time.time()
for artifact_id in query:
(artifact_id, lookup_id) = self.get_relevant_id(artifact_id, query[artifact_id], ["url"])
if (artifact_id):
artifact_response = self.build_artifact_response(lookup_id)
if artifact_response:
response_dict[artifact_id] = artifact_response
if (time.time() - time_started > self.MAX_ELAPSED_TIME):
error_msg = "TIMEOUT"
break
return(response_dict, error_msg)
class TestPluginClass(TestBasePluginClass):
def setup(self):
        self.plugin = PluginClass()
self.test_parse_input = self.testinput.TEST_INPUT_DOI
## this changes for every plugin
def test_build_artifact_response(self):
response = self.plugin.build_artifact_response('10.1371/journal.pmed.0040215')
assert_equals(response, {'type': 'article', 'groups': 1, 'readers': 42})
## this changes for every plugin
def test_get_artifacts_metrics(self):
response = self.plugin.get_artifacts_metrics(self.test_parse_input)
assert_equals(response, ({u'10.1371/journal.pcbi.1000361': {'type': 'article', 'groups': 1, 'readers': 19}}, None))
    # each plugin should make sure its range of inputs is covered
def test_run_plugin_doi(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_DOI))
print response
assert_equals(len(response), 458)
def test_run_plugin_pmid(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_PMID))
print response
assert_equals(len(response), 379)
def test_run_plugin_url(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_URL))
print response
assert_equals(len(response), 379)
def test_run_plugin_invalid_id(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_DUD))
print response
assert_equals(len(response), 379)
def test_run_plugin_multiple(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_ALL))
print response
assert_equals(len(response), 458)
| mit | -5,771,353,778,099,712,000 | 35.012987 | 123 | 0.621821 | false |
smilebin818/wx-cqwdt | DBDATA/fileServer.py | 1 | 1270 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import SocketServer
import struct
import os
# Wire format read by the handler below:
#   name_len  --- one byte
#   name      --- name_len bytes
#   file_size --- four bytes, big-endian
#   data      --- file_size bytes
# The received data is saved to a file called `name` in the current directory.
# Refer to: http://blog.csdn.net/g__gle/article/details/8144968
addr = ('', 7819)
class MyTCPHandler (SocketServer.StreamRequestHandler):
def handle(self):
name_len = ord(self.rfile.read(1))
name = self.rfile.read(name_len)
print "Get request: %s" % name
file_size = struct.unpack(">l", self.rfile.read(4))[0]
restfile = file_size
fd = open(name, 'wb')
package_cnt = 0
while restfile > 4096:
package_cnt += 1
cont = self.rfile.read(4096)
fd.write(cont)
restfile -= 4096
if package_cnt >= 5:
self.request.send(struct.pack('>l', file_size - restfile))
package_cnt = 0
self.request.send(struct.pack('>l', file_size - restfile))
fd.write(self.rfile.read(restfile))
fd.close()
print "Out: %s\n" % name
server = SocketServer.TCPServer(addr, MyTCPHandler)
print "Serving on port %s ..." % addr[1]
server.serve_forever()
| gpl-3.0 | -6,804,128,155,356,753,000 | 28.534884 | 74 | 0.570866 | false |
endlessm/chromium-browser | third_party/catapult/third_party/gsutil/third_party/httplib2/tests/test_encoding.py | 5 | 3632 | import httplib2
import tests
def test_gzip_head():
# Test that we don't try to decompress a HEAD response
http = httplib2.Http()
response = tests.http_response_bytes(
headers={'content-encoding': 'gzip', 'content-length': 42},
)
with tests.server_const_bytes(response) as uri:
response, content = http.request(uri, 'HEAD')
assert response.status == 200
assert int(response['content-length']) != 0
assert content == b''
def test_gzip_get():
# Test that we support gzip compression
http = httplib2.Http()
response = tests.http_response_bytes(
headers={'content-encoding': 'gzip'},
body=tests.gzip_compress(b'properly compressed'),
)
with tests.server_const_bytes(response) as uri:
response, content = http.request(uri, 'GET')
assert response.status == 200
assert 'content-encoding' not in response
assert '-content-encoding' in response
assert int(response['content-length']) == len(b'properly compressed')
assert content == b'properly compressed'
def test_gzip_post_response():
http = httplib2.Http()
response = tests.http_response_bytes(
headers={'content-encoding': 'gzip'},
body=tests.gzip_compress(b'properly compressed'),
)
with tests.server_const_bytes(response) as uri:
response, content = http.request(uri, 'POST', body=b'')
assert response.status == 200
assert 'content-encoding' not in response
assert '-content-encoding' in response
def test_gzip_malformed_response():
http = httplib2.Http()
# Test that we raise a good exception when the gzip fails
http.force_exception_to_status_code = False
response = tests.http_response_bytes(
headers={'content-encoding': 'gzip'},
body=b'obviously not compressed',
)
with tests.server_const_bytes(response, request_count=2) as uri:
with tests.assert_raises(httplib2.FailedToDecompressContent):
http.request(uri, 'GET')
# Re-run the test with out the exceptions
http.force_exception_to_status_code = True
response, content = http.request(uri, 'GET')
assert response.status == 500
assert response.reason.startswith('Content purported')
def test_deflate_get():
# Test that we support deflate compression
http = httplib2.Http()
response = tests.http_response_bytes(
headers={'content-encoding': 'deflate'},
body=tests.deflate_compress(b'properly compressed'),
)
with tests.server_const_bytes(response) as uri:
response, content = http.request(uri, 'GET')
assert response.status == 200
assert 'content-encoding' not in response
assert int(response['content-length']) == len(b'properly compressed')
assert content == b'properly compressed'
def test_deflate_malformed_response():
# Test that we raise a good exception when the deflate fails
http = httplib2.Http()
http.force_exception_to_status_code = False
response = tests.http_response_bytes(
headers={'content-encoding': 'deflate'},
body=b'obviously not compressed',
)
with tests.server_const_bytes(response, request_count=2) as uri:
with tests.assert_raises(httplib2.FailedToDecompressContent):
http.request(uri, 'GET')
# Re-run the test with out the exceptions
http.force_exception_to_status_code = True
response, content = http.request(uri, 'GET')
assert response.status == 500
assert response.reason.startswith('Content purported')
| bsd-3-clause | 4,938,064,596,127,947,000 | 35.686869 | 77 | 0.657764 | false |
blaze225/zulip | zerver/views/users.py | 1 | 16558 | from __future__ import absolute_import
from typing import Text, Union, Optional, Dict, Any, List, Tuple
import os
import simplejson as json
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from django.shortcuts import redirect
from django.conf import settings
from six.moves import map
from zerver.decorator import has_request_variables, REQ, JsonableError, \
require_realm_admin
from zerver.forms import CreateUserForm
from zerver.lib.actions import do_change_avatar_fields, do_change_bot_owner, \
do_change_is_admin, do_change_default_all_public_streams, \
do_change_default_events_register_stream, do_change_default_sending_stream, \
do_create_user, do_deactivate_user, do_reactivate_user, do_regenerate_api_key
from zerver.lib.avatar import avatar_url, get_avatar_url
from zerver.lib.response import json_error, json_success
from zerver.lib.streams import access_stream_by_name
from zerver.lib.upload import upload_avatar_image
from zerver.lib.validator import check_bool, check_string
from zerver.lib.users import check_change_full_name, check_full_name
from zerver.lib.utils import generate_random_token
from zerver.models import UserProfile, Stream, Realm, Message, get_user_profile_by_email, \
email_allowed_for_realm, get_user_profile_by_id
from zproject.jinja2 import render_to_response
def deactivate_user_backend(request, user_profile, email):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
target = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if target.is_bot:
return json_error(_('No such user'))
if check_last_admin(target):
return json_error(_('Cannot deactivate the only organization administrator'))
return _deactivate_user_profile_backend(request, user_profile, target)
def deactivate_user_own_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
if user_profile.is_realm_admin and check_last_admin(user_profile):
return json_error(_('Cannot deactivate the only organization administrator'))
do_deactivate_user(user_profile)
return json_success()
def check_last_admin(user_profile):
# type: (UserProfile) -> bool
admins = set(user_profile.realm.get_admin_users())
return user_profile.is_realm_admin and len(admins) == 1
def deactivate_bot_backend(request, user_profile, email):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
target = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error(_('No such bot'))
if not target.is_bot:
return json_error(_('No such bot'))
return _deactivate_user_profile_backend(request, user_profile, target)
def _deactivate_user_profile_backend(request, user_profile, target):
# type: (HttpRequest, UserProfile, UserProfile) -> HttpResponse
if not user_profile.can_admin_user(target):
return json_error(_('Insufficient permission'))
do_deactivate_user(target)
return json_success()
def reactivate_user_backend(request, user_profile, email):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
target = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if not user_profile.can_admin_user(target):
return json_error(_('Insufficient permission'))
do_reactivate_user(target)
return json_success()
@has_request_variables
def update_user_backend(request, user_profile, email,
full_name=REQ(default="", validator=check_string),
is_admin=REQ(default=None, validator=check_bool)):
# type: (HttpRequest, UserProfile, Text, Optional[Text], Optional[bool]) -> HttpResponse
try:
target = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if not user_profile.can_admin_user(target):
return json_error(_('Insufficient permission'))
if is_admin is not None:
if not is_admin and check_last_admin(user_profile):
return json_error(_('Cannot remove the only organization administrator'))
do_change_is_admin(target, is_admin)
if (full_name is not None and target.full_name != full_name and
full_name.strip() != ""):
# We don't respect `name_changes_disabled` here because the request
# is on behalf of the administrator.
check_change_full_name(target, full_name)
return json_success()
# TODO: Since eventually we want to support using the same email with
# different organizations, we'll eventually want this to be a
# logged-in endpoint so that we can access the realm_id.
def avatar(request, email_or_id, medium=None):
# type: (HttpRequest, str, bool) -> HttpResponse
"""Accepts an email address or user ID and returns the avatar"""
try:
int(email_or_id)
except ValueError:
get_user_func = get_user_profile_by_email
else:
get_user_func = get_user_profile_by_id
try:
# If there is a valid user account passed in, use its avatar
user_profile = get_user_func(email_or_id)
url = avatar_url(user_profile, medium=medium)
except UserProfile.DoesNotExist:
# If there is no such user, treat it as a new gravatar
email = email_or_id
avatar_source = 'G'
avatar_version = 1
url = get_avatar_url(avatar_source, email, avatar_version, medium=medium)
# We can rely on the url already having query parameters. Because
# our templates depend on being able to use the ampersand to
# add query parameters to our url, get_avatar_url does '?x=x'
# hacks to prevent us from having to jump through decode/encode hoops.
assert '?' in url
url += '&' + request.META['QUERY_STRING']
return redirect(url)
def get_stream_name(stream):
# type: (Optional[Stream]) -> Optional[Text]
if stream:
return stream.name
return None
@has_request_variables
def patch_bot_backend(request, user_profile, email,
full_name=REQ(default=None),
bot_owner=REQ(default=None),
default_sending_stream=REQ(default=None),
default_events_register_stream=REQ(default=None),
default_all_public_streams=REQ(default=None, validator=check_bool)):
# type: (HttpRequest, UserProfile, Text, Optional[Text], Optional[Text], Optional[Text], Optional[Text], Optional[bool]) -> HttpResponse
try:
bot = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if not user_profile.can_admin_user(bot):
return json_error(_('Insufficient permission'))
if full_name is not None:
check_change_full_name(bot, full_name)
if bot_owner is not None:
owner = get_user_profile_by_email(bot_owner)
do_change_bot_owner(bot, owner)
if default_sending_stream is not None:
if default_sending_stream == "":
stream = None # type: Optional[Stream]
else:
(stream, recipient, sub) = access_stream_by_name(
user_profile, default_sending_stream)
do_change_default_sending_stream(bot, stream)
if default_events_register_stream is not None:
if default_events_register_stream == "":
stream = None
else:
(stream, recipient, sub) = access_stream_by_name(
user_profile, default_events_register_stream)
do_change_default_events_register_stream(bot, stream)
if default_all_public_streams is not None:
do_change_default_all_public_streams(bot, default_all_public_streams)
if len(request.FILES) == 0:
pass
elif len(request.FILES) == 1:
user_file = list(request.FILES.values())[0]
upload_avatar_image(user_file, user_profile, bot)
avatar_source = UserProfile.AVATAR_FROM_USER
do_change_avatar_fields(bot, avatar_source)
else:
return json_error(_("You may only upload one file at a time"))
json_result = dict(
full_name=bot.full_name,
avatar_url=avatar_url(bot),
default_sending_stream=get_stream_name(bot.default_sending_stream),
default_events_register_stream=get_stream_name(bot.default_events_register_stream),
default_all_public_streams=bot.default_all_public_streams,
)
# Don't include the bot owner in case it is not set.
# Default bots have no owner.
if bot.bot_owner is not None:
json_result['bot_owner'] = bot.bot_owner.email
return json_success(json_result)
@has_request_variables
def regenerate_bot_api_key(request, user_profile, email):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
bot = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if not user_profile.can_admin_user(bot):
return json_error(_('Insufficient permission'))
do_regenerate_api_key(bot)
json_result = dict(
api_key = bot.api_key
)
return json_success(json_result)
@has_request_variables
def add_bot_backend(request, user_profile, full_name_raw=REQ("full_name"), short_name=REQ(),
default_sending_stream_name=REQ('default_sending_stream', default=None),
default_events_register_stream_name=REQ('default_events_register_stream', default=None),
default_all_public_streams=REQ(validator=check_bool, default=None)):
# type: (HttpRequest, UserProfile, Text, Text, Optional[Text], Optional[Text], Optional[bool]) -> HttpResponse
short_name += "-bot"
full_name = check_full_name(full_name_raw)
email = '%s@%s' % (short_name, user_profile.realm.get_bot_domain())
form = CreateUserForm({'full_name': full_name, 'email': email})
if not form.is_valid():
# We validate client-side as well
return json_error(_('Bad name or username'))
try:
get_user_profile_by_email(email)
return json_error(_("Username already in use"))
except UserProfile.DoesNotExist:
pass
if len(request.FILES) == 0:
avatar_source = UserProfile.AVATAR_FROM_GRAVATAR
elif len(request.FILES) != 1:
return json_error(_("You may only upload one file at a time"))
else:
avatar_source = UserProfile.AVATAR_FROM_USER
default_sending_stream = None
if default_sending_stream_name is not None:
(default_sending_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, default_sending_stream_name)
default_events_register_stream = None
if default_events_register_stream_name is not None:
(default_events_register_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, default_events_register_stream_name)
bot_profile = do_create_user(email=email, password='',
realm=user_profile.realm, full_name=full_name,
short_name=short_name, active=True,
bot_type=UserProfile.DEFAULT_BOT,
bot_owner=user_profile,
avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams)
if len(request.FILES) == 1:
user_file = list(request.FILES.values())[0]
upload_avatar_image(user_file, user_profile, bot_profile)
json_result = dict(
api_key=bot_profile.api_key,
avatar_url=avatar_url(bot_profile),
default_sending_stream=get_stream_name(bot_profile.default_sending_stream),
default_events_register_stream=get_stream_name(bot_profile.default_events_register_stream),
default_all_public_streams=bot_profile.default_all_public_streams,
)
return json_success(json_result)
def get_bots_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
bot_owner=user_profile)
bot_profiles = bot_profiles.select_related('default_sending_stream', 'default_events_register_stream')
bot_profiles = bot_profiles.order_by('date_joined')
def bot_info(bot_profile):
# type: (UserProfile) -> Dict[str, Any]
default_sending_stream = get_stream_name(bot_profile.default_sending_stream)
default_events_register_stream = get_stream_name(bot_profile.default_events_register_stream)
return dict(
username=bot_profile.email,
full_name=bot_profile.full_name,
api_key=bot_profile.api_key,
avatar_url=avatar_url(bot_profile),
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=bot_profile.default_all_public_streams,
)
return json_success({'bots': list(map(bot_info, bot_profiles))})
def get_members_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
realm = user_profile.realm
admins = set(user_profile.realm.get_admin_users())
members = []
for profile in UserProfile.objects.select_related().filter(realm=realm):
member = {"full_name": profile.full_name,
"is_bot": profile.is_bot,
"is_active": profile.is_active,
"is_admin": (profile in admins),
"email": profile.email,
"user_id": profile.id,
"avatar_url": avatar_url(profile)}
if profile.is_bot and profile.bot_owner is not None:
member["bot_owner"] = profile.bot_owner.email
members.append(member)
return json_success({'members': members})
@require_realm_admin
@has_request_variables
def create_user_backend(request, user_profile, email=REQ(), password=REQ(),
full_name_raw=REQ("full_name"), short_name=REQ()):
# type: (HttpRequest, UserProfile, Text, Text, Text, Text) -> HttpResponse
full_name = check_full_name(full_name_raw)
form = CreateUserForm({'full_name': full_name, 'email': email})
if not form.is_valid():
return json_error(_('Bad name or username'))
# Check that the new user's email address belongs to the admin's realm
# (Since this is an admin API, we don't require the user to have been
# invited first.)
realm = user_profile.realm
if not email_allowed_for_realm(email, user_profile.realm):
return json_error(_("Email '%(email)s' does not belong to domain '%(domain)s'") %
{'email': email, 'domain': realm.domain})
try:
get_user_profile_by_email(email)
return json_error(_("Email '%s' already in use") % (email,))
except UserProfile.DoesNotExist:
pass
do_create_user(email, password, realm, full_name, short_name)
return json_success()
def generate_client_id():
# type: () -> Text
return generate_random_token(32)
def get_profile_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
result = dict(pointer = user_profile.pointer,
client_id = generate_client_id(),
max_message_id = -1,
user_id = user_profile.id,
full_name = user_profile.full_name,
email = user_profile.email,
is_bot = user_profile.is_bot,
is_admin = user_profile.is_realm_admin,
short_name = user_profile.short_name)
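    # Fetch the newest message this user has received, if any, so the profile
    # response can report an initial max_message_id.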
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
result['max_message_id'] = messages[0].id
return json_success(result)
def authors_view(request):
# type: (HttpRequest) -> HttpResponse
with open(settings.CONTRIBUTORS_DATA) as f:
data = json.load(f)
return render_to_response(
'zerver/authors.html',
data,
request=request
)
| apache-2.0 | -2,537,478,890,875,718,000 | 41.239796 | 140 | 0.650803 | false |
dnbaker/emp | python/master_map.py | 1 | 1618 | #!/usr/bin/env python
import sys
import multiprocessing
import gzip
import os
from subprocess import check_call as cc, CalledProcessError
from download_genomes import is_valid_gzip, xfirstline
argv = sys.argv
def getopts():
import argparse
a = argparse.ArgumentParser()
a.add_argument("paths", nargs="+", help="Paths to as files.")
a.add_argument("--threads", "-p",
help="Number of threads to use while downloading.",
type=int, default=1)
a.add_argument("-o", "--out",
help="Path to write output. Default: stdout")
return a.parse_args()
def as2dict(path):
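    # Parse what appears to be an NCBI assembly_summary file: column 6 holds the
    # taxid and column 20 the FTP path. Each downloaded genome's first FASTA
    # header (its sequence name) is mapped to that taxid; assemblies whose
    # FASTA file is missing locally are skipped with a warning.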
ret = {}
folder = "/".join(path.split("/")[:-1]) + "/"
for line in open(path):
if line.startswith("#"):
continue
toks = line.split("\t")
taxid = int(toks[5])
fn = folder + toks[19].split("/")[-1] + "_genomic.fna.gz"
if not os.path.isfile(fn):
sys.stderr.write("%s not a file. Continuing.\n" % fn)
continue
ret[xfirstline(fn)[1:].split()[0].decode()] = taxid
return ret
FTP_BASENAME = "ftp://ftp.ncbi.nlm.nih.gov/genomes/refseq/"
def main():
args = getopts()
master = {}
mini_dicts = (multiprocessing.Pool(args.threads).map if args.threads > 1
else map)(as2dict, args.paths)
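    # Note: when --out is omitted, this with-statement also closes sys.stdout on
    # exit; that is harmless here because the script returns immediately after.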
with open(args.out, "w") if args.out else sys.stdout as outfile:
of = outfile.write
for mini in mini_dicts:
for k, v in mini.items():
of("%s\t%i\n" % (k, v))
return 0
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 | 451,841,040,786,476,540 | 27.892857 | 76 | 0.567367 | false |
rvanlaar/easy-transifex | src/transifex/transifex/resources/tests/views/status.py | 1 | 2520 | # -*- coding: utf-8 -*-
from django.test.client import Client
from transifex.languages.models import Language
from transifex.resources.models import Resource
from transifex.txcommon.tests.base import BaseTestCase
class StatusCodesTest(BaseTestCase):
"""Test that all app URLs return correct status code.
Moreover, this kind of tests are useful to list down the urls that are
mounted to the resources app views.
TODO: Maybe in the future, we need to refactor the tests according to
request type, e.g. split them to GET and POST sets of URLs.
"""
# TODO: Fill in the urls
def setUp(self):
super(StatusCodesTest, self).setUp()
self.pages = {
200: [
('/projects/p/%s/resource/%s/' %
(self.project.slug, self.resource.slug)),
('/projects/p/%s/resource/%s/l/pt_BR/view/' %
(self.project.slug, self.resource.slug)),
('/projects/p/%s/resources/1' %
(self.project.slug,)),
('/projects/p/%s/resources/1/more/' %
(self.project.slug,)),
('/ajax/p/%s/resource/%s/l/pt_BR/actions/' %
(self.project.slug, self.resource.slug)),
],
302: [
('/projects/p/%s/resource/%s/edit/$' %
(self.project.slug, self.resource.slug)),
('/projects/p/%s/resource/%s/delete/$' %
(self.project.slug, self.resource.slug)),
('/projects/p/%s/resource/%s/l/pt_BR/download/' %
(self.project.slug, self.resource.slug)),
('/ajax/p/%s/resource/%s/l/pt_BR/lock_and_download/' %
(self.project.slug, self.resource.slug)),
],
403: [
('/projects/p/%s/resource/%s/l/pt_BR/delete_all/' %
(self.project.slug, self.resource.slug)),
],
404: [
'projects/p/f00/resource/b4r/',
]}
def testStatusCode(self):
"""Test that the response status code is correct"""
client = Client()
for expected_code, pages in self.pages.items():
for page_url in pages:
page = client.get(page_url)
self.assertEquals(page.status_code, expected_code,
"Status code for page '%s' was %s instead of %s" %
(page_url, page.status_code, expected_code))
| bsd-2-clause | 2,930,107,120,704,847,000 | 41 | 74 | 0.526984 | false |
kseetharam/genPolaron | xanalysis_groundstate_paper.py | 1 | 26133 | import numpy as np
import pandas as pd
import xarray as xr
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.lines import Line2D
import matplotlib.colors as colors
from matplotlib.animation import writers
import os
import itertools
import pf_dynamic_cart as pfc
import pf_dynamic_sph as pfs
import Grid
from scipy import interpolate
from timeit import default_timer as timer
if __name__ == "__main__":
# # Initialization
# fm = matplotlib.font_manager.json_load(os.path.expanduser("~/.cache/matplotlib/fontlist-v310.json"))
# fm.findfont("serif", rebuild_if_missing=False)
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['font.serif'] = ['Adobe Garamond Pro']
# matplotlib.rcParams.update({'font.size': 12, 'text.usetex': True})
mpegWriter = writers['ffmpeg'](fps=0.75, bitrate=1800)
# ---- INITIALIZE GRIDS ----
(Lx, Ly, Lz) = (21, 21, 21)
(dx, dy, dz) = (0.375, 0.375, 0.375)
# (Lx, Ly, Lz) = (105, 105, 105)
# (dx, dy, dz) = (0.375, 0.375, 0.375)
NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
# Toggle parameters
toggleDict = {'Dynamics': 'imaginary', 'Interaction': 'on', 'Grid': 'spherical', 'Coupling': 'twophonon', 'IRcuts': 'false', 'ReducedInterp': 'false', 'kGrid_ext': 'false'}
# ---- SET OUTPUT DATA FOLDER ----
datapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints_cart, 1)
animpath = '/Users/kis/Dropbox/VariationalResearch/DataAnalysis/figs'
if toggleDict['Dynamics'] == 'real':
innerdatapath = datapath + '/redyn'
animpath = animpath + '/rdyn'
elif toggleDict['Dynamics'] == 'imaginary':
innerdatapath = datapath + '/imdyn'
animpath = animpath + '/idyn'
if toggleDict['Grid'] == 'cartesian':
innerdatapath = innerdatapath + '_cart'
elif toggleDict['Grid'] == 'spherical':
innerdatapath = innerdatapath + '_spherical'
if toggleDict['Coupling'] == 'frohlich':
innerdatapath = innerdatapath + '_froh'
animpath = animpath + '_frohlich'
elif toggleDict['Coupling'] == 'twophonon':
innerdatapath = innerdatapath
animpath = animpath + '_twophonon'
if toggleDict['IRcuts'] == 'true':
innerdatapath = innerdatapath + '_IRcuts'
elif toggleDict['IRcuts'] == 'false':
innerdatapath = innerdatapath
print(innerdatapath)
# # # Concatenate Individual Datasets (aIBi specific)
# aIBi_List = [-15.0, -12.5, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -3.5, -2.0, -1.0, -0.75, -0.5, -0.1]
# for aIBi in aIBi_List:
# ds_list = []; P_list = []; mI_list = []
# for ind, filename in enumerate(os.listdir(innerdatapath)):
# if filename[0:14] == 'quench_Dataset':
# continue
# if filename[0:6] == 'interp':
# continue
# if filename[0:2] == 'mm':
# continue
# if float(filename[13:-3]) != aIBi:
# continue
# print(filename)
# ds = xr.open_dataset(innerdatapath + '/' + filename)
# ds_list.append(ds)
# P_list.append(ds.attrs['P'])
# mI_list.append(ds.attrs['mI'])
# s = sorted(zip(P_list, ds_list))
# g = itertools.groupby(s, key=lambda x: x[0])
# P_keys = []; P_ds_list = []; aIBi_ds_list = []
# for key, group in g:
# P_temp_list, ds_temp_list = zip(*list(group))
# P_keys.append(key) # note that key = P_temp_list[0]
# P_ds_list.append(ds_temp_list[0])
# with xr.concat(P_ds_list, pd.Index(P_keys, name='P')) as ds_tot:
# # ds_tot = xr.concat(P_ds_list, pd.Index(P_keys, name='P'))
# del(ds_tot.attrs['P']); del(ds_tot.attrs['nu']); del(ds_tot.attrs['gIB'])
# ds_tot.to_netcdf(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# # # Concatenate Individual Datasets (aIBi specific, IRcuts)
# IRrat_Vals = [1, 2, 5, 10, 50, 1e2, 5e2, 1e3, 5e3, 1e4]
# aIBi_List = [-10.0, -5.0, -2.0, -0.5]
# for IRrat in IRrat_Vals:
# IRdatapath = innerdatapath + '/IRratio_{:.1E}'.format(IRrat)
# for aIBi in aIBi_List:
# ds_list = []; P_list = []; mI_list = []
# for ind, filename in enumerate(os.listdir(IRdatapath)):
# if filename[0:14] == 'quench_Dataset':
# continue
# if filename[0:6] == 'interp':
# continue
# if filename[0:2] == 'mm':
# continue
# if float(filename[13:-3]) != aIBi:
# continue
# print(filename)
# ds = xr.open_dataset(IRdatapath + '/' + filename)
# ds_list.append(ds)
# P_list.append(ds.attrs['P'])
# mI_list.append(ds.attrs['mI'])
# s = sorted(zip(P_list, ds_list))
# g = itertools.groupby(s, key=lambda x: x[0])
# P_keys = []; P_ds_list = []; aIBi_ds_list = []
# for key, group in g:
# P_temp_list, ds_temp_list = zip(*list(group))
# P_keys.append(key) # note that key = P_temp_list[0]
# P_ds_list.append(ds_temp_list[0])
# with xr.concat(P_ds_list, pd.Index(P_keys, name='P')) as ds_tot:
# # ds_tot = xr.concat(P_ds_list, pd.Index(P_keys, name='P'))
# del(ds_tot.attrs['P']); del(ds_tot.attrs['nu']); del(ds_tot.attrs['gIB'])
# ds_tot.to_netcdf(IRdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# # Analysis of Total Dataset
aIBi = -2
# qds = xr.open_dataset(innerdatapath + '/quench_Dataset.nc')
# qds_aIBi = qds.sel(aIBi=aIBi)
qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
qds_aIBi = qds
PVals = qds['P'].values
tVals = qds['t'].values
n0 = qds.attrs['n0']
gBB = qds.attrs['gBB']
mI = qds.attrs['mI']
mB = qds.attrs['mB']
nu = np.sqrt(n0 * gBB / mB)
aBB = (mB / (4 * np.pi)) * gBB
xi = (8 * np.pi * n0 * aBB)**(-1 / 2)
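    # nu is the BEC sound speed, aBB the boson-boson scattering length, and xi
    # the condensate healing length used to express the momentum cutoff below.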
print(qds.attrs['k_mag_cutoff'] * xi)
aIBi_Vals = np.array([-12.5, -10.0, -9.0, -8.0, -7.0, -5.0, -3.5, -2.0, -1.0, -0.75, -0.5, -0.1]) # used by many plots (spherical)
# # PHASE DIAGRAM (SPHERICAL)
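    # The block below plots the quasiparticle residue Z = exp(-Nph) at a fixed
    # P/(mI*c) as a function of inverse interaction strength, taking the final
    # (long imaginary time) frame of each dataset as the ground-state estimate.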
Pnormdes = 0.5
Pind = np.abs(PVals / (mI * nu) - Pnormdes).argmin()
P = PVals[Pind]
ZVals = np.zeros(aIBi_Vals.size)
for aind, aIBi in enumerate(aIBi_Vals):
qds_aIBi = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
ZVals[aind] = np.exp(-1 * qds_aIBi.isel(P=Pind, t=-1)['Nph'].values)
xmin = np.min(aIBi_Vals)
xmax = 1.01 * np.max(aIBi_Vals)
fig, ax = plt.subplots()
ax.plot(aIBi_Vals, ZVals, 'g-')
ax.set_title('Quasiparticle Residue (' + r'$\frac{P}{m_{I}c_{BEC}}=$' + '{:.2f})'.format(P / (mI * nu)))
ax.set_xlabel(r'$a_{IB}^{-1}$')
ax.set_ylabel(r'$Z=e^{-N_{ph}}$')
ax.set_xlim([xmin, xmax])
ax.set_ylim([0, 1.1])
plt.show()
# # # # BOGOLIUBOV DISPERSION (SPHERICAL)
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', qds.coords['k'].values); kgrid.initArray_premade('th', qds.coords['th'].values)
# kVals = kgrid.getArray('k')
# wk_Vals = pfs.omegak(kVals, mB, n0, gBB)
# fig, ax = plt.subplots()
# ax.plot(kVals, wk_Vals, 'k-', label='')
# ax.plot(kVals, nu * kVals, 'b--', label=r'$c_{BEC}|k|$')
# ax.set_title('Bogoliubov Phonon Dispersion')
# ax.set_xlabel(r'$|k|$')
# ax.set_ylabel(r'$\omega_{|k|}$')
# ax.set_xlim([0, 2])
# ax.set_ylim([0, 3])
# ax.legend(loc=2, fontsize='x-large')
# plt.show()
# # # PHASE DIAGRAM (SPHERICAL)
# Pcrit = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds_aIBi = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).isel(t=-1).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# # Pcrit[aind] = Pinf_Vals[np.argwhere(Einf_2ndderiv_Vals < 0)[-2][0] + 3]
# Pcrit[aind] = Pinf_Vals[np.argmin(np.gradient(Einf_2ndderiv_Vals)) - 0] # there is a little bit of fudging with the -3 here so that aIBi=-10 gives me Pcrit/(mI*c) = 1 -> I can also just generate data for weaker interactions and see if it's better
# Pcrit_norm = Pcrit / (mI * nu)
# Pcrit_tck = interpolate.splrep(aIBi_Vals, Pcrit_norm, s=0, k=3)
# aIBi_interpVals = np.linspace(np.min(aIBi_Vals), np.max(aIBi_Vals), 5 * aIBi_Vals.size)
# Pcrit_interpVals = 1 * interpolate.splev(aIBi_interpVals, Pcrit_tck, der=0)
# print(Pcrit_norm)
# print(Pcrit_norm[1], Pcrit_norm[5], Pcrit_norm[-5])
# scalefac = 1.0
# # scalefac = 0.95 # just to align weakly interacting case slightly to 1 (it's pretty much there, would just need higher resolution data)
# Pcrit_norm = scalefac * Pcrit_norm
# Pcrit_interpVals = scalefac * Pcrit_interpVals
# xmin = np.min(aIBi_interpVals)
# xmax = 1.01 * np.max(aIBi_interpVals)
# ymin = 0
# ymax = 1.01 * np.max(Pcrit_interpVals)
# font = {'family': 'serif', 'color': 'black', 'size': 14}
# sfont = {'family': 'serif', 'color': 'black', 'size': 13}
# fig, ax = plt.subplots()
# ax.plot(aIBi_Vals, Pcrit_norm, 'kx')
# ax.plot(aIBi_interpVals, Pcrit_interpVals, 'k-')
# # f1 = interpolate.interp1d(aIBi_Vals, Pcrit_norm, kind='cubic')
# # ax.plot(aIBi_interpVals, f1(aIBi_interpVals), 'k-')
# ax.set_title('Ground State Phase Diagram')
# ax.set_xlabel(r'$a_{IB}^{-1}$')
# ax.set_ylabel(r'$\frac{P}{m_{I}c_{BEC}}$')
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([ymin, ymax])
# ax.fill_between(aIBi_interpVals, Pcrit_interpVals, ymax, facecolor='b', alpha=0.25)
# ax.fill_between(aIBi_interpVals, ymin, Pcrit_interpVals, facecolor='g', alpha=0.25)
# ax.text(-3.0, ymin + 0.175 * (ymax - ymin), 'Polaron', fontdict=font)
# ax.text(-2.9, ymin + 0.1 * (ymax - ymin), '(' + r'$Z>0$' + ')', fontdict=sfont)
# ax.text(-6.5, ymin + 0.6 * (ymax - ymin), 'Cherenkov', fontdict=font)
# ax.text(-6.35, ymin + 0.525 * (ymax - ymin), '(' + r'$Z=0$' + ')', fontdict=sfont)
# plt.show()
# # # ENERGY DERIVATIVES (SPHERICAL)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals = np.zeros((PVals.size, tVals.size))
# for Pind, P in enumerate(PVals):
# for tind, t in enumerate(tVals):
# CSAmp = CSAmp_ds.sel(P=P, t=t).values
# Energy_Vals[Pind, tind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# Energy_Vals_inf = Energy_Vals[:, -1]
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 5 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_1stderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=1)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# sound_mask = np.abs(Einf_2ndderiv_Vals) <= 5e-3
# Einf_sound = Einf_Vals[sound_mask]
# Pinf_sound = Pinf_Vals[sound_mask]
# [vsound, vs_const] = np.polyfit(Pinf_sound, Einf_sound, deg=1)
# ms_mask = Pinf_Vals <= 0.5
# Einf_1stderiv_ms = Einf_1stderiv_Vals[ms_mask]
# Pinf_ms = Pinf_Vals[ms_mask]
# [ms, ms_const] = np.polyfit(Pinf_ms, Einf_1stderiv_ms, deg=1)
# fig, axes = plt.subplots(nrows=3, ncols=1)
# axes[0].plot(Pinf_Vals, Einf_Vals, 'k-')
# axes[0].set_title('Ground State Energy (' + r'$a_{IB}^{-1}=$' + '{0})'.format(aIBi))
# axes[0].set_xlabel('P')
# axes[0].set_ylim([1.1 * np.min(Einf_Vals), -0.5])
# axes[0].set_xlim([0, 2.0])
# axes[1].plot(Pinf_Vals, Einf_1stderiv_Vals, 'k-')
# axes[1].set_title('First Derivative of Energy')
# axes[1].set_xlabel('P')
# axes[1].plot(Pinf_Vals, vsound * np.ones(Pinf_Vals.size), 'r--', linewidth=2.0)
# axes[1].set_ylim([0, 1.2 * np.max(Einf_1stderiv_Vals)])
# axes[1].set_xlim([0, 2.0])
# axes[2].plot(Pinf_Vals[::2], Einf_2ndderiv_Vals[::2], 'ko')
# axes[2].set_title('Second Derivative of Energy')
# axes[2].set_xlabel('P')
# axes[2].plot(Pinf_Vals, ms * np.ones(Pinf_Vals.size), 'c--', linewidth=2.0)
# axes[2].set_ylim([0, 1.2 * np.max(Einf_2ndderiv_Vals)])
# axes[2].set_xlim([0, 2.0])
# # # This plot below is for saturation/convergence of the energy with imaginary time
# # fig3, ax3 = plt.subplots()
# # Pind = 8
# # ax3.plot(tVals, np.abs(Energy_Vals[Pind, :]), 'k-')
# # ax3.set_yscale('log')
# # ax3.set_xscale('log')
# # ax3.set_title('Ground State Energy (' + r'$a_{IB}^{-1}=$' + '{0}, '.format(aIBi) + r'$P=$' + '{:.2f})'.format(PVals[Pind]))
# # ax3.set_xlabel('Imaginary time')
# fig.tight_layout()
# plt.show()
# # # POLARON SOUND VELOCITY (SPHERICAL)
# # Check to see if linear part of polaron (total system) energy spectrum has slope equal to sound velocity
# vsound_Vals = np.zeros(aIBi_Vals.size)
# vI_Vals = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# qds_aIBi = qds.isel(t=-1)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# PI_Vals = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# PI_Vals[Pind] = P - qds_aIBi.sel(P=P)['Pph'].values
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# sound_mask = np.abs(Einf_2ndderiv_Vals) <= 5e-3
# Einf_sound = Einf_Vals[sound_mask]
# Pinf_sound = Pinf_Vals[sound_mask]
# [vsound_Vals[aind], vs_const] = np.polyfit(Pinf_sound, Einf_sound, deg=1)
# vI_inf_tck = interpolate.splrep(PVals, PI_Vals / mI, s=0)
# vI_inf_Vals = 1 * interpolate.splev(Pinf_Vals, vI_inf_tck, der=0)
# vI_Vals[aind] = np.polyfit(Pinf_sound, vI_inf_Vals[sound_mask], deg=0)
# print(vsound_Vals)
# print(100 * (vsound_Vals - nu) / nu)
# fig, ax = plt.subplots()
# ax.plot(aIBi_Vals, vsound_Vals, 'rx', mew=1, ms=10, label='Post-Transition Polaron Sound Velocity (' + r'$\frac{\partial E}{\partial P}$' + ')')
# ax.plot(aIBi_Vals, vI_Vals, 'ko', mew=1, ms=10, markerfacecolor='none', label='Post-Transition Impurity Velocity (' + r'$\frac{P-<P_{ph}>}{m_{I}}$' + ')')
# ax.plot(aIBi_Vals, nu * np.ones(aIBi_Vals.size), 'g--', linewidth=3.0, label='BEC Sound Speed')
# ax.set_ylim([0, 1.2])
# ax.legend(loc=(0.25, 0.1))
# ax.set_title('Velocity Comparison')
# ax.set_xlabel(r'$a_{IB}^{-1}$')
# ax.set_ylabel('Velocity')
# plt.show()
# # # POLARON EFFECTIVE MASS (SPHERICAL)
# ms_Vals = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# qds_aIBi = qds.isel(t=-1)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# PI_Vals = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# PI_Vals[Pind] = P - qds_aIBi.sel(P=P)['Pph'].values
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_1stderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=1)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# ms_mask = Pinf_Vals < 0.3
# Einf_1stderiv_ms = Einf_1stderiv_Vals[ms_mask]
# Pinf_ms = Pinf_Vals[ms_mask]
# [ms_Vals[aind], ms_const] = np.polyfit(Pinf_ms, Einf_1stderiv_ms, deg=1)
# massEnhancement_Vals = (1 / ms_Vals) / mI
# mE_tck = interpolate.splrep(aIBi_Vals, massEnhancement_Vals, s=0)
# aIBi_interpVals = np.linspace(np.min(aIBi_Vals), np.max(aIBi_Vals), 5 * aIBi_Vals.size)
# mE_interpVals = 1 * interpolate.splev(aIBi_interpVals, mE_tck, der=0)
# fig, ax = plt.subplots()
# ax.plot(aIBi_Vals, massEnhancement_Vals, 'cD', mew=1, ms=10)
# ax.plot(aIBi_interpVals, mE_interpVals, 'c-')
# ax.set_title('Mass Enhancement')
# ax.set_xlabel(r'$a_{IB}^{-1}$')
# ax.set_ylabel(r'$\frac{m^{*}}{m_{I}} = \frac{1}{m_{I}}\frac{\partial^{2} E}{\partial P^{2}}$')
# plt.show()
# # # POLARON EFFECTIVE MASS VS CRITICAL MOMENTUM (SPHERICAL)
# ms_Vals = np.zeros(aIBi_Vals.size)
# Pcrit = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# qds_aIBi = qds.isel(t=-1)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# PI_Vals = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# PI_Vals[Pind] = P - qds_aIBi.sel(P=P)['Pph'].values
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_1stderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=1)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# ms_mask = Pinf_Vals < 0.3
# Einf_1stderiv_ms = Einf_1stderiv_Vals[ms_mask]
# Pinf_ms = Pinf_Vals[ms_mask]
# [ms_Vals[aind], ms_const] = np.polyfit(Pinf_ms, Einf_1stderiv_ms, deg=1)
# Pcrit[aind] = Pinf_Vals[np.argmin(np.gradient(Einf_2ndderiv_Vals)) - 0]
# massEnhancement_Vals = (1 / ms_Vals) / mI
# Pcrit_norm = Pcrit / (mI * nu)
# print(massEnhancement_Vals)
# print(Pcrit_norm)
# print(100 * np.abs(massEnhancement_Vals - Pcrit_norm) / Pcrit_norm)
# fig, ax = plt.subplots()
# ax.plot(aIBi_Vals, massEnhancement_Vals, 'co', mew=1, ms=10, markerfacecolor='none', label='Mass Enhancement (' + r'$\frac{m^{*}}{m_{I}}$' + ')')
# ax.plot(aIBi_Vals, Pcrit_norm, 'kx', mew=1, ms=10, label='Normalized Critical Momentum (' + r'$\frac{P_{crit}}{m_{I}c_{BEC}}$' + ')')
# ax.legend(loc=2)
# ax.set_title('Mass Enhancement vs Critical Momentum')
# ax.set_xlabel(r'$a_{IB}^{-1}$')
# plt.show()
# # # Nph (SPHERICAL)
# # IRrat_Vals = np.array([1, 2, 5, 10, 50, 1e2, 5e2, 1e3, 5e3, 1e4])
# IRrat_Vals = np.array([1, 2, 5, 10, 50, 1e2])
# aIBi_List = [-10.0, -5.0, -2.0, -0.5]
# aIBi = aIBi_List[1]
# IRrat = IRrat_Vals[0]
# IRdatapath = innerdatapath + '/IRratio_{:.1E}'.format(IRrat)
# qds_aIBi = (xr.open_dataset(IRdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))).isel(t=-1)
# PVals = qds_aIBi['P'].values
# n0 = qds_aIBi.attrs['n0']
# gBB = qds_aIBi.attrs['gBB']
# mI = qds_aIBi.attrs['mI']
# mB = qds_aIBi.attrs['mB']
# nu = np.sqrt(n0 * gBB / mB)
# Nph_ds = qds_aIBi['Nph']
# Nph_Vals = Nph_ds.values
# Pind = np.argmin(np.abs(PVals - 3.0 * mI * nu))
# Nph_IRcuts = np.zeros(IRrat_Vals.size)
# for ind, IRrat in enumerate(IRrat_Vals):
# IRdatapath = innerdatapath + '/IRratio_{:.1E}'.format(IRrat)
# qds_IRrat = (xr.open_dataset(IRdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))).isel(t=-1)
# kmin = np.min(qds_IRrat.coords['k'].values)
# Nph_ds_IRrat = qds_IRrat['Nph']
# Nph_IRcuts[ind] = Nph_ds_IRrat.values[Pind]
# fig, axes = plt.subplots(nrows=1, ncols=2)
# axes[0].plot(PVals / (mI * nu), Nph_Vals, 'k-')
# axes[0].set_title('Phonon Number (' + r'$aIB^{-1}=$' + '{0})'.format(aIBi))
# axes[0].set_xlabel(r'$\frac{P}{m_{I}c_{BEC}}$')
# axes[0].set_ylabel(r'$N_{ph}$')
# axes[1].plot(IRrat_Vals, Nph_IRcuts, 'g-')
# axes[1].set_xlabel('IR Cutoff Increase Ratio')
# axes[1].set_ylabel(r'$N_{ph}$')
# axes[1].set_title('Phonon Number (' + r'$aIB^{-1}=$' + '{0}, '.format(aIBi) + r'$\frac{P}{m_{I}c_{BEC}}=$' + '{:.1f})'.format(PVals[Pind] / (mI * nu)))
# fig.tight_layout()
# plt.show()
# # IMPURITY DISTRIBUTION ANIMATION WITH CHARACTERIZATION (CARTESIAN)
# nPIm_FWHM_indices = []
# nPIm_distPeak_index = np.zeros(PVals.size, dtype=int)
# nPIm_FWHM_Vals = np.zeros(PVals.size)
# nPIm_distPeak_Vals = np.zeros(PVals.size)
# nPIm_deltaPeak_Vals = np.zeros(PVals.size)
# nPIm_Tot_Vals = np.zeros(PVals.size)
# nPIm_Vec = np.empty(PVals.size, dtype=np.object)
# PIm_Vec = np.empty(PVals.size, dtype=np.object)
# for ind, P in enumerate(PVals):
# qds_nPIm_inf = qds_aIBi['nPI_mag'].sel(P=P).isel(t=-1).dropna('PI_mag')
# PIm_Vals = qds_nPIm_inf.coords['PI_mag'].values
# dPIm = PIm_Vals[1] - PIm_Vals[0]
# # # Plot nPIm(t=inf)
# # qds_nPIm_inf.plot(ax=ax, label='P: {:.1f}'.format(P))
# nPIm_Vec[ind] = qds_nPIm_inf.values
# PIm_Vec[ind] = PIm_Vals
# # # Calculate nPIm(t=inf) normalization
# nPIm_Tot_Vals[ind] = np.sum(qds_nPIm_inf.values * dPIm) + qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# # Calculate FWHM, distribution peak, and delta peak
# nPIm_FWHM_Vals[ind] = pfc.FWHM(PIm_Vals, qds_nPIm_inf.values)
# nPIm_distPeak_Vals[ind] = np.max(qds_nPIm_inf.values)
# nPIm_deltaPeak_Vals[ind] = qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# D = qds_nPIm_inf.values - np.max(qds_nPIm_inf.values) / 2
# indices = np.where(D > 0)[0]
# nPIm_FWHM_indices.append((indices[0], indices[-1]))
# nPIm_distPeak_index[ind] = np.argmax(qds_nPIm_inf.values)
# Pratio = 1.4
# Pnorm = PVals / (mI * nu)
# Pind = np.abs(Pnorm - Pratio).argmin()
# print(Pnorm[Pind])
# print(nPIm_deltaPeak_Vals[Pind])
# fig1, ax = plt.subplots()
# ax.plot(mI * nu * np.ones(PIm_Vals.size), np.linspace(0, 1, PIm_Vals.size), 'y--', label=r'$m_{I}c$')
# curve = ax.plot(PIm_Vec[Pind], nPIm_Vec[Pind], color='k', lw=3, label='')
# ind_s, ind_f = nPIm_FWHM_indices[Pind]
# FWHMcurve = ax.plot(np.linspace(PIm_Vec[Pind][ind_s], PIm_Vec[Pind][ind_f], 100), nPIm_Vec[Pind][ind_s] * np.ones(100), 'b-', linewidth=3.0, label='Incoherent Part FWHM')
# FWHMmarkers = ax.plot(np.linspace(PIm_Vec[Pind][ind_s], PIm_Vec[Pind][ind_f], 2), nPIm_Vec[Pind][ind_s] * np.ones(2), 'bD', mew=0.75, ms=7.5, label='')
# Zline = ax.plot(PVals[Pind] * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind], PIm_Vals.size), 'r-', linewidth=3.0, label='Delta Peak (Z-factor)')
# Zmarker = ax.plot(PVals[Pind], nPIm_deltaPeak_Vals[Pind], 'rx', mew=0.75, ms=7.5, label='')
# norm_text = ax.text(0.7, 0.65, r'$\int n_{|\vec{P_{I}}|} d|\vec{P_{I}}| = $' + '{:.2f}'.format(nPIm_Tot_Vals[Pind]), transform=ax.transAxes, color='k')
# ax.legend()
# ax.set_xlim([-0.01, np.max(PIm_Vec[Pind])])
# # ax.set_xlim([-0.01, 8])
# ax.set_ylim([0, 1.05])
# ax.set_title('Impurity Momentum Magnitude Distribution (' + r'$aIB^{-1}=$' + '{0}, '.format(aIBi) + r'$\frac{P}{m_{I}c_{BEC}}=$' + '{:.2f})'.format(Pnorm[Pind]))
# ax.set_ylabel(r'$n_{|\vec{P_{I}}|}$')
# ax.set_xlabel(r'$|\vec{P_{I}}|$')
# # Plot characterization of nPIm(t=inf)
# fig2, axes = plt.subplots(nrows=1, ncols=2)
# axes[0].plot(PVals, nPIm_deltaPeak_Vals, 'r-')
# axes[0].set_xlabel('$P$')
# # axes[0].set_ylabel(r'$Z$')
# axes[0].set_title('Delta Peak (Z-factor)')
# axes[1].plot(PVals, nPIm_FWHM_Vals, 'b-')
# axes[1].set_xlabel('$P$')
# # axes[1].set_ylabel('FWHM')
# axes[1].set_title('Incoherent Part FWHM')
# fig2.tight_layout()
# plt.show()
| mit | 5,256,427,196,833,747,000 | 44.60733 | 257 | 0.576895 | false |
adamnovak/hgvm-builder | src/hgvmbuilder/toilvgfacade.py | 1 | 14356 | # hgvm-builder toilvgfacade.py: Provide a function-argument-based toil-vg API
"""
toil-vg currently has lots of cases where low-level functions depend on
command-line arguments in the options object. To make toil-vg targets callable
on arbitrary Toil file IDs, we need wrappers.
To use this facade, run add_options() on your argparse parser before parsing
arguments, initialize() on your resulting options namespace on the master, and
the various _job functions as Toil jobs to actually do stuff.
"""
import os
import os.path
import logging
import urlparse
import shutil
import argparse
import timeit
import toil_vg.vg_common
import toil_vg.vg_config
import toil_vg.vg_index
import toil_vg.vg_call
from toil.realtimeLogger import RealtimeLogger
from .toilpromise import ToilPromise
Logger = logging.getLogger("toilvgfacade")
class OptionFilter(object):
"""
Can wrap an ArgumentParser or other such class and drop options on a
blacklist/accept only options on a whitelist.
"""
def __init__(self, real_parser, blacklist=[]):
"""
Wrap the given actual parser with an add_option method.
"""
# Save the parser
self.real_parser = real_parser
# Save the blacklist
self.blacklist = set(blacklist)
def add_argument(self, name, *args, **kwargs):
"""
Add the given argument, if its name passes the filters.
"""
if name.strip("-") not in self.blacklist:
# Add it!
return self.real_parser.add_argument(name, *args, **kwargs)
# What options don't we want to pass through to/from the command line? Don't add
# the leading dashes. Holds a dict from toil vg operation type to the options
# that should be removed.
option_blacklist = {
"wrapperscript": {"out_store", "tool"},
"common": {"force_outstore"},
"index": {"index_name", "graphs", "chroms"},
# TODO: we can't use most of the toil-vg call logic because it's too tied to
# chunking and having a single reference path.
"call": {"overlap", "call_chunk_size", "genotype", "genotype_opts",
"filter_opts"}
}
# TODO: Un-blacklist --config and add logic to import the config file and send
# it via the file store to the nodes that actually use toil-vg. Or otherwise
# require special prep code to be run on the master to use this library.
def add_options(parser):
"""
Given an argparse parser or option group, add all the toil-vg configuration
options (for extra vg command flags, Docker containers, and so on).
"""
# Add all the non-blacklisted toil-vg common options
common_group = parser.add_argument_group("Toil VG configuration",
"options to configure the Toil VG wrapper")
toil_vg.vg_common.add_container_tool_parse_args(OptionFilter(common_group,
option_blacklist["common"]))
toil_vg.vg_common.add_common_vg_parse_args(OptionFilter(common_group,
option_blacklist["common"]))
# Add all the non-blacklisted vg index options to this group
index_group = parser.add_argument_group("VG Indexing",
"options to configure involations of `vg index`")
toil_vg.vg_index.index_parse_args(OptionFilter(index_group,
option_blacklist["index"]))
# And the call options
call_group = parser.add_argument_group("VG Calling",
"options to configure involations of `vg call`")
toil_vg.vg_call.chunked_call_parse_args(OptionFilter(call_group,
option_blacklist["call"]))
def initialize(options):
"""
Start up the Toil VG system on the master. Imports a bunch of config file
defaults into the options.
"""
logging.info("Using Toil VG from {}".format(toil_vg.__file__))
# Apply the config file
processed_options = toil_vg.vg_config.apply_config_file_args(options)
# Apply the changes back to the original options
options.__dict__ = processed_options.__dict__
# Make a command runner that uses Docker (or Singularity)
options.drunner = toil_vg.vg_common.ContainerRunner(
container_tool_map = toil_vg.vg_common.get_container_tool_map(options))
def sanitize_options(cli_options):
"""
Since Toil VG uses the command line options namespace thingy as a sort of
general context, we will need to feed one into every call.
However, since we removed some command-line options, our caller might feed
us an options object with those set (because it re-used those option names).
So we have to strip them out.
"""
# We'll fill this in
sanitized = argparse.Namespace()
# We compute a global blacklist of options that some toil vg function
# shouldn't get.
global_blacklist = set()
for local_blacklist in option_blacklist.itervalues():
for item in local_blacklist:
# We should strip this out
global_blacklist.add(item)
for key, value in vars(cli_options).iteritems():
# For everything we got fed
if key.strip("-") in global_blacklist:
# Blacklisted options get skipped
continue
# Copy everything else over
vars(sanitized)[key] = value
return sanitized
def xg_index_job(job, options, vg_ids):
"""
Index the given VG graphs into an XG file. Returns the ID of the XG file.
Automatically sets the correct resource requirements based on the config
passed via options.
Internally uses toil_vg to perform the indexing.
"""
# Do any options manipulation we need to do
# Strip out stuff we don't want and apply config defaults
options = sanitize_options(options)
# Add the outstore, which we have sort of disabled. It insists on writing
# stuff, so just drop it in the current directory. It doesn't read it back.
options.out_store = "file:."
# Don't use outstore instead of the file store
options.force_outstore = False
# Pretend we're the pipeline tool
options.tool = "pipeline"
# Add stuff that toil vg index uses
# options.chroms has to have a name for every graph, to save it under in the
# local temp dir.
options.chroms = ["graph{}".format(i) for i in xrange(len(vg_ids))]
# options.index_name has to have the basename for the .xg in the local temp
# dir.
options.index_name = "xgindex"
return job.addChildJobFn(toil_vg.vg_index.run_xg_indexing, options,
vg_ids, cores=options.xg_index_cores, memory=options.xg_index_mem,
disk=options.xg_index_disk).rv()
def gcsa_index_job(job, options, vg_ids, primary_path_names=None):
"""
Index the given graphs into a GCSA/LCP index, and return a pair of file IDs
for the GCSA and the LCP files.
Will prune the graph before indexing unless options.prune_opts is explicitly
set as an empty list.
"""
# Do any options manipulation we need to do
# Strip out stuff we don't want and apply config defaults
options = sanitize_options(options)
# Add the outstore, which we have sort of disabled. It insists on writing
# stuff, so just drop it in the current directory. It doesn't read it back.
options.out_store = "file:."
# Don't use outstore instead of the file store
options.force_outstore = False
# Pretend we're the pipeline tool
options.tool = "pipeline"
# Add stuff that toil vg index uses
# options.graphs has to have a name for every graph, to save it under in the
# local temp dir.
options.graphs = ["graph{}".format(i) for i in xrange(len(vg_ids))]
# We also need a "chroms" giving the primary path for each graph. It's OK if
# the path doesn't exist in a given graph, but if it does it will be added
# to the index.
    # We have primary path names to use. We can just try and retain all the
# paths in all graphs.
RealtimeLogger.info("Want to GCSA-index {} with paths {}".format(
vg_ids, primary_path_names))
# Fake path names
options.chroms = ["fake{}".format(i) for i in xrange(len(vg_ids))]
# options.index_name has to have the basename for the .gcsa in the local
# temp dir.
options.index_name = "gcsaindex"
return job.addChildJobFn(toil_vg.vg_index.run_gcsa_prep, options, vg_ids,
primary_path_override=primary_path_names,
cores=options.misc_cores, memory=options.misc_mem,
disk=options.misc_disk).rv()
def vg_call_job(job, options, vg_id, pileup_id, vcf=False, primary_paths=[]):
"""
Given a vg file ID and a pileup file ID, produce variant calls in Locus or
VCF format. Returns a pair of the VCF or locus file ID and the augmented
graph file ID.
    If vcf is true, returns VCF format. Otherwise, returns Locus format. If
primary_paths is non-empty, passes those primary path names to vg call to
override its autodetection logic.
Handles requirements itself.
TODO: change toil-vg to allow not forcing a single contig and actually use
it.
"""
return job.addChildJobFn(run_vg_call, options, vg_id, pileup_id, vcf,
primary_paths,
cores=options.calling_cores, memory="100G", disk="50G").rv()
def run_vg_call(job, options, vg_id, pileup_id, vcf, primary_paths):
"""
    Actually run vg call on a given pileup. Separate toil-vg-style job to make
requirement abstraction work.
"""
# Define a work_dir so Docker can work
work_dir = job.fileStore.getLocalTempDir()
# Download the vg
vg_filename = "hgvm.vg"
job.fileStore.readGlobalFile(vg_id, os.path.join(work_dir, vg_filename))
# Download the pileup
pileup_filename = "pileup.vgpu"
job.fileStore.readGlobalFile(pileup_id, os.path.join(work_dir,
pileup_filename))
# Pick an augmented graph filename
augmented_filename = "augmented.vg"
# Make arguments to annotate all the reference paths
ref_args = []
for ref_path in primary_paths:
# For every primary path we have defined, tell the caller to use it as a
# reference path.
ref_args.append("--ref")
ref_args.append(ref_path)
# Set up the VG run. Make sure to pass any user-defined call options that
# configure thresholds and filters.
vg_args = (["vg", "call", "-t", str(options.calling_cores), "--aug-graph",
augmented_filename, vg_filename, pileup_filename] + ref_args +
options.call_opts)
if not vcf:
        # Don't make a VCF
vg_args.append("--no-vcf")
with job.fileStore.writeGlobalFileStream() as (output_handle, output_id):
# Call and stream the Locus or VCF data to the file store
options.drunner.call(job, [vg_args], outfile=output_handle,
work_dir=work_dir)
# Upload the augmented graph
augmented_id = job.fileStore.writeGlobalFile(
os.path.join(work_dir, augmented_filename))
# Return the file IDs
return output_id, augmented_id
def id_range_job(job, options, vg_id):
"""
Find the first and last ID in the given VG file and return them as a tuple.
"""
# Strip out stuff we don't want and apply config defaults
options = sanitize_options(options)
# We need an options.chroms, because the job we're running returns an entry
# form it.
options.chroms = [None]
# Don't use outstore instead of the file store
options.force_outstore = False
# Run the job and return the start and end IDs as a pair of ints (dropping
# the chrom name)
return ToilPromise.wrap(job.addChildJobFn(toil_vg.vg_index.run_id_range,
options, 0, vg_id,
cores=options.misc_cores, memory=options.misc_mem,
disk=options.misc_disk)
).then(lambda (name, start, end): (int(start), int(end))
).unwrap_result()
def id_increment_job(job, options, vg_id, distance):
"""
Increment all the node IDs in the given vg graph by the given distance.
Return a new vg graph file ID.
Not actually in toil-vg, but we put it here so all the vg-touching functions
can live in one place.
"""
if distance == 0:
# No need to shift at all
return vg_id
# Strip out stuff we don't want and apply config defaults
options = sanitize_options(options)
# We need an options.chroms, because the job we're running uses it for local
# filenames
options.chroms = ["increment"]
# Don't use outstore instead of the file store
options.force_outstore = False
return job.addChildJobFn(run_id_increment, options, 0, vg_id, distance,
cores=options.misc_cores, memory=options.misc_mem,
disk=options.misc_disk).rv()
def run_id_increment(job, options, graph_i, graph_id, distance):
"""
Actually do the ID incrementing. Is a separate, toil-vg-style job so it
can be added to toil-vg and so we can set the correct resource requirements.
"""
RealtimeLogger.info("Starting graph shift...")
start_time = timeit.default_timer()
work_dir = job.fileStore.getLocalTempDir()
# download graph
graph_filename = os.path.join(work_dir, '{}.vg'.format(
options.chroms[graph_i]))
toil_vg.vg_common.read_from_store(job, options, graph_id, graph_filename)
# Output
output_graph_filename = graph_filename + '.shifted.vg'
RealtimeLogger.info("Moving {} up by {} to {}".format(
graph_filename, distance, output_graph_filename))
with open(output_graph_filename, "w") as out_file:
command = ['vg', 'ids', '--increment', str(distance),
os.path.basename(graph_filename)]
options.drunner.call(job, command, work_dir=work_dir, outfile=out_file)
# Back to store
output_graph_id = toil_vg.vg_common.write_to_store(job, options,
output_graph_filename)
end_time = timeit.default_timer()
run_time = end_time - start_time
RealtimeLogger.info("Finished graph shift. Process took {} seconds.".format(
run_time))
return output_graph_id
| apache-2.0 | -7,453,239,172,026,143,000 | 32.231481 | 80 | 0.654012 | false |
mmeslab/linux-nctusde | compute_nctuss_related_function_size.py | 1 | 1314 | #!/usr/bin/python
nctuss_symbol_names = ["nctuss_poll_emacps",
"nctuss_xemacps_tx_poll",
"xemacps_tx_hwtstamp",
"nctuss_xemacps_rx",
"nctuss_xemacps_send_skb",
"nctuss_xemacps_start_xmit",
"xemacps_clear_csum",
"nctuss_xemacps_return_skb",
"nctuss_skb_pool_return",
"nctuss_gt_stop",
"nctuss_gt_resume",
"nctuss_gt_get_gt_counter_base",
"nctuss_gt_get_counter_value",
"nctuss_ttc_stop",
"nctuss_ttc_resume",
"nctuss_smp_invoke_function",
#"wakeup_softirqd",
"skb_push",
"skb_reset_transport_header", # inline function
"udp_hdr", # inline function
"skb_reset_network_header", # inline function
"skb_reset_mac_header", # inline function
"skb_reserve", # inline function
]
if __name__ == '__main__':
f = open("System.map", "r")
totalSize = 0
symbolNamesFound = 0
lines = f.readlines()
for i in range(0, len(lines)):
line = lines[i]
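        # System.map lines look like "c0008000 T _text": an 8-hex-digit (32-bit)
        # address, a type character, then the symbol name from column 12 onward,
        # which is why the name is sliced out with line[11:-1].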
symbol_name = line[11:-1]
if symbol_name in nctuss_symbol_names:
print symbol_name
address = int(line[0:8], 16)
addressNext = int(lines[i+1][0:8], 16)
size = addressNext - address
totalSize += size
symbolNamesFound += 1
print "totalSize: %s" % (totalSize)
print "symbolNamesFound: %s" % (symbolNamesFound)
f.close()
| gpl-2.0 | -1,664,718,970,826,183,700 | 22.464286 | 52 | 0.616438 | false |
bxlab/HiFive_Paper | Scripts/hifive-1.1.3/hifive/hic_binning.py | 1 | 92093 | #!/usr/bin/env python
"""
This is a module contains scripts for generating compact, upper-triangle and full matrices of HiC interaction data.
Concepts
--------
These functions rely on the :class:`HiC` class in conjunction with the :class:`Fend` and :class:`HiCData` classes.
Data can either be arranged in compact, complete, or flattened (row-major) upper-triangle arrays. Compact arrays are N x M, where N is the number of fends or bins, and M is the maximum distance between fends or bins. This is useful for working with sets of short interactions. Data can be raw, fend-corrected, distance-dependence removed, or enrichment values. Arrays are 3-dimensional with observed values in the first layer of d3, expected values in the second layer of d3. The exception to this is upper-triangle arrays, which are 2d, dividing observed and expected along the second axis.
API Documentation
-----------------
"""
import os
import sys
import subprocess
import numpy
import h5py
try:
from mpi4py import MPI
except:
pass
from libraries._hic_interactions import find_max_fend
import libraries._hic_binning as _hic_binning
def find_cis_signal(hic, chrom, binsize=10000, binbounds=None, start=None, stop=None, startfend=None, stopfend=None,
datatype='enrichment', arraytype='compact', maxdistance=0, skipfiltered=False, returnmapping=False,
proportional=False, **kwargs):
"""
Create an array of format 'arraytype' and fill with data requested in 'datatype'.
:param hic: A :class:`HiC <hifive.hic.HiC>` class object containing fend and count data.
:type hic: :class:`HiC <hifive.hic.HiC>`
:param chrom: The name of a chromosome contained in 'hic'.
:type chrom: str.
:param binsize: This is the coordinate width of each bin. A value of zero indicates unbinned. If binbounds is not None, this value is ignored.
:type binsize: int.
:param binbounds: An array containing start and stop coordinates for a set of user-defined bins. Any fend not falling in a bin is ignored.
:type binbounds: numpy array
:param start: The smallest coordinate to include in the array, measured from fend midpoints or the start of the first bin. If 'binbounds' is given, this value is ignored. If both 'start' and 'startfend' are given, 'start' will override 'startfend'. If unspecified, this will be set to the midpoint of the first fend for 'chrom', adjusted to the first multiple of 'binsize' if not zero. Optional.
:type start: int.
:param stop: The largest coordinate to include in the array, measured from fend midpoints or the end of the last bin. If 'binbounds' is given, this value is ignored. If both 'stop' and 'stopfend' are given, 'stop' will override 'stopfend'. If unspecified, this will be set to the midpoint of the last fend plus one for 'chrom', adjusted to the last multiple of 'start' + 'binsize' if not zero. Optional.
:type stop: int.
:param startfend: The first fend to include in the array. If 'binbounds' is given, this value is ignored. If unspecified and 'start' is not given, this is set to the first valid fend in 'chrom'. In cases where 'start' is specified and conflicts with 'startfend', 'start' is given preference. Optional
:type startfend: int.
:param stopfend: The first fend not to include in the array. If 'binbounds' is given, this value is ignored. If unspecified and 'stop' is not given, this is set to the last valid fend in 'chrom' plus one. In cases where 'stop' is specified and conflicts with 'stopfend', 'stop' is given preference. Optional.
:type stopfend: str.
:param datatype: This specifies the type of data that is processed and returned. Options are 'raw', 'distance', 'fend', 'enrichment', and 'expected'. Observed values are always in the first index along the last axis, except when 'datatype' is 'expected'. In this case, filter values replace counts. Conversely, if 'raw' is specified, unfiltered fends return value of one. Expected values are returned for 'distance', 'fend', 'enrichment', and 'expected' values of 'datatype'. 'distance' uses only the expected signal given distance for calculating the expected values, 'fend' uses only fend correction values, and both 'enrichment' and 'expected' use both correction and distance mean values.
:type datatype: str.
:param arraytype: This determines what shape of array data are returned in. Acceptable values are 'compact', 'full', and 'upper'. 'compact' means data are arranged in a N x M x 2 array where N is the number of bins, M is the maximum number of steps between included bin pairs, and data are stored such that bin n,m contains the interaction values between n and n + m + 1. 'full' returns a square, symmetric array of size N x N x 2. 'upper' returns only the flattened upper triangle of a full array, excluding the diagonal of size (N * (N - 1) / 2) x 2.
:type arraytype: str.
:param maxdistance: This specifies the maximum coordinate distance between bins that will be included in the array. If set to zero, all distances are included.
:type maxdistance: str.
:param skipfiltered: If 'True', all interaction bins for filtered out fends are removed and a reduced-size array is returned.
:type skipfiltered: bool.
:param returnmapping: If 'True', a list containing the data array and a 2d array containing first coordinate included and excluded from each bin, and the first fend included and excluded from each bin is returned. Otherwise only the data array is returned.
:type returnmapping: bool.
:param proportional: Indicates whether interactions should proportionally contribute to bins based on the amount of overlap instead of being attributed solely based on midpoint. Only valid for binned heatmaps.
:type proportional: bool.
:returns: Array in format requested with 'arraytype' containing data requested with 'datatype'.
"""
if 'silent' in kwargs:
silent = kwargs['silent']
else:
silent = False
# check that all values are acceptable
if datatype not in ['raw', 'fend', 'distance', 'enrichment', 'expected']:
if not silent:
print >> sys.stderr, ("Datatype given is not recognized. No data returned\n"),
return None
elif datatype in ['fend', 'enrichment'] and hic.normalization == 'none':
if not silent:
print >> sys.stderr, ("Normalization has not been performed yet on this project. Select either 'raw' or 'distance' for datatype. No data returned\n"),
return None
elif datatype in ['distance', 'enrichment'] and hic.distance_parameters is None:
if not silent:
print >> sys.stderr, ("No distance-dependence relationship has been calculated for this project yet. Select either 'raw' or 'fend' for datatype. No data returned\n"),
return None
if arraytype not in ['full', 'compact', 'upper']:
if not silent:
print >> sys.stderr, ("Unrecognized array type. No data returned.\n"),
return None
# Determine start, stop, startfend, and stopfend
chrint = hic.chr2int[chrom.strip('chr')]
if not binbounds is None:
start = binbounds[0, 0]
stop = binbounds[-1, 1]
startfend = _find_fend_from_coord(hic, chrint, start)
stopfend = _find_fend_from_coord(hic, chrint, stop) + 1
else:
if start is None and startfend is None:
startfend = hic.fends['chr_indices'][chrint]
while startfend < hic.fends['chr_indices'][chrint + 1] and hic.filter[startfend] == 0:
startfend += 1
if startfend == hic.fends['chr_indices'][chrint + 1]:
if not silent:
print >> sys.stderr, ("Insufficient data.\n"),
return None
start = hic.fends['fends']['mid'][startfend]
if binsize > 0:
start = (start / binsize) * binsize
elif start is None:
start = hic.fends['fends']['mid'][startfend]
if binsize > 0:
start = (start / binsize) * binsize
else:
startfend = _find_fend_from_coord(hic, chrint, start)
if (stop is None or stop == 0) and stopfend is None:
stopfend = hic.fends['chr_indices'][chrint + 1]
while stopfend > hic.fends['chr_indices'][chrint] and hic.filter[stopfend - 1] == 0:
stopfend -= 1
stop = hic.fends['fends']['mid'][stopfend - 1]
if binsize > 0:
stop = ((stop - 1 - start) / binsize + 1) * binsize + start
elif stop is None or stop == 0:
stop = hic.fends['fends']['mid'][stopfend - 1]
if binsize > 0:
stop = ((stop - 1 - start) / binsize + 1) * binsize + start
else:
if binsize > 0:
stop = ((stop - 1 - start) / binsize + 1) * binsize + start
stopfend = _find_fend_from_coord(hic, chrint, stop) + 1
if not silent:
print >> sys.stderr, ("Finding %s %s array for %s:%i-%i...") % (datatype, arraytype, chrom, start, stop),
# If datatype is not 'expected', pull the needed slice of data
if datatype != 'expected':
start_index = hic.data['cis_indices'][startfend]
stop_index = hic.data['cis_indices'][stopfend]
if start_index == stop_index:
if not silent:
print >> sys.stderr, ("Insufficient data\n"),
return None
data_indices = hic.data['cis_indices'][startfend:(stopfend + 1)]
data_indices -= data_indices[0]
data = hic.data['cis_data'][start_index:stop_index, :]
data[:, :2] -= startfend
else:
data_indices = None
data = None
# Determine mapping of valid fends to bins
mapping = numpy.zeros(stopfend - startfend, dtype=numpy.int32) - 1
valid = numpy.where(hic.filter[startfend:stopfend] > 0)[0]
mids = hic.fends['fends']['mid'][startfend:stopfend]
if binsize == 0 and binbounds is None:
if skipfiltered:
mapping[valid] = numpy.arange(valid.shape[0])
num_bins = valid.shape[0]
else:
mapping[valid] = valid
num_bins = mapping.shape[0]
elif not binbounds is None:
start_indices = numpy.searchsorted(binbounds[:, 0], mids[valid], side='right') - 1
stop_indices = numpy.searchsorted(binbounds[:, 1], mids[valid], side='right')
where = numpy.where(start_indices == stop_indices)[0]
valid = valid[where]
mapping[valid] = start_indices[where]
num_bins = binbounds.shape[0]
else:
mapping[valid] = (mids[valid] - start) / binsize
num_bins = (stop - start) / binsize
# Find maximum interaction partner for each fend
if num_bins < 2:
if not silent:
print >> sys.stderr, ("Insufficient data\n"),
return None
max_fend = numpy.zeros(mapping.shape[0], dtype=numpy.int32)
find_max_fend(max_fend, mids, hic.fends['fends']['chr'][startfend:stopfend],
hic.fends['chr_indices'][...], startfend, maxdistance)
max_fend = numpy.minimum(max_fend, mapping.shape[0])
if binsize == 0:
max_bin = numpy.amax(max_fend - numpy.arange(mapping.shape[0]))
if max_bin <= 0:
if not silent:
print >> sys.stderr, ("Insufficient data.\n"),
return None
else:
if maxdistance == 0:
max_bin = num_bins - 1
else:
max_bin = maxdistance / binsize
# If correction is required, determine what type and get appropriate data
if 'binning' not in hic.normalization and datatype != 'raw':
corrections = hic.corrections[startfend:stopfend]
elif datatype == 'raw':
corrections = numpy.ones(stopfend - startfend, dtype=numpy.float32)
else:
corrections = None
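    # In these cases the expected signal factors into per-fend corrections only
    # (or is uniform for 'raw' data), so per-bin correction sums are precomputed
    # and handed to the C helpers instead of pair-by-pair correction products.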
if ((hic.normalization in ['express', 'probability'] and
datatype == 'fend') or datatype == 'raw') and maxdistance == 0:
if datatype == 'fend':
correction_sums = numpy.bincount(mapping[valid], weights=corrections[valid],
minlength=num_bins).astype(numpy.float64)
else:
correction_sums = numpy.bincount(mapping[valid], minlength=num_bins).astype(numpy.float64)
else:
correction_sums = None
if 'binning' in hic.normalization and datatype not in ['raw', 'distance']:
binning_corrections = hic.binning_corrections
binning_num_bins = hic.binning_num_bins
fend_indices = hic.binning_fend_indices
else:
binning_corrections = None
binning_num_bins = None
fend_indices = None
if datatype in ['distance', 'enrichment', 'expected']:
distance_parameters = hic.distance_parameters
chrom_mean = hic.chromosome_means[chrint]
else:
distance_parameters = None
chrom_mean = 0.0
# If proportional is requested, find bin ranges
if proportional and binsize > 0:
fends = hic.fends['fends'][startfend:stopfend]
ranges = numpy.zeros((mapping.shape[0], 2), dtype=numpy.int32)
overlap = numpy.zeros(ranges.shape, dtype=numpy.float32)
positions = numpy.arange(1, 1 + num_bins) * binsize + start
ranges[:, 0] = numpy.searchsorted(positions, fends['start'])
ranges[:, 1] = numpy.searchsorted(positions[:-1], fends['stop'])
where = numpy.where(ranges[:, 0] < ranges[:, 1])[0]
overlap[where, 0] = numpy.minimum(positions[ranges[where, 0]] - fends['start'][where],
binsize) / float(binsize)
overlap[where, 1] = numpy.minimum(fends['stop'][where] - positions[ranges[where, 1]] + binsize,
binsize) / float(binsize)
where = numpy.where(ranges[:, 0] == ranges[:, 1])[0]
overlap[where, 0] = (fends['stop'][where] - fends['start'][where]) / float(binsize)
else:
ranges = None
overlap = None
# Create requested array
if arraytype == 'compact':
data_array = numpy.zeros((num_bins, max_bin, 2), dtype=numpy.float32)
else:
data_array = numpy.zeros((num_bins * (num_bins - 1) / 2, 2), dtype=numpy.float32)
# Fill in data values
if arraytype == 'compact':
_hic_binning.find_cis_compact_expected(mapping, corrections, binning_corrections,
binning_num_bins, fend_indices, mids, distance_parameters,
max_fend, data_array, correction_sums, ranges, overlap,
chrom_mean, startfend)
if datatype != 'expected':
_hic_binning.find_cis_compact_observed(data, data_indices, mapping, max_fend, data_array, ranges, overlap)
else:
data_array[:, :, 0] = data_array[:, :, 1]
data_array[:, :, 1].fill(0)
correction_sums = numpy.bincount(mapping[valid], minlength=num_bins).astype(numpy.float64)
corrections.fill(1)
_hic_binning.find_cis_compact_expected(mapping, corrections, None, None, None, mids, None,
max_fend, data_array, correction_sums, ranges, overlap,
chrom_mean, startfend)
data_array = data_array[:, :, ::-1]
else:
_hic_binning.find_cis_upper_expected(mapping, corrections, binning_corrections,
binning_num_bins, fend_indices, mids, distance_parameters,
max_fend, data_array, correction_sums, ranges, overlap,
chrom_mean, startfend)
if datatype != 'expected':
_hic_binning.find_cis_upper_observed(data, data_indices, mapping, max_fend, data_array, ranges, overlap)
else:
data_array[:, 0] = data_array[:, 1]
data_array[:, 1].fill(0)
correction_sums = numpy.bincount(mapping[valid], minlength=num_bins).astype(numpy.float64)
corrections.fill(1)
_hic_binning.find_cis_upper_expected(mapping, corrections, None, None, None, mids, None,
max_fend, data_array, correction_sums, ranges, overlap,
chrom_mean, startfend)
data_array = data_array[:, ::-1]
# If requesting 'full' array, convert 'upper' array type to 'full'
if arraytype == 'full':
indices = numpy.triu_indices(num_bins, 1)
full_data_array = numpy.zeros((num_bins, num_bins, 2), dtype=numpy.float32)
full_data_array[indices[1], indices[0], :] = data_array
full_data_array[indices[0], indices[1], :] = data_array
del data_array
data_array = full_data_array
if returnmapping:
bin_mapping = numpy.zeros((num_bins, 4), dtype=numpy.int32)
if binsize == 0 and binbounds is None:
if skipfiltered:
bin_mapping[:, 2] = valid + startfend
else:
bin_mapping[:, 2] = numpy.arange(startfend, stopfend)
bin_mapping[:, 3] = bin_mapping[:, 2] + 1
bin_mapping[:, 0] = hic.fends['fends']['start'][bin_mapping[:, 2]]
bin_mapping[:, 1] = hic.fends['fends']['stop'][bin_mapping[:, 2]]
else:
if binbounds is None:
bin_mapping[:, 0] = start + binsize * numpy.arange(num_bins)
bin_mapping[:, 1] = bin_mapping[:, 0] + binsize
else:
bin_mapping[:, :2] = binbounds
bin_mapping[:, 2] = numpy.searchsorted(mids, bin_mapping[:, 0]) + startfend
bin_mapping[:, 3] = numpy.searchsorted(mids, bin_mapping[:, 1]) + startfend
if not silent:
print >> sys.stderr, ("Done\n"),
return [data_array, bin_mapping]
else:
if not silent:
print >> sys.stderr, ("Done\n"),
return data_array
def _find_fend_from_coord(hic, chrint, coord):
"""Find the next fend after the coordinate on chromosome 'chrint'."""
first_fend = hic.fends['chr_indices'][chrint]
last_fend = hic.fends['chr_indices'][chrint + 1]
return numpy.searchsorted(hic.fends['fends']['mid'][first_fend:last_fend], coord) + first_fend
def bin_cis_array(data_array, data_mapping, binsize=10000, binbounds=None, start=None, stop=None, arraytype='full',
returnmapping=False, **kwargs):
"""
Create an array of format 'arraytype' and fill 'binsize' bins or bins defined by 'binbounds' with data provided in the array passed by 'data_array'.
:param data_array: A 2d (upper) or 3d (compact) array containing data to be binned. Array format will be determined from the number of dimensions.
:type data_array: numpy array
:param data_mapping: An N x 4 2d integer array containing the start and stop coordinates, and start and stop fends for each of the N bin ranges in 'data_array'.
:type data_mapping: numpy array
:param binsize: This is the coordinate width of each bin. If binbounds is not None, this value is ignored.
:type binsize: int.
:param binbounds: An array containing start and stop coordinates for a set of user-defined bins. Any bin from 'data_array' not falling in a bin is ignored.
:type binbounds: numpy array
:param start: The coordinate at the beginning of the first bin of the binned data. If unspecified, 'start' will be the first multiple of 'binsize' below the first coordinate from 'data_mapping'. If 'binbounds' is given, 'start' is ignored. Optional.
:type start: int.
:param stop: The coordinate at the end of the last bin of the binned data. If unspecified, 'stop' will be the first multiple of 'binsize' after the last coordinate from 'data_mapping'. If needed, 'stop' is adjusted upward to create a complete last bin. If 'binbounds' is given, 'stop' is ignored. Optional.
:type stop: int.
:param arraytype: This determines what shape of array data are returned in. Acceptable values are 'compact', 'full', and 'upper'. 'compact' means data are arranged in a N x M x 2 array where N is the number of bins, M is the maximum number of steps between included bin pairs, and data are stored such that bin n,m contains the interaction values between n and n + m + 1. 'full' returns a square, symmetric array of size N x N x 2. 'upper' returns only the flattened upper triangle of a full array, excluding the diagonal of size (N * (N - 1) / 2) x 2.
:type arraytype: str.
:param returnmapping: If 'True', a list containing the data array and a 2d array containing first coordinate included and excluded from each bin, and the first fend included and excluded from each bin is returned. Otherwise only the data array is returned.
:type returnmapping: bool.
:returns: Array in format requested with 'arraytype' containing binned data requested with 'datatype' pulled from 'data_array' or list of binned data array and mapping array.
"""
if 'silent' in kwargs and kwargs['silent']:
silent = True
else:
silent = False
# check that arraytype value is acceptable
if arraytype not in ['full', 'compact', 'upper']:
if not silent:
print >> sys.stderr, ("Unrecognized array type. No data returned.\n"),
return None
# Determine input array type
if len(data_array.shape) == 2 and data_mapping.shape[0] * (data_mapping.shape[0] - 1) / 2 == data_array.shape[0]:
input_type = 'upper'
elif len(data_array.shape) == 3 and data_array.shape[0] == data_mapping.shape[0]:
input_type = 'compact'
else:
if not silent:
print >> sys.stderr, ("Unrecognized input array type. No data returned.\n"),
return None
# Determine start and stop, if necessary
if binbounds is None:
if start is None:
start = (data_mapping[0, 0] / binsize) * binsize
if stop is None:
stop = ((data_mapping[-1, 1] - 1) / binsize + 1) * binsize
else:
stop = ((stop - 1 - start) / binsize + 1) * binsize + start
num_bins = (stop - start) / binsize
binbounds = numpy.zeros((num_bins, 2), dtype=numpy.int32)
binbounds[:, 0] = numpy.arange(num_bins) * binsize + start
binbounds[:, 1] = binbounds[:, 0] + binsize
else:
num_bins = binbounds.shape[0]
start = binbounds[0, 0]
stop = binbounds[0, 1]
mids = (data_mapping[:, 0] + data_mapping[:, 1]) / 2
if not silent:
print >> sys.stderr, ("Finding binned %s array...") % (arraytype),
# Find bin mapping for each fend
mapping = numpy.zeros(mids.shape[0], dtype=numpy.int32) - 1
fend_ranges = numpy.zeros((binbounds.shape[0], 2), dtype=numpy.int32)
for i in range(binbounds.shape[0]):
firstbin = numpy.searchsorted(mids, binbounds[i, 0])
lastbin = numpy.searchsorted(mids, binbounds[i, 1])
mapping[firstbin:lastbin] = i
fend_ranges[i, 0] = data_mapping[firstbin, 2]
fend_ranges[i, 1] = data_mapping[lastbin, 3]
# Create requested array
if arraytype == 'compact':
max_bin = (stop - start) / binsize + 1
binned_array = numpy.zeros((num_bins, max_bin, 2), dtype=numpy.float32)
else:
binned_array = numpy.zeros((num_bins * (num_bins - 1) / 2, 2), dtype=numpy.float32)
# Fill in binned data values
if arraytype == 'compact':
if input_type == 'compact':
_hic_binning.bin_compact_to_compact(binned_array, data_array, mapping)
else:
_hic_binning.bin_upper_to_compact(binned_array, data_array, mapping)
# Trim unused bins
valid = numpy.where(numpy.sum(binned_array[:, :, 1] > 0, axis=0) > 0)[0][-1]
binned_array = binned_array[:, :(valid + 1), :]
else:
if input_type == 'compact':
_hic_binning.bin_compact_to_upper(binned_array, data_array, mapping, num_bins)
else:
_hic_binning.bin_upper_to_upper(binned_array, data_array, mapping, num_bins)
# If requesting 'full' array, convert 'upper' array type to 'full'
if arraytype == 'full':
indices = numpy.triu_indices(num_bins, 1)
full_binned_array = numpy.zeros((num_bins, num_bins, 2), dtype=numpy.float32)
full_binned_array[indices[1], indices[0], :] = binned_array
full_binned_array[indices[0], indices[1], :] = binned_array
del binned_array
binned_array = full_binned_array
# If mapping requested, calculate bin bounds
if returnmapping:
mapping = numpy.zeros((num_bins, 4), dtype=numpy.int32)
mapping[:, 0] = binbounds[:, 0]
mapping[:, 1] = binbounds[:, 1]
mapping[:, 2:4] = fend_ranges
if not silent:
print >> sys.stderr, ("Done\n"),
return [binned_array, mapping]
else:
if not silent:
print >> sys.stderr, ("Done\n"),
return binned_array
def dynamically_bin_cis_array(unbinned, unbinnedpositions, binned, binbounds, minobservations=10,
searchdistance=0, removefailed=True, **kwargs):
"""
    Expand bins in 'binned' to include additional data provided in 'unbinned' as necessary to meet the 'minobservations' criterion, subject to the 'searchdistance' limit.
:param unbinned: A 2d or 3d array containing data in either compact or upper format to be used for filling expanding bins. Array format will be determined from the number of dimensions.
:type unbinned: numpy array
:param unbinnedpositions: A 2d integer array indicating the first and last coordinate of each bin in 'unbinned' array.
:type unbinnedpositions: numpy array
:param binned: A 2d or 3d array containing binned data in either compact or upper format to be dynamically binned. Array format will be determined from the number of dimensions. Data in this array will be altered by this function.
:type binned: numpy array
:param binbounds: An integer array indicating the start and end position of each bin in 'binned' array. This array should be N x 2, where N is the number of intervals in 'binned'.
:type binbounds: numpy array
    :param minobservations: The fewest number of observed reads needed for a bin to be counted as valid and stop expanding.
    :type minobservations: int.
    :param searchdistance: The furthest distance from the bin midpoint to expand bounds. If this is set to zero, there is no limit on expansion distance.
    :type searchdistance: int.
    :param removefailed: If a non-zero 'searchdistance' is given, it is possible for a bin not to meet the 'minobservations' criteria before the search limit is reached. If this occurs and 'removefailed' is True, the observed and expected values for that bin are set to zero.
    :type removefailed: bool.
:returns: None
"""
if 'silent' in kwargs and kwargs['silent']:
silent = True
else:
silent = False
# Determine unbinned array type
if len(unbinned.shape) == 2 and (unbinnedpositions.shape[0] * (unbinnedpositions.shape[0] - 1) / 2 ==
unbinned.shape[0]):
unbinned_type = 'upper'
elif len(unbinned.shape) == 3 and unbinned.shape[0] == unbinnedpositions.shape[0]:
unbinned_type = 'compact'
else:
if not silent:
print >> sys.stderr, ("Unrecognized unbinned array type. No data returned.\n"),
return None
# Determine binned array type
if len(binned.shape) == 2 and binbounds.shape[0] * (binbounds.shape[0] - 1) / 2 == binned.shape[0]:
binned_type = 'upper'
elif len(binned.shape) == 3 and binned.shape[0] == binbounds.shape[0]:
binned_type = 'compact'
else:
if not silent:
print >> sys.stderr, ("Unrecognized binned array type. No data returned.\n"),
return None
if not silent:
print >> sys.stderr, ("Dynamically binning data..."),
# Determine bin edges relative to unbinned positions
unbinnedmids = (unbinnedpositions[:, 0] + unbinnedpositions[:, 1]) / 2
binedges = numpy.zeros(binbounds.shape, dtype=numpy.int32)
binedges[:, 0] = numpy.searchsorted(unbinnedmids, binbounds[:, 0])
binedges[:, 1] = numpy.searchsorted(unbinnedmids, binbounds[:, 1])
# Determine bin midpoints
mids = (binbounds[:, 0] + binbounds[:, 1]) / 2
# Dynamically bin using appropriate array type combination
if unbinned_type == 'upper':
if binned_type == 'upper':
_hic_binning.dynamically_bin_upper_from_upper(unbinned, unbinnedmids, binned, binedges,
mids, minobservations, searchdistance, int(removefailed))
else:
_hic_binning.dynamically_bin_compact_from_upper(unbinned, unbinnedmids, binned, binedges,
mids, minobservations, searchdistance, int(removefailed))
else:
if binned_type == 'upper':
_hic_binning.dynamically_bin_upper_from_compact(unbinned, unbinnedmids, binned, binedges,
mids, minobservations, searchdistance, int(removefailed))
else:
_hic_binning.dynamically_bin_compact_from_compact(unbinned, unbinnedmids, binned, binedges,
mids, minobservations, searchdistance, int(removefailed))
if not silent:
print >> sys.stderr, ("Done\n"),
return None
def find_trans_signal(hic, chrom1, chrom2, binsize=10000, binbounds1=None, binbounds2=None, start1=None, stop1=None,
startfend1=None, stopfend1=None, start2=None, stop2=None, startfend2=None, stopfend2=None,
datatype='enrichment', skipfiltered=False, returnmapping=False, **kwargs):
"""
    Create a trans-chromosomal interaction array between 'chrom1' and 'chrom2' and fill with data requested in 'datatype'.
    :param hic: A :class:`HiC <hifive.hic.HiC>` class object containing fend and count data.
    :type hic: :class:`HiC <hifive.hic.HiC>`
    :param chrom1: The name of the first chromosome contained in 'hic'.
    :type chrom1: str.
    :param chrom2: The name of the second chromosome contained in 'hic'.
    :type chrom2: str.
    :param binsize: This is the coordinate width of each bin. A value of zero indicates unbinned. If 'binbounds1' or 'binbounds2' is not None, this value is ignored for that axis.
    :type binsize: int.
    :param binbounds1: An array containing start and stop coordinates for a set of user-defined bins along the first axis. Any fend not falling in a bin is ignored.
    :type binbounds1: numpy array
    :param binbounds2: An array containing start and stop coordinates for a set of user-defined bins along the second axis. Any fend not falling in a bin is ignored.
    :type binbounds2: numpy array
    :param start1: The smallest coordinate to include along the first axis, measured from fend midpoints or the start of the first bin. If 'binbounds1' is given, this value is ignored. If both 'start1' and 'startfend1' are given, 'start1' will override 'startfend1'. If unspecified, this will be set to the midpoint of the first fend for 'chrom1', adjusted to the first multiple of 'binsize' if not zero. Optional.
    :type start1: int.
    :param stop1: The largest coordinate to include along the first axis, measured from fend midpoints or the end of the last bin. If 'binbounds1' is given, this value is ignored. If both 'stop1' and 'stopfend1' are given, 'stop1' will override 'stopfend1'. If unspecified, this will be set to the midpoint of the last fend plus one for 'chrom1', adjusted to the last multiple of 'start1' + 'binsize' if not zero. Optional.
    :type stop1: int.
    :param startfend1: The first fend to include along the first axis. If 'binbounds1' is given, this value is ignored. If unspecified and 'start1' is not given, this is set to the first valid fend in 'chrom1'. In cases where 'start1' is specified and conflicts with 'startfend1', 'start1' is given preference. Optional.
    :type startfend1: int.
    :param stopfend1: The first fend not to include along the first axis. If 'binbounds1' is given, this value is ignored. If unspecified and 'stop1' is not given, this is set to the last valid fend in 'chrom1' plus one. In cases where 'stop1' is specified and conflicts with 'stopfend1', 'stop1' is given preference. Optional.
    :type stopfend1: int.
    :param start2: The smallest coordinate to include along the second axis. Follows the same rules as 'start1' but for 'chrom2'. Optional.
    :type start2: int.
    :param stop2: The largest coordinate to include along the second axis. Follows the same rules as 'stop1' but for 'chrom2'. Optional.
    :type stop2: int.
    :param startfend2: The first fend to include along the second axis. Follows the same rules as 'startfend1' but for 'chrom2'. Optional.
    :type startfend2: int.
    :param stopfend2: The first fend not to include along the second axis. Follows the same rules as 'stopfend1' but for 'chrom2'. Optional.
    :type stopfend2: int.
    :param datatype: This specifies the type of data that is processed and returned. Options are 'raw', 'distance', 'fend', 'enrichment', and 'expected'. Observed values are always in the first index along the last axis, except when 'datatype' is 'expected'. In this case, filter values replace counts. Conversely, if 'raw' is specified, unfiltered fends return value of one. Expected values are returned for 'distance', 'fend', 'enrichment', and 'expected' values of 'datatype'. 'distance' uses only the expected signal given distance for calculating the expected values, 'fend' uses only fend correction values, and both 'enrichment' and 'expected' use both correction and distance mean values.
    :type datatype: str.
    :param skipfiltered: If 'True', all interaction bins for filtered out fends are removed and a reduced-size array is returned.
    :type skipfiltered: bool.
    :param returnmapping: If 'True', a list containing the data array and two 2d arrays containing the first coordinate included and excluded from each bin, and the first fend included and excluded from each bin for the first and second axis is returned. Otherwise only the data array is returned.
    :type returnmapping: bool.
    :returns: A 3d numpy array containing data requested with 'datatype', or a list of the data array and the two bin mapping arrays if 'returnmapping' is True.
"""
if 'silent' in kwargs and kwargs['silent']:
silent = True
else:
silent = False
# check that all values are acceptable
if datatype not in ['raw', 'fend', 'distance', 'enrichment', 'expected']:
if not silent:
print >> sys.stderr, ("Datatype given is not recognized. No data returned\n"),
return None
elif datatype in ['fend', 'enrichment'] and hic.normalization == 'none':
if not silent:
print >> sys.stderr, ("Normalization has not been performed yet on this project. Select either 'raw' or 'distance' for datatype. No data returned\n"),
return None
# Determine start, stop, startfend, and stopfend
chrint1 = hic.chr2int[chrom1.strip('chr')]
chrint2 = hic.chr2int[chrom2.strip('chr')]
if not binbounds1 is None:
start1 = binbounds1[0, 0]
stop1 = binbounds1[-1, 1]
startfend1 = _find_fend_from_coord(hic, chrint1, start1)
stopfend1 = _find_fend_from_coord(hic, chrint1, stop1) + 1
else:
if start1 is None and startfend1 is None:
startfend1 = hic.fends['chr_indices'][chrint1]
while startfend1 < hic.fends['chr_indices'][chrint1 + 1] and hic.filter[startfend1] == 0:
startfend1 += 1
if startfend1 == hic.fends['chr_indices'][chrint1 + 1]:
if not silent:
print >> sys.stderr, ("Insufficient data.\n"),
return None
start1 = hic.fends['fends']['mid'][startfend1]
if binsize > 0:
start1 = (start1 / binsize) * binsize
elif start1 is None:
start1 = hic.fends['fends']['mid'][startfend1]
if binsize > 0:
start1 = (start1 / binsize) * binsize
else:
startfend1 = _find_fend_from_coord(hic, chrint1, start1)
if (stop1 is None or stop1 == 0) and stopfend1 is None:
stopfend1 = hic.fends['chr_indices'][chrint1 + 1]
while stopfend1 > hic.fends['chr_indices'][chrint1] and hic.filter[stopfend1 - 1] == 0:
stopfend1 -= 1
stop1 = hic.fends['fends']['mid'][stopfend1 - 1]
if binsize > 0:
stop1 = ((stop1 - 1 - start1) / binsize + 1) * binsize + start1
elif stop1 is None or stop1 == 0:
stop1 = hic.fends['fends']['mid'][stopfend1 - 1]
if binsize > 0:
stop1 = ((stop1 - 1 - start1) / binsize + 1) * binsize + start1
else:
if binsize > 0:
stop1 = ((stop1 - 1 - start1) / binsize + 1) * binsize + start1
stopfend1 = _find_fend_from_coord(hic, chrint1, stop1) + 1
    if not binbounds2 is None:
        start2 = binbounds2[0, 0]
        stop2 = binbounds2[-1, 1]
        startfend2 = _find_fend_from_coord(hic, chrint2, start2)
        stopfend2 = _find_fend_from_coord(hic, chrint2, stop2) + 1
else:
if start2 is None and startfend2 is None:
startfend2 = hic.fends['chr_indices'][chrint2]
while startfend2 < hic.fends['chr_indices'][chrint2 + 1] and hic.filter[startfend2] == 0:
startfend2 += 1
if startfend2 == hic.fends['chr_indices'][chrint2 + 1]:
if not silent:
print >> sys.stderr, ("Insufficient data.\n"),
return None
start2 = hic.fends['fends']['mid'][startfend2]
if binsize > 0:
start2 = (start2 / binsize) * binsize
elif start2 is None:
start2 = hic.fends['fends']['mid'][startfend2]
if binsize > 0:
start2 = (start2 / binsize) * binsize
else:
startfend2 = _find_fend_from_coord(hic, chrint2, start2)
if (stop2 is None or stop2 == 0) and stopfend2 is None:
stopfend2 = hic.fends['chr_indices'][chrint2 + 1]
while stopfend2 > hic.fends['chr_indices'][chrint2] and hic.filter[stopfend2 - 1] == 0:
stopfend2 -= 1
stop2 = hic.fends['fends']['mid'][stopfend2 - 1]
if binsize > 0:
stop2 = ((stop2 - 1 - start2) / binsize + 1) * binsize + start2
elif stop2 is None or stop2 == 0:
stop2 = hic.fends['fends']['mid'][stopfend2 - 1]
if binsize > 0:
stop2 = ((stop2 - 1 - start2) / binsize + 1) * binsize + start2
else:
if binsize > 0:
stop2 = ((stop2 - 1 - start2) / binsize + 1) * binsize + start2
stopfend2 = _find_fend_from_coord(hic, chrint2, stop2) + 1
if not silent:
print >> sys.stderr, ("Finding %s array for %s:%i-%i by %s:%i-%i...") % (datatype, chrom1,
start1, stop1, chrom2, start2,
stop2),
# If datatype is not 'expected', pull the needed slice of data
if datatype != 'expected':
if chrint1 < chrint2:
start_index = hic.data['trans_indices'][startfend1]
stop_index = hic.data['trans_indices'][stopfend1]
else:
start_index = hic.data['trans_indices'][startfend2]
stop_index = hic.data['trans_indices'][stopfend2]
if start_index == stop_index:
if not silent:
print >> sys.stderr, ("Insufficient data\n"),
return None
if chrint1 < chrint2:
data_indices = hic.data['trans_indices'][startfend1:(stopfend1 + 1)]
else:
data_indices = hic.data['trans_indices'][startfend2:(stopfend2 + 1)]
data_indices -= data_indices[0]
data = hic.data['trans_data'][start_index:stop_index, :]
if chrint1 < chrint2:
data[:, 0] -= startfend1
data[:, 1] -= startfend2
else:
data[:, 0] -= startfend2
data[:, 1] -= startfend1
else:
data_indices = None
data = None
# Determine mapping of valid fends to bins
mapping1 = numpy.zeros(stopfend1 - startfend1, dtype=numpy.int32) - 1
mapping2 = numpy.zeros(stopfend2 - startfend2, dtype=numpy.int32) - 1
valid1 = numpy.where(hic.filter[startfend1:stopfend1] > 0)[0].astype(numpy.int32)
valid2 = numpy.where(hic.filter[startfend2:stopfend2] > 0)[0].astype(numpy.int32)
mids1 = hic.fends['fends']['mid'][startfend1:stopfend1]
mids2 = hic.fends['fends']['mid'][startfend2:stopfend2]
if binsize == 0 and binbounds1 is None:
if skipfiltered:
mapping1[valid1] = numpy.arange(valid1.shape[0])
num_bins1 = valid1.shape[0]
else:
mapping1[valid1] = valid1
num_bins1 = mapping1.shape[0]
elif not binbounds1 is None:
start_indices = numpy.searchsorted(binbounds1[:, 0], mids1[valid1], side='right') - 1
stop_indices = numpy.searchsorted(binbounds1[:, 1], mids1[valid1], side='right')
where = numpy.where(start_indices == stop_indices)[0]
valid1 = valid1[where]
mapping1[valid1] = start_indices[where]
num_bins1 = binbounds1.shape[0]
else:
mapping1[valid1] = (mids1[valid1] - start1) / binsize
num_bins1 = (stop1 - start1) / binsize
if binsize == 0 and binbounds2 is None:
if skipfiltered:
mapping2[valid2] = numpy.arange(valid2.shape[0])
num_bins2 = valid2.shape[0]
else:
mapping2[valid2] = valid2
num_bins2 = mapping2.shape[0]
elif not binbounds2 is None:
start_indices = numpy.searchsorted(binbounds2[:, 0], mids2[valid2], side='right') - 1
stop_indices = numpy.searchsorted(binbounds2[:, 1], mids2[valid2], side='right')
where = numpy.where(start_indices == stop_indices)[0]
valid2 = valid2[where]
mapping2[valid2] = start_indices[where]
num_bins2 = binbounds2.shape[0]
else:
mapping2[valid2] = (mids2[valid2] - start2) / binsize
num_bins2 = (stop2 - start2) / binsize
    # Make sure at least one bin exists along each axis
if num_bins1 < 1 or num_bins2 < 1:
if not silent:
print >> sys.stderr, ("Insufficient data\n"),
return None
# If correction is required, determine what type and get appropriate data
if hic.normalization != 'binning' and datatype != 'raw':
corrections1 = hic.corrections[startfend1:stopfend1]
corrections2 = hic.corrections[startfend2:stopfend2]
elif datatype == 'raw':
corrections1 = numpy.ones(stopfend1 - startfend1, dtype=numpy.float32)
corrections2 = numpy.ones(stopfend2 - startfend2, dtype=numpy.float32)
else:
corrections1 = None
corrections2 = None
if ((hic.normalization in ['express', 'probability'] and
datatype == 'fend') or datatype == 'raw'):
correction_sums1 = numpy.zeros(num_bins1, dtype=numpy.float64)
correction_sums2 = numpy.zeros(num_bins2, dtype=numpy.float64)
if datatype == 'fend':
correction_sums1[:] = numpy.bincount(mapping1[valid1], weights=corrections1[valid1], minlength=num_bins1)
correction_sums2[:] = numpy.bincount(mapping2[valid2], weights=corrections2[valid2], minlength=num_bins2)
else:
correction_sums1[:] = numpy.bincount(mapping1[valid1], minlength=num_bins1)
correction_sums2[:] = numpy.bincount(mapping2[valid2], minlength=num_bins2)
else:
correction_sums1 = None
correction_sums2 = None
if (hic.normalization in ['binning', 'binning-express', 'binning-probability'] and
datatype not in ['raw', 'distance']):
binning_corrections = hic.binning_corrections
binning_num_bins = hic.binning_num_bins
fend_indices = hic.binning_fend_indices
else:
binning_corrections = None
binning_num_bins = None
fend_indices = None
if datatype in ['distance', 'enrichment', 'expected']:
if 'trans_means' not in hic.__dict__.keys():
hic.find_trans_means()
if chrint1 < chrint2:
index = chrint1 * (hic.fends['chromosomes'].shape[0] - 1) - chrint1 * (chrint1 + 1) / 2 - 1 + chrint2
else:
index = chrint2 * (hic.fends['chromosomes'].shape[0] - 1) - chrint2 * (chrint2 + 1) / 2 - 1 + chrint1
trans_mean = hic.trans_means[index]
else:
trans_mean = 1.0
# Create data array
if chrint1 < chrint2:
data_array = numpy.zeros((num_bins1, num_bins2, 2), dtype=numpy.float32)
else:
data_array = numpy.zeros((num_bins2, num_bins1, 2), dtype=numpy.float32)
# Fill in data values
if chrint1 < chrint2:
_hic_binning.find_trans_expected(mapping1, mapping2, corrections1, corrections2, binning_corrections,
binning_num_bins, fend_indices, data_array,
correction_sums1, correction_sums2, trans_mean, startfend1, startfend2)
if datatype != 'expected':
_hic_binning.find_trans_observed(data, data_indices, mapping1, mapping2, data_array)
else:
data_array[:, :, 0] = data_array[:, :, 1]
data_array[:, :, 1].fill(0)
corrections1.fill(1.0)
corrections2.fill(1.0)
correction_sums1 = numpy.bincount(mapping1[valid1], minlength=num_bins1).astype(numpy.float64)
correction_sums2 = numpy.bincount(mapping2[valid2], minlength=num_bins2).astype(numpy.float64)
_hic_binning.find_trans_expected(mapping1, mapping2, corrections1, corrections2, None, None, None,
data_array, correction_sums1, correction_sums2, 1.0, startfend1,
startfend2)
temp = data_array[:, :, 0]
data_array[:, :, 0] = data_array[:, :, 1]
data_array[:, :, 1] = temp
else:
_hic_binning.find_trans_expected(mapping2, mapping1, corrections2, corrections1, binning_corrections,
binning_num_bins, fend_indices, data_array,
correction_sums2, correction_sums1, trans_mean, startfend2, startfend1)
if datatype != 'expected':
_hic_binning.find_trans_observed(data, data_indices, mapping2, mapping1, data_array)
else:
data_array[:, :, 0] = data_array[:, :, 1]
data_array[:, :, 1].fill(0)
corrections1.fill(1.0)
corrections2.fill(1.0)
correction_sums1 = numpy.bincount(mapping1[valid1], minlength=num_bins1).astype(numpy.float64)
correction_sums2 = numpy.bincount(mapping2[valid2], minlength=num_bins2).astype(numpy.float64)
_hic_binning.find_trans_expected(mapping2, mapping1, corrections2, corrections1, None, None, None,
data_array, correction_sums2, correction_sums1, 1.0, startfend2,
startfend1)
temp = data_array[:, :, 0]
data_array[:, :, 0] = data_array[:, :, 1]
data_array[:, :, 1] = temp
if chrint2 < chrint1:
data_array = numpy.transpose(data_array, (1, 0, 2))
if returnmapping:
bin_mapping1 = numpy.zeros((num_bins1, 4), dtype=numpy.int32)
if binsize == 0 and binbounds1 is None:
if skipfiltered:
bin_mapping1[:, 2] = valid1 + startfend1
else:
bin_mapping1[:, 2] = numpy.arange(startfend1, stopfend1)
bin_mapping1[:, 3] = bin_mapping1[:, 2] + 1
bin_mapping1[:, 0] = hic.fends['fends']['start'][bin_mapping1[:, 2]]
bin_mapping1[:, 1] = hic.fends['fends']['stop'][bin_mapping1[:, 2]]
else:
if binbounds1 is None:
bin_mapping1[:, 0] = start1 + binsize * numpy.arange(num_bins1)
bin_mapping1[:, 1] = bin_mapping1[:, 0] + binsize
else:
bin_mapping1[:, :2] = binbounds1
bin_mapping1[:, 2] = numpy.searchsorted(mids1, bin_mapping1[:, 0]) + startfend1
bin_mapping1[:, 3] = numpy.searchsorted(mids1, bin_mapping1[:, 1]) + startfend1
bin_mapping2 = numpy.zeros((num_bins2, 4), dtype=numpy.int32)
if binsize == 0 and binbounds2 is None:
if skipfiltered:
bin_mapping2[:, 2] = valid2 + startfend2
else:
bin_mapping2[:, 2] = numpy.arange(startfend2, stopfend2)
bin_mapping2[:, 3] = bin_mapping2[:, 2] + 1
bin_mapping2[:, 0] = hic.fends['fends']['start'][bin_mapping2[:, 2]]
bin_mapping2[:, 1] = hic.fends['fends']['stop'][bin_mapping2[:, 2]]
else:
if binbounds2 is None:
bin_mapping2[:, 0] = start2 + binsize * numpy.arange(num_bins2)
bin_mapping2[:, 1] = bin_mapping2[:, 0] + binsize
else:
bin_mapping2[:, :2] = binbounds2
bin_mapping2[:, 2] = numpy.searchsorted(mids2, bin_mapping2[:, 0]) + startfend2
bin_mapping2[:, 3] = numpy.searchsorted(mids2, bin_mapping2[:, 1]) + startfend2
if not silent:
print >> sys.stderr, ("Done\n"),
return [data_array, bin_mapping1, bin_mapping2]
else:
if not silent:
print >> sys.stderr, ("Done\n"),
return data_array
def bin_trans_array(data_array, data_mapping1, data_mapping2, binsize=10000, binbounds1=None, start1=None, stop1=None,
binbounds2=None, start2=None, stop2=None, returnmapping=False, **kwargs):
"""
    Create a 2d array of trans interaction bins and fill 'binsize' bins or bins defined by 'binbounds1' and 'binbounds2' with data provided in the array passed by 'data_array'.
:param data_array: A 3d array containing data to be binned.
:type data_array: numpy array
:param data_mapping1: An N x 4 2d integer array containing the start and stop coordinates, and start and stop fends for each of the N bin ranges along the first axis in 'data_array'.
:type data_mapping1: numpy array
:param data_mapping2: An N x 4 2d integer array containing the start and stop coordinates, and start and stop fends for each of the N bin ranges along the second axis in 'data_array'.
:type data_mapping2: numpy array
:param binsize: This is the coordinate width of each bin. If binbounds is not None, this value is ignored.
:type binsize: int.
:param binbounds1: An array containing start and stop coordinates for a set of user-defined bins along the first axis. Any bin from 'data_array' not falling in a bin is ignored.
:type binbounds1: numpy array
:param start1: The coordinate at the beginning of the first bin for the first axis of the binned data. If unspecified, 'start1' will be the first multiple of 'binsize' below the first coordinate from 'data_mapping1'. If 'binbounds1' is given, 'start1' is ignored. Optional.
:type start1: int.
:param stop1: The coordinate at the end of the last bin for the first axis of the binned data. If unspecified, 'stop1' will be the first multiple of 'binsize' after the last coordinate from 'data_mapping1'. If needed, 'stop1' is adjusted upward to create a complete last bin. If 'binbounds1' is given, 'stop1' is ignored. Optional.
:type stop1: int.
:param binbounds2: An array containing start and stop coordinates for a set of user-defined bins along the second axis. Any bin from 'data_array' not falling in a bin is ignored.
:type binbounds2: numpy array
:param start2: The coordinate at the beginning of the first bin for the second axis of the binned data. If unspecified, 'start2' will be the first multiple of 'binsize' below the first coordinate from 'data_mapping2'. If 'binbounds2' is given, 'start2' is ignored. Optional.
:type start2: int.
:param stop2: The coordinate at the end of the last bin for the second axis of the binned data. If unspecified, 'stop2' will be the first multiple of 'binsize' after the last coordinate from 'data_mapping2'. If needed, 'stop2' is adjusted upward to create a complete last bin. If 'binbounds2' is given, 'stop2' is ignored. Optional.
:type stop2: int.
    :param returnmapping: If 'True', a list containing the data array and two 2d arrays containing the first coordinate included and excluded from each bin, and the first fend included and excluded from each bin for each axis is returned. Otherwise only the data array is returned.
    :type returnmapping: bool.
    :returns: A 3d numpy array containing the binned data pulled from 'data_array', or a list of the binned data array and the two bin mapping arrays if 'returnmapping' is True.
"""
if 'silent' in kwargs and kwargs['silent']:
silent = True
else:
silent = False
# Determine start and stop, if necessary
if binbounds1 is None:
if start1 is None:
start1 = (data_mapping1[0, 0] / binsize) * binsize
if stop1 is None:
stop1 = ((data_mapping1[-1, 1] - 1) / binsize + 1) * binsize
else:
stop1 = ((stop1 - 1 - start1) / binsize + 1) * binsize + start1
num_bins1 = (stop1 - start1) / binsize
binbounds1 = numpy.zeros((num_bins1, 2), dtype=numpy.int32)
binbounds1[:, 0] = numpy.arange(num_bins1) * binsize + start1
binbounds1[:, 1] = binbounds1[:, 0] + binsize
else:
num_bins1 = binbounds1.shape[0]
start1 = binbounds1[0, 0]
stop1 = binbounds1[0, 1]
if binbounds2 is None:
if start2 is None:
start2 = (data_mapping2[0, 0] / binsize) * binsize
if stop2 is None:
stop2 = ((data_mapping2[-1, 1] - 1) / binsize + 1) * binsize
else:
stop2 = ((stop2 - 1 - start2) / binsize + 1) * binsize + start2
num_bins2 = (stop2 - start2) / binsize
binbounds2 = numpy.zeros((num_bins2, 2), dtype=numpy.int32)
binbounds2[:, 0] = numpy.arange(num_bins2) * binsize + start2
binbounds2[:, 1] = binbounds2[:, 0] + binsize
else:
num_bins2 = binbounds2.shape[0]
start2 = binbounds2[0, 0]
stop2 = binbounds2[0, 1]
mids1 = (data_mapping1[:, 0] + data_mapping1[:, 1]) / 2
mids2 = (data_mapping2[:, 0] + data_mapping2[:, 1]) / 2
if not silent:
print >> sys.stderr, ("Finding binned trans array..."),
# Find bin mapping for each fend
mapping1 = numpy.zeros(mids1.shape[0], dtype=numpy.int32) - 1
fend_ranges1 = numpy.zeros((binbounds1.shape[0], 2), dtype=numpy.int32)
for i in range(binbounds1.shape[0]):
firstbin = numpy.searchsorted(mids1, binbounds1[i, 0])
lastbin = numpy.searchsorted(mids1, binbounds1[i, 1])
mapping1[firstbin:lastbin] = i
fend_ranges1[i, 0] = data_mapping1[firstbin, 2]
fend_ranges1[i, 1] = data_mapping1[lastbin, 3]
valid1 = numpy.where(mapping1 >= 0)[0]
mapping2 = numpy.zeros(mids2.shape[0], dtype=numpy.int32) - 1
fend_ranges2 = numpy.zeros((binbounds2.shape[0], 2), dtype=numpy.int32)
for i in range(binbounds2.shape[0]):
firstbin = numpy.searchsorted(mids2, binbounds2[i, 0])
lastbin = numpy.searchsorted(mids2, binbounds2[i, 1])
mapping2[firstbin:lastbin] = i
fend_ranges2[i, 0] = data_mapping2[firstbin, 2]
fend_ranges2[i, 1] = data_mapping2[lastbin, 3]
valid2 = numpy.where(mapping2 >= 0)[0]
# Create requested array
binned_array = numpy.zeros((num_bins1, num_bins2, 2), dtype=numpy.float32)
# Fill in binned data values
for i in range(valid1.shape[0]):
binned_array[i, :, 0] = numpy.bincount(mapping2[valid2], weights=data_array[valid1[i], valid2, 0],
minlength=num_bins2)
binned_array[i, :, 1] = numpy.bincount(mapping2[valid2], weights=data_array[valid1[i], valid2, 1],
minlength=num_bins2)
# If mapping requested, calculate bin bounds
if returnmapping:
mapping1 = numpy.zeros((num_bins1, 4), dtype=numpy.int32)
mapping1[:, 0] = binbounds1[:, 0]
mapping1[:, 1] = binbounds1[:, 1]
mapping1[:, 2:4] = fend_ranges1
mapping2 = numpy.zeros((num_bins2, 4), dtype=numpy.int32)
mapping2[:, 0] = binbounds2[:, 0]
mapping2[:, 1] = binbounds2[:, 1]
mapping2[:, 2:4] = fend_ranges2
if not silent:
print >> sys.stderr, ("Done\n"),
return [binned_array, mapping1, mapping2]
else:
if not silent:
print >> sys.stderr, ("Done\n"),
return binned_array
def dynamically_bin_trans_array(unbinned, unbinnedpositions1, unbinnedpositions2, binned, binbounds1, binbounds2,
minobservations=10, searchdistance=0, removefailed=False, **kwargs):
"""
    Expand bins in 'binned' to include additional data provided in 'unbinned' as necessary to meet the 'minobservations' criterion, subject to the 'searchdistance' limit.
:param unbinned: A 3d array containing data to be used for filling expanding bins. This array should be N x M x 2, where N is the number of bins or fends from the first chromosome and M is the number of bins or fends from the second chromosome.
:type unbinned: numpy array
:param unbinnedpositions1: A 2d integer array indicating the first and last coordinate of each bin along the first axis in 'unbinned' array.
:type unbinnedpositions1: numpy array
    :param unbinnedpositions2: A 2d integer array indicating the first and last coordinate of each bin along the second axis in 'unbinned' array.
:type unbinnedpositions2: numpy array
:param binned: A 3d array containing binned data to be dynamically binned. This array should be N x M x 2, where N is the number of bins from the first chromosome and M is the number of bins from the second chromosome. Data in this array will be altered by this function.
:type binned: numpy array
:param binbounds1: An integer array indicating the start and end position of each bin from the first chromosome in the 'binned' array. This array should be N x 2, where N is the size of the first dimension of 'binned'.
:type binbounds1: numpy array
:param binbounds2: An integer array indicating the start and end position of each bin from the second chromosome in the 'binned' array. This array should be N x 2, where N is the size of the second dimension of 'binned'.
:type binbounds2: numpy array
    :param minobservations: The fewest number of observed reads needed for a bin to be counted as valid and stop expanding.
    :type minobservations: int.
    :param searchdistance: The furthest distance from the bin midpoint to expand bounds. If this is set to zero, there is no limit on expansion distance.
    :type searchdistance: int.
    :param removefailed: If a non-zero 'searchdistance' is given, it is possible for a bin not to meet the 'minobservations' criteria before the search limit is reached. If this occurs and 'removefailed' is True, the observed and expected values for that bin are set to zero.
    :type removefailed: bool.
:returns: None
"""
if 'silent' in kwargs and kwargs['silent']:
silent = True
else:
silent = False
if not silent:
print >> sys.stderr, ("Dynamically binning data..."),
# Determine bin edges relative to unbinned positions
unbinnedmids1 = (unbinnedpositions1[:, 0] + unbinnedpositions1[:, 1]) / 2
unbinnedmids2 = (unbinnedpositions2[:, 0] + unbinnedpositions2[:, 1]) / 2
binedges1 = numpy.zeros(binbounds1.shape, dtype=numpy.int32)
binedges1[:, 0] = numpy.searchsorted(unbinnedmids1, binbounds1[:, 0])
binedges1[:, 1] = numpy.searchsorted(unbinnedmids1, binbounds1[:, 1])
binedges2 = numpy.zeros(binbounds2.shape, dtype=numpy.int32)
binedges2[:, 0] = numpy.searchsorted(unbinnedmids2, binbounds2[:, 0])
binedges2[:, 1] = numpy.searchsorted(unbinnedmids2, binbounds2[:, 1])
# Determine bin midpoints
mids1 = (binbounds1[:, 0] + binbounds1[:, 1]) / 2
mids2 = (binbounds2[:, 0] + binbounds2[:, 1]) / 2
# Dynamically bin using appropriate array type combination
_hic_binning.dynamically_bin_trans(unbinned, unbinnedmids1, unbinnedmids2, binned, binedges1,
binedges2, mids1, mids2, minobservations, searchdistance, int(removefailed))
if not silent:
print >> sys.stderr, ("Done\n"),
return None
def write_heatmap_dict(hic, filename, binsize, includetrans=True, datatype='enrichment', chroms=[],
dynamically_binned=False, minobservations=0, searchdistance=0, expansion_binsize=0,
removefailed=False, **kwargs):
"""
Create an h5dict file containing binned interaction arrays, bin positions, and an index of included chromosomes. This function is MPI compatible.
:param hic: A :class:`HiC <hifive.hic.HiC>` class object containing fend and count data.
:type hic: :class:`HiC <hifive.hic.HiC>`
:param filename: Location to write h5dict object to.
:type filename: str.
:param binsize: Size of bins for interaction arrays.
:type binsize: int.
:param includetrans: Indicates whether trans interaction arrays should be calculated and saved.
:type includetrans: bool.
:param datatype: This specifies the type of data that is processed and returned. Options are 'raw', 'distance', 'fend', 'enrichment', and 'expected'. Observed values are always in the first index along the last axis, except when 'datatype' is 'expected'. In this case, filter values replace counts. Conversely, if 'raw' is specified, unfiltered fends return value of one. Expected values are returned for 'distance', 'fend', 'enrichment', and 'expected' values of 'datatype'. 'distance' uses only the expected signal given distance for calculating the expected values, 'fend' uses only fend correction values, and both 'enrichment' and 'expected' use both correction and distance mean values.
:type datatype: str.
:param chroms: A list of chromosome names indicating which chromosomes should be included. If left empty, all chromosomes are included. Optional.
:type chroms: list
:param dynamically_binned: If 'True', return dynamically binned data.
:type dynamically_binned: bool.
    :param minobservations: The fewest number of observed reads needed for a bin to be counted as valid and stop expanding.
    :type minobservations: int.
    :param searchdistance: The furthest distance from the bin midpoint to expand bounds. If this is set to zero, there is no limit on expansion distance.
    :type searchdistance: int.
    :param expansion_binsize: The size of bins to use for data to pull from when expanding dynamic bins. If set to zero, unbinned data is used.
    :type expansion_binsize: int.
    :param removefailed: If a non-zero 'searchdistance' is given, it is possible for a bin not to meet the 'minobservations' criteria before the search limit is reached. If this occurs and 'removefailed' is True, the observed and expected values for that bin are set to zero.
    :type removefailed: bool.
:returns: None
"""
# check if MPI is available
if 'mpi4py' in sys.modules.keys():
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
num_procs = comm.Get_size()
else:
comm = None
rank = 0
num_procs = 1
if ('silent' in kwargs and kwargs['silent']) or rank > 0:
silent = True
else:
silent = False
# Check if trans mean is needed and calculate if not already done
    if includetrans and datatype in ['distance', 'enrichment'] and 'trans_means' not in hic.__dict__.keys():
hic.find_trans_means()
# Check if filename already exists, and remove if it does
if rank == 0:
if os.path.exists(filename):
if not silent:
print >> sys.stderr, ("%s already exists, overwriting.") % filename
subprocess.call('rm %s' % filename, shell=True)
if not silent:
print >> sys.stderr, ("Creating binned heatmap...\n"),
output = h5py.File(filename, 'w')
output.attrs['resolution'] = binsize
# If chromosomes not specified, fill list
if len(chroms) == 0:
chroms = list(hic.fends['chromosomes'][...])
# Assemble list of requested arrays
needed = []
chr_indices = hic.fends['chr_indices'][...]
for i in range(len(chroms))[::-1]:
chrom = chroms[i]
chrint = hic.chr2int[chrom]
if numpy.sum(hic.filter[chr_indices[chrint]:chr_indices[chrint + 1]]) > 0:
needed.append((chrom,))
else:
del chroms[i]
if includetrans:
for i in range(len(chroms)-1):
for j in range(i + 1, len(chroms)):
needed.append((chroms[i],chroms[j]))
if num_procs == 1:
node_needed = needed
else:
node_ranges = numpy.round(numpy.linspace(0, len(needed), num_procs + 1)).astype(numpy.int32)
for i in range(1, num_procs):
comm.send(needed[node_ranges[i]:node_ranges[i + 1]], dest=i, tag=11)
node_needed = needed[node_ranges[0]:node_ranges[1]]
else:
node_needed = comm.recv(source=0, tag=11)
heatmaps = {}
# Find heatmaps
for chrom in node_needed:
if len(chrom) == 1:
# Find cis heatmap
# determine if data is to be dynamically binned
if not dynamically_binned:
heatmaps[chrom] = find_cis_signal(hic, chrom[0], binsize=binsize, datatype=datatype,
arraytype='upper', returnmapping=True, silent=silent,
skipfiltered=True)
else:
temp = find_cis_signal(hic, chrom[0], binsize=expansion_binsize, datatype=datatype, arraytype='upper',
returnmapping=True, silent=silent)
if temp is None:
continue
expansion, exp_mapping = temp
binned, mapping = find_cis_signal(hic, chrom[0], binsize=binsize, datatype=datatype,
arraytype='upper', returnmapping=True, silent=silent)
dynamically_bin_cis_array(expansion, exp_mapping, binned, mapping, minobservations=minobservations,
searchdistance=searchdistance, removefailed=removefailed, silent=silent)
heatmaps[chrom] = [binned, mapping]
else:
# Find trans heatmap
# determine if data is to be dynamically binned
if not dynamically_binned:
heatmaps[chrom] = find_trans_signal(hic, chrom[0], chrom[1], binsize=binsize, datatype=datatype,
returnmapping=False, silent=silent, skipfiltered=True)
else:
temp = find_trans_signal(hic, chrom[0], chrom[1], binsize=expansion_binsize, datatype=datatype,
returnmapping=True, silent=silent)
if temp is None:
continue
expansion, exp_mapping1, exp_mapping2 = temp
binned, mapping1, mapping2 = find_trans_signal(hic, chrom[0], chrom[1], binsize=binsize,
datatype=datatype, returnmapping=True, silent=silent)
dynamically_bin_trans_array(expansion, exp_mapping1, exp_mapping2, binned, mapping1, mapping2,
minobservations=minobservations, searchdistance=searchdistance,
removefailed=removefailed, silent=silent)
heatmaps[chrom] = binned
# Check if array contains data
if heatmaps[chrom] is None or heatmaps[chrom][0].shape[0] == 0:
del heatmaps[chrom]
# Collect heatmaps at node 0 and write to h5dict
if rank == 0:
if num_procs > 1:
for i in range(1, num_procs):
if node_ranges[i + 1] - node_ranges[i] > 0:
temp = comm.recv(source=i, tag=11)
heatmaps.update(temp)
del temp
for chrom in heatmaps.keys():
if len(chrom) == 1:
output.create_dataset('%s.counts' % chrom[0], data=heatmaps[chrom][0][:, 0])
output.create_dataset('%s.expected' % chrom[0], data=heatmaps[chrom][0][:, 1])
output.create_dataset('%s.positions' % chrom[0], data=heatmaps[chrom][1][:, :2])
else:
output.create_dataset('%s_by_%s.counts' % (chrom[0], chrom[1]), data=heatmaps[chrom][:, :, 0])
output.create_dataset('%s_by_%s.expected' % (chrom[0], chrom[1]), data=heatmaps[chrom][:, :, 1])
output.create_dataset('chromosomes', data=numpy.array(chroms))
if 'history' in kwargs:
output.attrs['history'] = kwargs['history']
output.close()
if not silent:
print >> sys.stderr, ("Creating binned heatmap...Done\n"),
else:
if len(heatmaps) > 0:
comm.send(heatmaps, dest=0, tag=11)
del heatmaps
return None
def find_multiresolution_heatmap(hic, chrom, start, stop, chrom2=None, start2=None, stop2=None, minbinsize=5000,
maxbinsize=12800000, minobservations=5, datatype='fend', midbinsize=40000,
silent=True):
"""
Create a multi-resolution data and index heatmap array for a chromosome or chromosome pair.
:param hic: A :class:`HiC <hifive.hic.HiC>` class object containing fend and count data.
:type hic: :class:`HiC <hifive.hic.HiC>`
:param chrom: The first (or only) chromosome to find the multi-resolution heatmap for.
:type chrom: str.
:param start: The first bin start coordinate.
:type start: int.
:param stop: The last bin stop coordinate. The difference between start and stop must be a multiple of maxbinsize.
:type stop: int.
:param chrom2: The second chromosome to find the multi-resolution heatmap for. If None, an intra-chromosomal multi-resolution heatmap is returned for chrom.
:type chrom2: str.
:param start2: The first bin start coordinate for the second chromosome.
:type start2: int.
:param stop2: The last bin stop coordinate for the second chromosome. The difference between start and stop must be a multiple of maxbinsize.
:type stop2: int.
:param maxbinsize: The maximum sized bin (lowest resolution) heatmap to be produced for each chromosome.
:type maxbinsize: int.
:param minbinsize: The minimum sized bin (highest resolution) heatmap to be produced for each chromosome.
:type minbinsize: int.
:param minobservations: The minimum number of reads needed for a bin to be considered valid and be included in the heatmap.
:type minobservations: int.
    :param datatype: This specifies the type of data that is processed and returned. Options are 'raw', 'distance', 'fend', and 'enrichment'. Observed values are always in the first index along the last axis. If 'raw' is specified, unfiltered fends return value of one. Expected values are returned for 'distance', 'fend', and 'enrichment' values of 'datatype'. 'distance' uses only the expected signal given distance for calculating the expected values, 'fend' uses only fend correction values, and 'enrichment' uses both correction and distance mean values.
:type datatype: str.
:param midbinsize: This is used to determine the smallest bin size (highest resolution) complete heatmap to generate in producing the multi-resolution heatmap. It does not affect the resulting output but can be used to limit the total memory usage, with higher values using less memory but more time.
:type midbinsize: int.
    :param silent: Indicates whether to display messages or not.
    :type silent: bool.
    :returns: A list containing the flattened multi-resolution heatmap data array and its accompanying index and shape arrays.
"""
# check that all values are acceptable
if datatype not in ['raw', 'fend', 'distance', 'enrichment']:
if not silent:
print >> sys.stderr, ("Datatype given is not recognized. No data returned\n"),
return None
if not chrom2 is None and (start2 is None or stop2 is None):
if not silent:
print >> sys.stderr, ("Need values for start2 and stop2. No data returned\n"),
return None
if (stop - start) % maxbinsize != 0 or (not chrom2 is None and (stop2 - start2) % maxbinsize != 0):
if not silent:
print >> sys.stderr, ("Genomic intervals must be multiples of maxbinsize. No data returned\n"),
return None
res_levels = numpy.round(numpy.log(maxbinsize / minbinsize) / numpy.log(2.0)).astype(numpy.int32)
if maxbinsize != minbinsize * 2 ** res_levels:
if not silent:
print >> sys.stderr, ("Maxbinsize must be a multiple of 2^N and minbinsize for an integer N. No data returned\n"),
return None
if not silent:
if chrom2 is None:
target = chrom
else:
target = '%s by %s' % (chrom, chrom2)
print >> sys.stderr, ("\r%s\rFinding multi-resolution heatmap for %s...") % (' ' * 80, target),
# determine if finding cis or trans multi-resolution heatmap
chrint = hic.chr2int[chrom]
chrint2 = None
span = stop - start
startfend = _find_fend_from_coord(hic, chrint, start)
stopfend = _find_fend_from_coord(hic, chrint, stop)
if chrom2 is None:
trans = False
else:
span2 = stop2 - start2
chrint2 = hic.chr2int[chrom2]
trans = True
startfend2 = _find_fend_from_coord(hic, chrint2, start2)
stopfend2 = _find_fend_from_coord(hic, chrint2, stop2)
# determine actual midresolution limit
temp = maxbinsize
while temp / 2 >= max(midbinsize, minbinsize):
temp /= 2
midbinsize = temp
# pull relevant data
n = span / midbinsize
valid = numpy.where(hic.filter[startfend:stopfend])[0].astype(numpy.int32)
fend_nums = valid + startfend
mids = hic.fends['fends']['mid'][fend_nums] - start
binbounds = numpy.round(numpy.linspace(0, span, n + 1)).astype(numpy.int32)
bin_mids = (binbounds[:-1] + binbounds[1:]) / 2
mapping = numpy.empty(stopfend - startfend, dtype=numpy.int32)
mapping.fill(-1)
mapping[valid] = numpy.arange(valid.shape[0])
binmapping = mids / midbinsize
obs_indices = numpy.searchsorted(mids, binbounds).astype(numpy.int32)
if hic.normalization in ['express', 'probability', 'binning-express', 'binning-probability']:
corrections = hic.corrections[fend_nums]
correction_sums = numpy.bincount(binmapping, weights=corrections, minlength=n).astype(numpy.float32)
else:
corrections = None
correction_sums = None
if hic.normalization in ['binning', 'binning-express', 'binning-probability']:
binning_corrections = hic.binning_corrections
fend_indices = hic.binning_fend_indices[fend_nums, :, :]
else:
binning_corrections = None
fend_indices = None
if datatype in ['distance', 'enrichment']:
distance_parameters = hic.distance_parameters
chrom_mean = hic.chromosome_means[chrint]
else:
distance_parameters = None
chrom_mean = 0.0
if trans:
m = span2 / midbinsize
valid2 = numpy.where(hic.filter[startfend2:stopfend2])[0]
fend_nums2 = valid2 + startfend2
mids2 = hic.fends['fends']['mid'][fend_nums2] - start2
binbounds2 = numpy.round(numpy.linspace(0, span2, m + 1)).astype(numpy.int32)
bin_mids2 = (binbounds2[:-1] + binbounds2[1:]) / 2
obs_indices2 = numpy.searchsorted(mids2, binbounds2).astype(numpy.int32)
mapping2 = numpy.empty(stopfend2 - startfend2, dtype=numpy.int32)
mapping2.fill(-1)
mapping2[valid2] = numpy.arange(valid2.shape[0])
binmapping2 = mids2 / midbinsize
if hic.normalization in ['express', 'probability', 'binning-express', 'binning-probability']:
corrections2 = hic.corrections[fend_nums2]
correction_sums2 = numpy.bincount(binmapping2, weights=corrections2, minlength=m).astype(numpy.float32)
else:
corrections2 = None
correction_sums2 = None
if hic.normalization in ['binning', 'binning-express', 'binning-probability']:
fend_indices2 = hic.binning_fend_indices[fend_nums2, :, :]
else:
fend_indices2 = None
if datatype in ['distance', 'enrichment']:
if 'trans_means' not in hic.__dict__.keys():
hic.find_trans_means()
if chrint < chrint2:
index = chrint * (hic.fends['chromosomes'].shape[0] - 1) - chrint * (chrint + 1) / 2 - 1 + chrint2
else:
index = chrint2 * (hic.fends['chromosomes'].shape[0] - 1) - chrint2 * (chrint2 + 1) / 2 - 1 + chrint
chrom_mean = hic.trans_means[index]
# pull relevant trans observations and remap
if chrint2 < chrint:
start_index = hic.data['trans_indices'][startfend2]
stop_index = hic.data['trans_indices'][stopfend2]
data = hic.data['trans_data'][start_index:stop_index, :]
data_indices = hic.data['trans_indices'][startfend2:(stopfend2 + 1)]
data_indices -= data_indices[0]
num_data = _hic_binning.remap_mrh_data(
data,
data_indices,
mapping2,
mapping,
startfend,
stopfend,
startfend2,
stopfend2 - startfend2,
1)
else:
start_index = hic.data['trans_indices'][startfend]
stop_index = hic.data['trans_indices'][stopfend]
data = hic.data['trans_data'][start_index:stop_index, :]
data_indices = hic.data['trans_indices'][startfend:(stopfend + 1)]
data_indices -= data_indices[0]
num_data = _hic_binning.remap_mrh_data(
data,
data_indices,
mapping,
mapping2,
startfend2,
stopfend2,
startfend,
stopfend - startfend,
0)
else:
# pull relevant cis observations
start_index = hic.data['cis_indices'][startfend]
stop_index = hic.data['cis_indices'][stopfend]
data = hic.data['cis_data'][start_index:stop_index, :]
data_indices = hic.data['cis_indices'][startfend:(stopfend + 1)]
data_indices -= data_indices[0]
num_data = _hic_binning.remap_mrh_data(
data,
data_indices,
mapping,
None,
startfend,
stopfend,
startfend,
stopfend - startfend,
0)
if trans and chrint2 < chrint:
data = data[numpy.lexsort((data[:, 1], data[:, 0])), :]
data_indices = numpy.r_[0, numpy.bincount(data[:num_data, 0], minlength=valid.shape[0])].astype(numpy.int64)
for i in range(1, data_indices.shape[0]):
data_indices[i] += data_indices[i - 1]
data = data[:data_indices[-1], 1:]
# convert observations into binned matrix
if trans:
observed = numpy.zeros((n, m), dtype=numpy.int32)
else:
observed = numpy.zeros((n, n), dtype=numpy.int32)
binmapping2 = None
_hic_binning.find_mrh_observed(
data,
data_indices,
observed,
binmapping,
binmapping2)
expected = numpy.zeros(observed.shape, dtype=numpy.float32)
datatype_int = {'raw':0, 'fend':1, 'distance':2, 'enrichment':3}
dt_int = datatype_int[datatype]
if trans:
_hic_binning.find_mrh_trans_expected(
expected,
binmapping,
binmapping2,
obs_indices,
obs_indices2,
corrections,
corrections2,
correction_sums,
correction_sums2,
binning_corrections,
fend_indices,
fend_indices2,
chrom_mean,
dt_int)
else:
_hic_binning.find_mrh_cis_expected(
expected,
fend_nums,
binmapping,
mapping,
mids,
obs_indices,
corrections,
correction_sums,
binning_corrections,
fend_indices,
distance_parameters,
chrom_mean,
dt_int)
# find features for largest binned data array
n_bins = span / maxbinsize
m_bins = 0
binbounds = numpy.linspace(0, span, n_bins + 1)
if trans:
m_bins = span2 / maxbinsize
binbounds2 = numpy.linspace(0, span2, m_bins + 1)
# find fend assignments for largest bin sizes
obs_indices = numpy.searchsorted(bin_mids, binbounds).astype(numpy.int32)
if trans:
obs_indices2 = numpy.searchsorted(bin_mids2, binbounds2).astype(numpy.int32)
else:
obs_indices2 = None
# make data arrays to hold output
if trans:
current_level_data = numpy.zeros(n_bins * m_bins, dtype=numpy.float32)
else:
current_level_data = numpy.zeros((n_bins * (n_bins + 1)) / 2, dtype=numpy.float32)
current_level_indices = numpy.empty(current_level_data.shape, dtype=numpy.int32)
current_level_indices.fill(-1)
current_level_shapes = numpy.zeros(current_level_data.shape, dtype=numpy.int32)
bin_position = numpy.empty(current_level_data.shape, dtype=numpy.int32)
bin_position.fill(-1)
# find largest binned data array
if trans:
_hic_binning.make_trans_mrh_toplevel(observed,
expected,
current_level_data,
obs_indices,
obs_indices2,
bin_position,
minobservations)
else:
_hic_binning.make_cis_mrh_toplevel(observed,
expected,
current_level_data,
obs_indices,
bin_position,
minobservations)
all_data = [current_level_data]
all_indices = [current_level_indices]
all_shapes = [current_level_shapes]
# find subpartitioning for all valid bins for each resolution level
resolution = maxbinsize / 2
if trans:
pos = n_bins * m_bins
else:
pos = (n_bins * (n_bins + 1)) / 2
# find levels below the first but above or equal to midbinsize
while resolution >= midbinsize:
prev_bin_position = bin_position
bin_position = numpy.empty(prev_bin_position.shape[0] * 4, dtype=numpy.int32)
bin_position.fill(-1)
prev_level_data = all_data[-1]
current_level_data = numpy.empty(prev_level_data.shape[0] * 4, dtype=numpy.float32)
current_level_data.fill(numpy.nan)
prev_level_indices = all_indices[-1]
prev_level_shapes = all_shapes[-1]
prev_n_bins = n_bins
prev_m_bins = 0
n_bins = span / resolution
binbounds = numpy.linspace(0, span, n_bins + 1)
obs_indices = numpy.searchsorted(bin_mids, binbounds).astype(numpy.int32)
if trans:
prev_m_bins = m_bins
m_bins = span2 / resolution
binbounds2 = numpy.linspace(0, span2, m_bins + 1)
obs_indices2 = numpy.searchsorted(bin_mids2, binbounds2).astype(numpy.int32)
if trans:
_hic_binning.make_trans_mrh_midlevel(observed,
expected,
current_level_data,
prev_level_data,
prev_level_indices,
prev_level_shapes,
obs_indices,
obs_indices2,
prev_bin_position,
bin_position,
prev_m_bins,
m_bins,
minobservations,
pos)
else:
_hic_binning.make_cis_mrh_midlevel(observed,
expected,
current_level_data,
prev_level_data,
prev_level_indices,
prev_level_shapes,
obs_indices,
prev_bin_position,
bin_position,
prev_n_bins,
n_bins,
minobservations,
pos)
where = numpy.where(bin_position >= 0)[0]
pos += where.shape[0]
bin_position = bin_position[where]
all_data.append(current_level_data[where])
if resolution > minbinsize:
all_indices.append(numpy.empty(all_data[-1].shape[0], dtype=numpy.int32))
all_indices[-1].fill(-1)
all_shapes.append(numpy.zeros(all_data[-1].shape[0], dtype=numpy.int32))
resolution /= 2
# find levels below midbinsize
if midbinsize > minbinsize:
while resolution >= minbinsize:
prev_bin_position = bin_position
bin_position = numpy.empty(prev_bin_position.shape[0] * 4, dtype=numpy.int32)
bin_position.fill(-1)
prev_level_data = all_data[-1]
current_level_data = numpy.empty(prev_level_data.shape[0] * 4, dtype=numpy.float32)
current_level_data.fill(numpy.nan)
prev_level_indices = all_indices[-1]
prev_level_shapes = all_shapes[-1]
prev_n_bins = n_bins
prev_m_bins = 0
n_bins = span / resolution
binbounds = numpy.linspace(0, span, n_bins + 1)
obs_indices = numpy.searchsorted(mids, binbounds).astype(numpy.int32)
correction_sums = numpy.zeros(n_bins, dtype=numpy.float32)
for i in range(n_bins):
correction_sums[i] = numpy.sum(corrections[obs_indices[i]:obs_indices[i + 1]])
if trans:
prev_m_bins = m_bins
m_bins = span2 / resolution
binbounds2 = numpy.linspace(0, span2, m_bins + 1)
obs_indices2 = numpy.searchsorted(mids2, binbounds2).astype(numpy.int32)
correction_sums2 = numpy.zeros(m_bins, dtype=numpy.float32)
for i in range(m_bins):
correction_sums2[i] = numpy.sum(corrections2[obs_indices2[i]:obs_indices2[i + 1]])
if trans:
_hic_binning.make_trans_mrh_lowerlevel(data,
data_indices,
correction_sums,
correction_sums2,
current_level_data,
prev_level_data,
prev_level_indices,
prev_level_shapes,
obs_indices,
obs_indices2,
prev_bin_position,
bin_position,
prev_m_bins,
m_bins,
minobservations,
pos)
else:
_hic_binning.make_cis_mrh_lowerlevel(data,
data_indices,
corrections,
correction_sums,
fend_nums,
current_level_data,
prev_level_data,
prev_level_indices,
prev_level_shapes,
obs_indices,
prev_bin_position,
bin_position,
prev_n_bins,
n_bins,
minobservations,
pos)
where = numpy.where(bin_position >= 0)[0]
pos += where.shape[0]
bin_position = bin_position[where]
all_data.append(current_level_data[where])
if resolution > minbinsize:
all_indices.append(numpy.empty(all_data[-1].shape[0], dtype=numpy.int32))
all_indices[-1].fill(-1)
all_shapes.append(numpy.zeros(all_data[-1].shape[0], dtype=numpy.int32))
resolution /= 2
data = all_data[0]
for i in range(1, len(all_data)):
where = numpy.where(numpy.logical_not(numpy.isnan(all_data[i])))
data = numpy.hstack((data, all_data[i]))
all_data[i] = None
indices = all_indices[0]
for i in range(1, len(all_indices)):
indices = numpy.hstack((indices, all_indices[i]))
all_indices[i] = None
shapes = all_shapes[0]
for i in range(1, len(all_shapes)):
shapes = numpy.hstack((shapes, all_shapes[i]))
all_shapes[i] = None
if not silent:
print >> sys.stderr, ("Done\n"),
return [data, indices, shapes]
| bsd-3-clause | 5,788,391,827,346,598,000 | 57.249842 | 696 | 0.601859 | false |
CroissanceCommune/autonomie | autonomie/alembic/versions/3_0_migrate_task_lines_2192101f133b.py | 1 | 1508 | """3.0 : Migrate task lines
Revision ID: 2192101f133b
Revises: 465776bbb019
Create Date: 2015-06-29 11:57:26.726124
"""
# revision identifiers, used by Alembic.
revision = '2192101f133b'
down_revision = '36b1d9c38c43'
from alembic import op
import sqlalchemy as sa
def upgrade():
from autonomie.models.task import (
TaskLine,
TaskLineGroup,
Task,
Estimation,
CancelInvoice,
Invoice,
)
from autonomie_base.models.base import (
DBSESSION,
)
session = DBSESSION()
index = 0
query = Task.query()
query = query.with_polymorphic([Invoice, CancelInvoice, Estimation])
query = query.filter(
Task.type_.in_(['invoice', 'estimation', 'cancelinvoice'])
)
for task in query:
group = TaskLineGroup(task_id=task.id, order=0)
for line in task.lines:
tline = TaskLine(
group=group,
order=line.rowIndex,
description=line.description,
cost=line.cost,
tva=line.tva,
quantity=line.quantity,
)
if hasattr(line, 'product_id'):
tline.product_id = line.product_id
session.add(tline)
        index += 1
        if index % 100 == 0:
            session.flush()
op.alter_column(
table_name='estimation_payment',
column_name='rowIndex',
new_column_name='order',
type_=sa.Integer,
)
def downgrade():
pass
| gpl-3.0 | -5,757,862,300,584,383,000 | 21.176471 | 72 | 0.566313 | false |
niosus/EasyClangComplete | tests/test_makefile.py | 1 | 3170 | """Tests for Makefile flags extraction."""
import imp
import platform
from os import path
from unittest import TestCase
from EasyClangComplete.plugin.utils import flag
from EasyClangComplete.plugin.utils import search_scope
from EasyClangComplete.plugin.flags_sources import makefile
imp.reload(makefile)
imp.reload(flag)
imp.reload(search_scope)
SearchScope = search_scope.TreeSearchScope
Makefile = makefile.Makefile
Flag = flag.Flag
class TestMakefile(object):
"""Test finding and generating flags from Makeifles."""
def test_init(self):
"""Initialization test."""
self.assertEqual(Makefile._FILE_NAME, 'Makefile')
def _get_project_root(self):
return path.join(path.dirname(__file__), 'makefile_files')
def _check_include(self, flags, include):
expected = path.join(self._get_project_root(), include)
self.assertIn(Flag('-I', expected), flags)
def _check_define(self, flags, define):
self.assertIn(Flag('', '-D' + define), flags)
def _check_makefile(self, cache, flags, test_path, makefile_path):
expected = path.join(self._get_project_root(), makefile_path)
self.assertEqual(expected, cache[test_path])
self.assertEqual(flags, cache[expected])
def _check_cache(self, cache, flags, makefile_path):
key = path.join(self._get_project_root(), makefile_path)
self.assertEqual(flags, cache[key])
def test_makefile_root(self):
"""Test finding and parsing root Makefile."""
test_path = path.join(self._get_project_root(), 'main.c')
mfile = Makefile(['-I', '-isystem'])
flags = mfile.get_flags(test_path)
self._check_include(flags, "inc")
self._check_define(flags, "REQUIRED_DEFINE")
self._check_makefile(mfile._cache, flags, test_path, "Makefile")
def test_makefile_lib(self):
"""Test finding and parsing library Makefile."""
test_path = path.join(self._get_project_root(), 'lib', 'bar.c')
mfile = Makefile(['-I', '-isystem'])
flags = mfile.get_flags(test_path)
self._check_include(flags, path.join("lib", "foo"))
self._check_makefile(mfile._cache, flags, test_path,
path.join("lib", "Makefile"))
def test_makefile_sub(self):
"""Test finding and parsing Makefile for library subdir."""
test_path = path.join(self._get_project_root(), 'lib', 'foo', 'foo.c')
mfile = Makefile(['-I', '-isystem'])
flags = mfile.get_flags(test_path)
self._check_include(flags, path.join("lib", "foo"))
self._check_makefile(mfile._cache, flags, test_path,
path.join("lib", "Makefile"))
def test_makefile_fail(self):
"""Test behavior when no Makefile found."""
test_path = path.join(path.dirname(__file__), 'test_files', 'test.cpp')
mfile = Makefile(['-I', '-isystem'])
flags = mfile.get_flags(test_path)
self.assertTrue(flags is None)
if platform.system() != "Windows":
class MakefileTestRunner(TestMakefile, TestCase):
"""Run make only if we are not on windows."""
pass
| mit | -5,041,730,860,606,384,000 | 34.222222 | 79 | 0.633438 | false |
jyi/ITSP | prophet-gpl/tools/httpd-build.py | 1 | 3243 | # Copyright (C) 2016 Fan Long, Martin Rianrd and MIT CSAIL
# Prophet
#
# This file is part of Prophet.
#
# Prophet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Prophet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Prophet. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
from sys import argv
from os import path, chdir, getcwd, environ
from tester_common import extract_arguments
import subprocess
import getopt
def compileit(out_dir, compile_only = False, config_only = False, paraj = 0):
ori_dir = getcwd();
my_env = environ;
chdir(out_dir);
if not compile_only:
ret = subprocess.call(["./buildconf --with-apr=" + deps_dir +"/apr-src --with-apr-util=" + deps_dir + "/apr-util-src"], shell = True, env = my_env);
if (ret != 0):
print "Failed to run buildconf!";
chdir(ori_dir);
exit(1);
cmd = "./configure --with-apr=" + deps_dir + "/apr-build --with-apr-util=" + deps_dir + "/apr-util-build";
ret = subprocess.call([cmd], shell = True, env = my_env);
if (ret != 0):
print "Failed to run configure!";
chdir(ori_dir);
print "Executed: cmd";
print cmd;
exit(1);
subprocess.call(["make", "clean"], shell = True, env = my_env);
if not config_only:
if paraj == 0:
ret = subprocess.call(["make"], env = my_env);
else:
ret = subprocess.call(["make", "-j", str(paraj)], env = my_env);
if ret != 0:
print "Failed to make!";
chdir(ori_dir);
exit(1);
chdir(ori_dir);
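# Illustrative usage sketch (hypothetical paths, not executed by this script):
# with the module-level deps_dir pointing at an apache-deps checkout, an httpd
# source tree can be configured and built with four parallel make jobs via
#   compileit("/tmp/httpd-src", compile_only = False, config_only = False, paraj = 4);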
if __name__ == "__main__":
deps_dir = getcwd() + "/apache-deps";
compile_only = False;
config_only = False;
paraj = 0;
dryrun_src = "";
opts, args = getopt.getopt(argv[1:], 'cd:j:p:');
for o, a in opts:
if o == "-d":
dryrun_src = a;
elif o == "-j":
paraj = int(a);
elif o == "-p":
if a[0] == "/":
deps_dir = a;
else:
deps_dir = getcwd() + "/" + a;
elif o == "-c":
compile_only = True;
print deps_dir;
out_dir = args[0];
if (path.exists(out_dir)):
print "Working with existing directory: " + out_dir;
else:
print "Non-exist directory";
exit(1);
compileit(out_dir, compile_only, config_only, paraj);
if dryrun_src != "":
(builddir, buildargs) = extract_arguments(out_dir, dryrun_src);
if len(args) > 1:
out_file = open(args[1], "w");
print >> out_file, builddir;
print >> out_file, buildargs;
out_file.close();
else:
print builddir;
print buildargs;
| mit | 2,086,195,526,570,263,600 | 31.757576 | 156 | 0.5572 | false |
praekelt/jmbo-contact | contact/models.py | 1 | 1051 | from django.contrib.auth.models import User
from django.db import models
from preferences.models import Preferences
class ContactPreferences(Preferences):
__module__ = 'preferences.models'
telephone = models.CharField(
max_length=24,
blank=True,
null=True,
)
fax = models.CharField(
max_length=24,
blank=True,
null=True,
)
physical_address = models.TextField(
blank=True,
null=True,
)
postal_address = models.TextField(
blank=True,
null=True,
)
email = models.EmailField(
blank=True,
null=True,
)
sms = models.CharField(
max_length=24,
blank=True,
null=True,
)
email_recipients = models.ManyToManyField(
User,
blank=True,
null=True,
help_text='Select users who will recieve emails sent via the \
general contact form.'
)
class Meta:
verbose_name = 'Contact preferences'
verbose_name_plural = 'Contact preferences'
| bsd-3-clause | 1,976,256,016,569,341,200 | 21.361702 | 70 | 0.596575 | false |
nealtodd/wagtail | wagtail/images/views/serve.py | 3 | 3400 | import base64
import hashlib
import hmac
import imghdr
from wsgiref.util import FileWrapper
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.http import HttpResponse, HttpResponsePermanentRedirect, StreamingHttpResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.decorators import classonlymethod
from django.utils.encoding import force_text
from django.views.generic import View
from wagtail.images import get_image_model
from wagtail.images.exceptions import InvalidFilterSpecError
from wagtail.images.models import SourceImageIOError
from wagtail.utils.sendfile import sendfile
def generate_signature(image_id, filter_spec, key=None):
if key is None:
key = settings.SECRET_KEY
# Key must be a bytes object
if isinstance(key, str):
key = key.encode()
# Based on libthumbor hmac generation
# https://github.com/thumbor/libthumbor/blob/b19dc58cf84787e08c8e397ab322e86268bb4345/libthumbor/crypto.py#L50
url = '{}/{}/'.format(image_id, filter_spec)
return force_text(base64.urlsafe_b64encode(hmac.new(key, url.encode(), hashlib.sha1).digest()))
def verify_signature(signature, image_id, filter_spec, key=None):
return force_text(signature) == generate_signature(image_id, filter_spec, key=key)
def generate_image_url(image, filter_spec, viewname='wagtailimages_serve', key=None):
signature = generate_signature(image.id, filter_spec, key)
url = reverse(viewname, args=(signature, image.id, filter_spec))
url += image.file.name[len('original_images/'):]
return url
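# Illustrative sketch (not part of Wagtail itself) of how the signing helpers
# above fit together; the image id, filter spec and key are made-up values.
def _example_signature_roundtrip():
    signature = generate_signature(42, 'fill-200x200', key='not-a-real-secret')
    assert verify_signature(signature, 42, 'fill-200x200', key='not-a-real-secret')
    return signature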
class ServeView(View):
model = get_image_model()
action = 'serve'
key = None
@classonlymethod
def as_view(cls, **initkwargs):
if 'action' in initkwargs:
if initkwargs['action'] not in ['serve', 'redirect']:
raise ImproperlyConfigured("ServeView action must be either 'serve' or 'redirect'")
return super(ServeView, cls).as_view(**initkwargs)
def get(self, request, signature, image_id, filter_spec):
if not verify_signature(signature.encode(), image_id, filter_spec, key=self.key):
raise PermissionDenied
image = get_object_or_404(self.model, id=image_id)
# Get/generate the rendition
try:
rendition = image.get_rendition(filter_spec)
except SourceImageIOError:
return HttpResponse("Source image file not found", content_type='text/plain', status=410)
except InvalidFilterSpecError:
return HttpResponse("Invalid filter spec: " + filter_spec, content_type='text/plain', status=400)
return getattr(self, self.action)(rendition)
def serve(self, rendition):
# Open and serve the file
rendition.file.open('rb')
image_format = imghdr.what(rendition.file)
return StreamingHttpResponse(FileWrapper(rendition.file),
content_type='image/' + image_format)
def redirect(self, rendition):
# Redirect to the file's public location
return HttpResponsePermanentRedirect(rendition.url)
serve = ServeView.as_view()
class SendFileView(ServeView):
backend = None
def serve(self, rendition):
return sendfile(self.request, rendition.file.path, backend=self.backend)
| bsd-3-clause | -6,663,978,026,707,857,000 | 34.789474 | 114 | 0.707353 | false |
toros-astro/ProperImage | properimage/utils.py | 1 | 10039 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# utils.py
#
# Copyright 2016 Bruno S <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
"""utils module from ProperImage,
for coadding astronomical images.
Written by Bruno SANCHEZ
PhD of Astronomy - UNC
[email protected]
Instituto de Astronomia Teorica y Experimental (IATE) UNC
Cordoba - Argentina
Of 301
"""
import os
import astroalign as aa
import numpy as np
import scipy.ndimage as ndimage
from astropy.io import fits
from astropy.modeling import fitting, models
from astropy.stats import sigma_clipped_stats
from numpy.lib.recfunctions import append_fields
from scipy import sparse
from scipy.spatial import cKDTree
aa.PIXEL_TOL = 0.3
aa.NUM_NEAREST_NEIGHBORS = 5
aa.MIN_MATCHES_FRACTION = 0.6
def store_img(img, path=None):
if isinstance(img[0, 0], np.complex):
img = img.real
if isinstance(img, np.ma.core.MaskedArray):
mask = img.mask.astype("int")
data = img.data
hdu_data = fits.PrimaryHDU(data)
hdu_data.scale(type="float32")
hdu_mask = fits.ImageHDU(mask, uint="uint8")
hdu_mask.header["IMG_TYPE"] = "BAD_PIXEL_MASK"
hdu = fits.HDUList([hdu_data, hdu_mask])
else:
hdu = fits.PrimaryHDU(img)
if path is not None:
hdu.writeto(path, overwrite=True)
else:
return hdu
def crossmatch(X1, X2, max_distance=np.inf):
"""Cross-match the values between X1 and X2
By default, this uses a KD Tree for speed.
Parameters
----------
X1 : array_like
first dataset, shape(N1, D)
X2 : array_like
second dataset, shape(N2, D)
max_distance : float (optional)
maximum radius of search. If no point is within the given radius,
then inf will be returned.
Returns
-------
dist, ind: ndarrays
The distance and index of the closest point in X2 to each point in X1
Both arrays are length N1.
Locations with no match are indicated by
dist[i] = inf, ind[i] = N2
"""
X1 = np.asarray(X1, dtype=float)
X2 = np.asarray(X2, dtype=float)
N1, D = X1.shape
N2, D2 = X2.shape
if D != D2:
raise ValueError("Arrays must have the same second dimension")
kdt = cKDTree(X2)
dist, ind = kdt.query(X1, k=1, distance_upper_bound=max_distance)
return dist, ind
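# Minimal usage sketch of crossmatch (illustrative values, not used elsewhere here):
# match two small 2-D point sets within a 1.5 pixel radius.
def _crossmatch_example():
    X1 = np.array([[10.0, 10.0], [50.0, 50.0]])
    X2 = np.array([[10.2, 9.9], [80.0, 80.0]])
    dist, ind = crossmatch(X1, X2, max_distance=1.5)
    # dist[0] is the ~0.22 px offset to X2[0]; the second source has no match,
    # so dist[1] is inf and ind[1] == len(X2).
    return dist, ind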
def _matching(master, cat, masteridskey=None, radius=1.5, masked=False):
"""Function to match stars between frames."""
if masteridskey is None:
masterids = np.arange(len(master))
master["masterindex"] = masterids
idkey = "masterindex"
else:
idkey = masteridskey
masterXY = np.empty((len(master), 2), dtype=np.float64)
masterXY[:, 0] = master["x"]
masterXY[:, 1] = master["y"]
imXY = np.empty((len(cat), 2), dtype=np.float64)
imXY[:, 0] = cat["x"]
imXY[:, 1] = cat["y"]
dist, ind = crossmatch(masterXY, imXY, max_distance=radius)
dist_, ind_ = crossmatch(imXY, masterXY, max_distance=radius)
IDs = np.zeros_like(ind_) - 13133
for i in range(len(ind_)):
if dist_[i] != np.inf:
ind_o = ind_[i]
if dist[ind_o] != np.inf:
ind_s = ind[ind_o]
if ind_s == i:
IDs[i] = master[idkey][ind_o]
if masked:
mask = IDs > 0
return (IDs, mask)
return IDs
def transparency(images, master=None):
"""Transparency calculator, using Ofek method."""
if master is None:
p = len(images)
master = images[0]
imglist = images[1:]
else:
# master is a separated file
p = len(images) + 1
imglist = images
mastercat = master.best_sources
try:
mastercat = append_fields(
mastercat,
"sourceid",
np.arange(len(mastercat)),
usemask=False,
dtypes=int,
)
except ValueError:
pass
detect = np.repeat(True, len(mastercat))
# Matching the sources
for img in imglist:
newcat = img.best_sources
ids, mask = _matching(
mastercat,
newcat,
masteridskey="sourceid",
radius=2.0,
masked=True,
)
try:
newcat = append_fields(newcat, "sourceid", ids, usemask=False)
except ValueError:
newcat["sourceid"] = ids
for i in range(len(mastercat)):
if mastercat[i]["sourceid"] not in ids:
detect[i] = False
newcat.sort(order="sourceid")
img.update_sources(newcat)
try:
mastercat = append_fields(
mastercat, "detected", detect, usemask=False, dtypes=bool
)
except ValueError:
mastercat["detected"] = detect
# Now populating the vector of magnitudes
q = sum(mastercat["detected"])
if q != 0:
m = np.zeros(p * q)
# here 20 is a common value for a zp, and is only for weighting
m[:q] = (
-2.5 * np.log10(mastercat[mastercat["detected"]]["flux"]) + 20.0
)
j = 0
for row in mastercat[mastercat["detected"]]:
for img in imglist:
cat = img.best_sources
imgrow = cat[cat["sourceid"] == row["sourceid"]]
m[q + j] = -2.5 * np.log10(imgrow["flux"]) + 20.0
j += 1
master.update_sources(mastercat)
ident = sparse.identity(q)
col = np.repeat(1.0, q)
sparses = []
for j in range(p):
ones_col = np.zeros((q, p))
ones_col[:, j] = col
sparses.append([sparse.csc_matrix(ones_col), ident])
H = sparse.bmat(sparses)
P = sparse.linalg.lsqr(H, m)
zps = P[0][:p]
meanmags = P[0][p:]
return np.asarray(zps), np.asarray(meanmags)
else:
return np.ones(p), np.nan
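# Illustrative toy version of the zero-point solve used in transparency() above:
# for p images and q matched stars the design matrix H = [per-image ones | identity]
# is solved in the least-squares sense for [zp_1..zp_p, mean_mag_1..mean_mag_q].
# All magnitudes below are made-up values.
def _transparency_toy_solve():
    p, q = 2, 3
    m = np.array([20.1, 21.0, 19.5, 20.4, 21.3, 19.8])
    ident = sparse.identity(q)
    blocks = []
    for j in range(p):
        ones_col = np.zeros((q, p))
        ones_col[:, j] = 1.0
        blocks.append([sparse.csc_matrix(ones_col), ident])
    H = sparse.bmat(blocks)
    solution = sparse.linalg.lsqr(H, m)[0]
    zps, meanmags = solution[:p], solution[p:]
    return zps, meanmags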
def _align_for_diff(refpath, newpath, newmask=None):
"""Function to align two images using their paths,
and returning newpaths for differencing.
We will allways rotate and align the new image to the reference,
so it is easier to compare differences along time series.
"""
ref = np.ma.masked_invalid(fits.getdata(refpath))
new = fits.getdata(newpath)
hdr = fits.getheader(newpath)
if newmask is not None:
new = np.ma.masked_array(new, mask=fits.getdata(newmask))
else:
new = np.ma.masked_invalid(new)
dest_file = "aligned_" + os.path.basename(newpath)
dest_file = os.path.join(os.path.dirname(newpath), dest_file)
try:
new2 = aa.register(ref, new.filled(np.median(new)))
except ValueError:
ref = ref.astype(float)
new = new.astype(float)
new2 = aa.register(ref, new)
hdr.set("comment", "aligned img " + newpath + " to " + refpath)
if isinstance(new2, np.ma.masked_array):
hdu = fits.HDUList(
[
fits.PrimaryHDU(new2.data, header=hdr),
fits.ImageHDU(new2.mask.astype("uint8")),
]
)
hdu.writeto(dest_file, overwrite=True)
else:
fits.writeto(dest_file, new2, hdr, overwrite=True)
return dest_file
def _align_for_coadd(imglist):
"""
Function to align a group of images for coadding, it uses
the astroalign `align_image` tool.
"""
ref = imglist[0]
new_list = [ref]
for animg in imglist[1:]:
registrd, registrd_mask = aa.register(
animg.data, ref.data, propagate_mask=True
)
# [: ref.data.shape[0], : ref.data.shape[1]], Deprecated
new_list.append(
type(animg)(registrd, mask=registrd_mask, borders=False)
)
return new_list
def find_S_local_maxima(S_image, threshold=2.5, neighborhood_size=5):
mean, median, std = sigma_clipped_stats(S_image, maxiters=3)
labeled, num_objects = ndimage.label((S_image - mean) / std > threshold)
xy = np.array(
ndimage.center_of_mass(S_image, labeled, range(1, num_objects + 1))
)
cat = []
for x, y in xy:
cat.append((y, x, (S_image[int(x), int(y)] - mean) / std))
return cat
def chunk_it(seq, num):
"""Creates chunks of a sequence suitable for data parallelism using
multiprocessing.
Parameters
----------
seq: list, array or sequence like object. (indexable)
data to separate in chunks
num: int
number of chunks required
Returns
-------
Sorted list.
List of chunks containing the data splited in num parts.
"""
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last) : int(last + avg)])
last += avg
try:
return sorted(out, reverse=True)
except TypeError:
return out
except ValueError:
return out
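# Small usage sketch (illustrative): split six items into three chunks suitable
# for handing to separate worker processes.
def _chunk_it_example():
    parts = chunk_it(list(range(6)), 3)
    # parts == [[4, 5], [2, 3], [0, 1]]; every element lands in exactly one chunk.
    return parts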
def fit_gaussian2d(b, fitter=None):
if fitter is None:
fitter = fitting.LevMarLSQFitter()
y2, x2 = np.mgrid[: b.shape[0], : b.shape[1]]
ampl = b.max() - b.min()
p = models.Gaussian2D(
x_mean=b.shape[1] / 2.0,
y_mean=b.shape[0] / 2.0,
x_stddev=1.0,
y_stddev=1.0,
theta=np.pi / 4.0,
amplitude=ampl,
)
p += models.Const2D(amplitude=b.min())
out = fitter(p, x2, y2, b, maxiter=1000)
return out
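# Toy example (illustrative): fit a noiseless synthetic Gaussian patch; the
# returned compound model is Gaussian2D + Const2D with fitted means near (7, 7).
def _fit_gaussian2d_example():
    yy, xx = np.mgrid[:15, :15]
    patch = 100.0 * np.exp(-0.5 * (((xx - 7.0) / 2.0) ** 2 + ((yy - 7.0) / 2.0) ** 2)) + 10.0
    return fit_gaussian2d(patch)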
| bsd-3-clause | 2,111,238,457,958,437,600 | 27.278873 | 77 | 0.591892 | false |
HybridF5/jacket | jacket/tests/storage/unit/api/contrib/test_snapshot_manage.py | 1 | 5611 | # Copyright (c) 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
import webob
from jacket import context
from jacket.storage import exception
from jacket.storage import test
from jacket.tests.storage.unit.api import fakes
from jacket.tests.storage.unit import fake_service
def app():
# no auth, just let environ['storage.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
def volume_get(self, context, volume_id, viewable_admin_meta=False):
if volume_id == 'fake_volume_id':
return {'id': 'fake_volume_id', 'name': 'fake_volume_name',
'host': 'fake_host'}
raise exception.VolumeNotFound(volume_id=volume_id)
@mock.patch('storage.volume.api.API.get', volume_get)
class SnapshotManageTest(test.TestCase):
"""Test cases for storage/api/contrib/snapshot_manage.py
The API extension adds a POST /os-snapshot-manage API that is passed a
storage volume id, and a driver-specific reference parameter.
If everything is passed correctly,
then the storage.volume.api.API.manage_existing_snapshot method
is invoked to manage an existing storage object on the host.
In this set of test cases, we are ensuring that the code correctly parses
the request structure and raises the correct exceptions when things are not
right, and calls down into storage.volume.api.API.manage_existing_snapshot
with the correct arguments.
"""
def _get_resp(self, body):
"""Helper to execute an os-snapshot-manage API call."""
req = webob.Request.blank('/v2/fake/os-snapshot-manage')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.environ['storage.context'] = context.RequestContext('admin',
'fake',
True)
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(app())
return res
@mock.patch('storage.volume.rpcapi.VolumeAPI.manage_existing_snapshot')
@mock.patch('storage.volume.api.API.create_snapshot_in_db')
@mock.patch('storage.db.service_get_by_args')
def test_manage_snapshot_ok(self, mock_db,
mock_create_snapshot, mock_rpcapi):
"""Test successful manage volume execution.
Tests for correct operation when valid arguments are passed in the
request body. We ensure that storage.volume.api.API.manage_existing got
called with the correct arguments, and that we return the correct HTTP
code to the caller.
"""
ctxt = context.RequestContext('admin', 'fake', True)
mock_db.return_value = fake_service.fake_service_obj(
ctxt,
binary='storage-volume')
body = {'snapshot': {'volume_id': 'fake_volume_id', 'ref': 'fake_ref'}}
res = self._get_resp(body)
self.assertEqual(202, res.status_int, res)
# Check the db.service_get_by_host_and_topic was called with correct
# arguments.
self.assertEqual(1, mock_db.call_count)
args = mock_db.call_args[0]
self.assertEqual('fake_host', args[1])
# Check the create_snapshot_in_db was called with correct arguments.
self.assertEqual(1, mock_create_snapshot.call_count)
args = mock_create_snapshot.call_args[0]
named_args = mock_create_snapshot.call_args[1]
self.assertEqual('fake_volume_id', args[1].get('id'))
# We should commit quota in storage-volume layer for this operation.
self.assertFalse(named_args['commit_quota'])
        # Check the volume rpcapi's manage_existing_snapshot was called with
# correct arguments.
self.assertEqual(1, mock_rpcapi.call_count)
args = mock_rpcapi.call_args[0]
self.assertEqual('fake_ref', args[2])
def test_manage_snapshot_missing_volume_id(self):
"""Test correct failure when volume_id is not specified."""
body = {'snapshot': {'ref': 'fake_ref'}}
res = self._get_resp(body)
self.assertEqual(400, res.status_int)
def test_manage_snapshot_missing_ref(self):
"""Test correct failure when the ref is not specified."""
body = {'snapshot': {'volume_id': 'fake_volume_id'}}
res = self._get_resp(body)
self.assertEqual(400, res.status_int)
def test_manage_snapshot_error_body(self):
"""Test correct failure when body is invaild."""
body = {'error_snapshot': {'volume_id': 'fake_volume_id'}}
res = self._get_resp(body)
self.assertEqual(400, res.status_int)
def test_manage_snapshot_error_volume_id(self):
"""Test correct failure when volume can't be found."""
body = {'snapshot': {'volume_id': 'error_volume_id',
'ref': 'fake_ref'}}
res = self._get_resp(body)
self.assertEqual(404, res.status_int)
| apache-2.0 | 4,292,069,018,880,063,000 | 41.18797 | 79 | 0.648726 | false |
seung-lab/cloud-volume | cloudvolume/datasource/precomputed/image/rx.py | 1 | 11090 | from functools import partial
import itertools
import math
import os
import threading
import numpy as np
from six.moves import range
from tqdm import tqdm
from cloudfiles import reset_connection_pools, CloudFiles, compression
import fastremap
from ....exceptions import EmptyVolumeException, EmptyFileException
from ....lib import (
mkdir, clamp, xyzrange, Vec,
Bbox, min2, max2, check_bounds,
jsonify, red
)
from .... import chunks
from cloudvolume.scheduler import schedule_jobs
from cloudvolume.threaded_queue import DEFAULT_THREADS
from cloudvolume.volumecutout import VolumeCutout
import cloudvolume.sharedmemory as shm
from ..common import should_compress, content_type
from .common import (
fs_lock, parallel_execution,
chunknames, shade, gridpoints,
compressed_morton_code
)
from .. import sharding
def download_sharded(
requested_bbox, mip,
meta, cache, spec,
compress, progress,
fill_missing,
order
):
full_bbox = requested_bbox.expand_to_chunk_size(
meta.chunk_size(mip), offset=meta.voxel_offset(mip)
)
full_bbox = Bbox.clamp(full_bbox, meta.bounds(mip))
shape = list(requested_bbox.size3()) + [ meta.num_channels ]
compress_cache = should_compress(meta.encoding(mip), compress, cache, iscache=True)
chunk_size = meta.chunk_size(mip)
grid_size = np.ceil(meta.bounds(mip).size3() / chunk_size).astype(np.uint32)
reader = sharding.ShardReader(meta, cache, spec)
bounds = meta.bounds(mip)
renderbuffer = np.zeros(shape=shape, dtype=meta.dtype, order=order)
gpts = list(gridpoints(full_bbox, bounds, chunk_size))
code_map = {}
morton_codes = compressed_morton_code(gpts, grid_size)
for gridpoint, morton_code in zip(gpts, morton_codes):
cutout_bbox = Bbox(
bounds.minpt + gridpoint * chunk_size,
min2(bounds.minpt + (gridpoint + 1) * chunk_size, bounds.maxpt)
)
code_map[morton_code] = cutout_bbox
all_chunkdata = reader.get_data(list(code_map.keys()), meta.key(mip), progress=progress)
for zcode, chunkdata in all_chunkdata.items():
cutout_bbox = code_map[zcode]
if chunkdata is None:
if fill_missing:
chunkdata = None
else:
raise EmptyVolumeException(cutout_bbox)
img3d = decode(
meta, cutout_bbox,
chunkdata, fill_missing, mip
)
shade(renderbuffer, requested_bbox, img3d, cutout_bbox)
return VolumeCutout.from_volume(
meta, mip, renderbuffer,
requested_bbox
)
def download(
requested_bbox, mip,
meta, cache,
fill_missing, progress,
parallel, location,
retain, use_shared_memory,
use_file, compress, order='F',
green=False, secrets=None,
renumber=False, background_color=0
):
"""Cutout a requested bounding box from storage and return it as a numpy array."""
full_bbox = requested_bbox.expand_to_chunk_size(
meta.chunk_size(mip), offset=meta.voxel_offset(mip)
)
full_bbox = Bbox.clamp(full_bbox, meta.bounds(mip))
cloudpaths = list(chunknames(
full_bbox, meta.bounds(mip),
meta.key(mip), meta.chunk_size(mip),
protocol=meta.path.protocol
))
shape = list(requested_bbox.size3()) + [ meta.num_channels ]
compress_cache = should_compress(meta.encoding(mip), compress, cache, iscache=True)
handle = None
if renumber and (parallel != 1):
raise ValueError("renumber is not supported for parallel operation.")
if use_shared_memory and use_file:
raise ValueError("use_shared_memory and use_file are mutually exclusive arguments.")
dtype = np.uint16 if renumber else meta.dtype
if parallel == 1:
if use_shared_memory: # write to shared memory
handle, renderbuffer = shm.ndarray(
shape, dtype=dtype, order=order,
location=location, lock=fs_lock
)
if not retain:
shm.unlink(location)
elif use_file: # write to ordinary file
handle, renderbuffer = shm.ndarray_fs(
shape, dtype=dtype, order=order,
location=location, lock=fs_lock,
emulate_shm=False
)
if not retain:
os.unlink(location)
else:
renderbuffer = np.full(shape=shape, fill_value=background_color,
dtype=dtype, order=order)
def process(img3d, bbox):
shade(renderbuffer, requested_bbox, img3d, bbox)
remap = { background_color: background_color }
lock = threading.Lock()
N = 1
def process_renumber(img3d, bbox):
nonlocal N
nonlocal lock
nonlocal remap
nonlocal renderbuffer
img_labels = fastremap.unique(img3d)
with lock:
for lbl in img_labels:
if lbl not in remap:
remap[lbl] = N
N += 1
if N > np.iinfo(renderbuffer.dtype).max:
renderbuffer = fastremap.refit(renderbuffer, value=N, increase_only=True)
fastremap.remap(img3d, remap, in_place=True)
shade(renderbuffer, requested_bbox, img3d, bbox)
fn = process
if renumber and not (use_file or use_shared_memory):
fn = process_renumber
download_chunks_threaded(
meta, cache, mip, cloudpaths,
fn=fn, fill_missing=fill_missing,
progress=progress, compress_cache=compress_cache,
green=green, secrets=secrets, background_color=background_color
)
else:
handle, renderbuffer = multiprocess_download(
requested_bbox, mip, cloudpaths,
meta, cache, compress_cache,
fill_missing, progress,
parallel, location, retain,
use_shared_memory=(use_file == False),
order=order,
green=green,
secrets=secrets,
background_color=background_color
)
out = VolumeCutout.from_volume(
meta, mip, renderbuffer,
requested_bbox, handle=handle
)
if renumber:
return (out, remap)
return out
def multiprocess_download(
requested_bbox, mip, cloudpaths,
meta, cache, compress_cache,
fill_missing, progress,
parallel, location,
retain, use_shared_memory, order,
green, secrets=None, background_color=0
):
cloudpaths_by_process = []
length = int(math.ceil(len(cloudpaths) / float(parallel)) or 1)
for i in range(0, len(cloudpaths), length):
cloudpaths_by_process.append(
cloudpaths[i:i+length]
)
cpd = partial(child_process_download,
meta, cache, mip, compress_cache,
requested_bbox,
fill_missing, progress,
location, use_shared_memory,
green, secrets, background_color
)
parallel_execution(cpd, cloudpaths_by_process, parallel, cleanup_shm=location)
shape = list(requested_bbox.size3()) + [ meta.num_channels ]
if use_shared_memory:
mmap_handle, renderbuffer = shm.ndarray(
shape, dtype=meta.dtype, order=order,
location=location, lock=fs_lock
)
else:
    mmap_handle, renderbuffer = shm.ndarray_fs(
shape, dtype=meta.dtype, order=order,
location=location, lock=fs_lock,
emulate_shm=False
)
if not retain:
if use_shared_memory:
shm.unlink(location)
else:
os.unlink(location)
return mmap_handle, renderbuffer
def child_process_download(
meta, cache, mip, compress_cache,
dest_bbox,
fill_missing, progress,
location, use_shared_memory, green,
secrets, background_color, cloudpaths
):
reset_connection_pools() # otherwise multi-process hangs
shape = list(dest_bbox.size3()) + [ meta.num_channels ]
if use_shared_memory:
array_like, dest_img = shm.ndarray(
shape, dtype=meta.dtype,
location=location, lock=fs_lock
)
else:
array_like, dest_img = shm.ndarray_fs(
shape, dtype=meta.dtype,
location=location, emulate_shm=False,
lock=fs_lock
)
if background_color != 0:
dest_img[dest_bbox.to_slices()] = background_color
def process(src_img, src_bbox):
shade(dest_img, dest_bbox, src_img, src_bbox)
download_chunks_threaded(
meta, cache, mip, cloudpaths,
fn=process, fill_missing=fill_missing,
progress=progress, compress_cache=compress_cache,
green=green, secrets=secrets, background_color=background_color
)
array_like.close()
def download_chunk(
meta, cache,
cloudpath, mip,
filename, fill_missing,
enable_cache, compress_cache,
secrets, background_color
):
(file,) = CloudFiles(cloudpath, secrets=secrets).get([ filename ], raw=True)
content = file['content']
if enable_cache:
cache_content = next(compression.transcode(file, compress_cache))['content']
CloudFiles('file://' + cache.path).put(
path=filename,
content=(cache_content or b''),
content_type=content_type(meta.encoding(mip)),
compress=compress_cache,
raw=bool(cache_content),
)
del cache_content
if content is not None:
content = compression.decompress(content, file['compress'])
bbox = Bbox.from_filename(filename) # possible off by one error w/ exclusive bounds
img3d = decode(meta, filename, content, fill_missing, mip,
background_color=background_color)
return img3d, bbox
def download_chunks_threaded(
meta, cache, mip, cloudpaths, fn,
fill_missing, progress, compress_cache,
green=False, secrets=None, background_color=0
):
locations = cache.compute_data_locations(cloudpaths)
cachedir = 'file://' + os.path.join(cache.path, meta.key(mip))
def process(cloudpath, filename, enable_cache):
img3d, bbox = download_chunk(
meta, cache, cloudpath, mip,
filename, fill_missing,
enable_cache, compress_cache,
secrets, background_color
)
fn(img3d, bbox)
local_downloads = (
partial(process, cachedir, os.path.basename(filename), False) for filename in locations['local']
)
remote_downloads = (
partial(process, meta.cloudpath, filename, cache.enabled) for filename in locations['remote']
)
downloads = itertools.chain( local_downloads, remote_downloads )
if progress and not isinstance(progress, str):
progress = "Downloading"
schedule_jobs(
fns=downloads,
concurrency=DEFAULT_THREADS,
progress=progress,
total=len(cloudpaths),
green=green,
)
def decode(meta, input_bbox, content, fill_missing, mip, background_color=0):
"""
Decode content from bytes into a numpy array using the
dataset metadata.
If fill_missing is True, return an array filled with background_color
if content is empty. Otherwise, raise an EmptyVolumeException
in that case.
Returns: ndarray
"""
bbox = Bbox.create(input_bbox)
content_len = len(content) if content is not None else 0
if not content:
if fill_missing:
content = b''
else:
raise EmptyVolumeException(input_bbox)
shape = list(bbox.size3()) + [ meta.num_channels ]
try:
return chunks.decode(
content,
encoding=meta.encoding(mip),
shape=shape,
dtype=meta.dtype,
block_size=meta.compressed_segmentation_block_size(mip),
background_color=background_color
)
except Exception as error:
print(red('File Read Error: {} bytes, {}, {}, errors: {}'.format(
content_len, bbox, input_bbox, error)))
raise
| bsd-3-clause | 5,789,913,100,231,406,000 | 27.656331 | 101 | 0.67358 | false |
tangentlabs/tangent-deployer | src/tangentdeployer/aws/elb.py | 1 | 4028 | import json
import utils
import boto.ec2.elb
from fabconfig import env
from fabric.api import local
def get_or_create_load_balancer():
utils.status("Getting %s load balancer" % env.environment)
load_balancer = get(load_balancer_name=env.load_balancer_name)
if not load_balancer:
return create_load_balancer()
return load_balancer
def create_load_balancer():
load_balancer = env.connections.elb.create_load_balancer(
name=env.load_balancer_name,
zones=env.zones,
security_groups=utils.security_groups(),
complex_listeners=[('80', '80', 'http', 'http')]
)
utils.success('Finished creating load balancer')
health_check = create_health_check()
load_balancer.configure_health_check(health_check=health_check)
return load_balancer
def create_health_check():
utils.status('Creating health check for load balancer')
health_check = boto.ec2.elb.HealthCheck(
interval=10,
healthy_threshold=2,
unhealthy_threshold=3,
target='HTTP:80/health')
utils.success('Finished creating health check for load balancer')
return health_check
def register_instances(load_balancer, autoscaling_group):
instances = [
instance.instance_id
for instance in autoscaling_group.instances
]
env.connections.elb.register_instances(
load_balancer_name=load_balancer.name, instances=instances)
def deregister_instances(load_balancer, autoscaling_group):
instances = [
instance.instance_id
for instance in autoscaling_group.instances
]
env.connections.elb.deregister_instances(
load_balancer_name=load_balancer.name, instances=instances)
def get(load_balancer_name):
utils.status('Getting %s load balancer' % env.environment)
try:
load_balancers = env.connections.elb.get_all_load_balancers(
load_balancer_names=[env.load_balancer_name])
except boto.exception.BotoServerError:
return None
return load_balancers[0]
def has_tag(load_balancer_name, key, value):
"""
We fall back to using the AWS CLI tool here because boto doesn't
support adding tags to load balancers yet.
As soon as https://github.com/boto/boto/issues/2549 is merged we're good
to change this to use boto.
"""
response = json.loads(local(
'aws elb describe-tags '
'--load-balancer-names %s '
'--region=%s --profile=%s' % (load_balancer_name,
env.region,
env.profile_name),
capture=True))
in_env = False
if 'TagDescriptions' in response:
for tag_description in response['TagDescriptions']:
for tag in tag_description['Tags']:
if tag['Key'] == 'env' and tag['Value'] == env.environment:
in_env = True
for tag in tag_description['Tags']:
if tag['Key'] == 'type' and tag['Value'] == value and in_env:
return True
return False
def tag(load_balancer, tags):
"""
We fall back to using the AWS CLI tool here because boto doesn't
support adding tags to load balancers yet.
As soon as https://github.com/boto/boto/issues/2549 is merged we're good
to change this to use boto
"""
utils.status('Tagging load balancer')
tags = make_tags(tags=tags)
local('aws elb add-tags '
'--load-balancer-names {lb_name} '
'--tags {tags} '
'--region={region} '
'--profile={profile_name}'.format(lb_name=load_balancer.name,
tags=tags,
region=env.region,
profile_name=env.profile_name)
)
utils.success('Finished tagging load balancer')
def make_tags(tags):
return ' '.join(
'Key={key},Value={value}'.format(key=key, value=value)
for key, value in tags.iteritems()
)
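# Illustrative example (not called by the deploy tasks): make_tags renders a dict
# of tag names/values into the argument string expected by `aws elb add-tags`,
# e.g. {'env': 'prod', 'type': 'web'} -> "Key=env,Value=prod Key=type,Value=web"
# (ordering follows dict iteration order).
def _make_tags_example():
    return make_tags({'env': 'prod', 'type': 'web'})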
| mit | -4,524,741,475,131,346,400 | 31.483871 | 77 | 0.617428 | false |
Blazemeter/taurus | tests/resources/apiritif/test_codegen.py | 1 | 2578 | # coding=utf-8
import logging
import random
import string
import sys
import unittest
from time import time, sleep
import apiritif
log = logging.getLogger('apiritif.http')
log.addHandler(logging.StreamHandler(sys.stdout))
log.setLevel(logging.DEBUG)
class TestWithExtractors(unittest.TestCase):
def setUp(self):
self.vars = {}
timeout = 5.0
self.target = apiritif.http.target('https://jsonplaceholder.typicode.com')
self.target.keep_alive(True)
self.target.auto_assert_ok(True)
self.target.use_cookies(True)
self.target.allow_redirects(True)
self.target.timeout(5.0)
apiritif.put_into_thread_store(timeout=timeout, func_mode=False, scenario_name='with-extractors')
def _1_just_get(self):
with apiritif.smart_transaction('just get'):
response = self.target.get('/')
def _2_get_posts(self):
with apiritif.smart_transaction('get posts'):
response = self.target.get('/posts')
response.assert_jsonpath('$.[0].userId', expected_value=1)
self.vars['userID'] = response.extract_jsonpath('$.[5].userId', 'NOT_FOUND')
def _3_get_posts_of_certain_user(self):
with apiritif.smart_transaction('get posts of certain user'):
response = self.target.get('/posts?userId={}'.format(self.vars['userID']))
self.vars['postID'] = response.extract_jsonpath('$.[0].id', 'NOT_FOUND')
def _4_get_comments_on_post(self):
with apiritif.smart_transaction('get comments on post'):
response = self.target.get('/posts/{}/comments'.format(self.vars['postID']))
response.assert_jsonpath('$[0].email', expected_value=None)
def _5_add_into_posts(self):
with apiritif.smart_transaction('add into posts'):
response = self.target.post('/posts', headers={
'content-type': 'application/json',
}, json={
'body': 'bar',
'title': 'foo',
'userId': self.vars['userID'],
})
self.vars['addedID'] = response.extract_jsonpath('$.id', 'NOT_FOUND')
def _6_delete_from_posts(self):
with apiritif.smart_transaction('delete from posts'):
response = self.target.delete('/posts/{}'.format(self.vars['postID']))
def test_with_extractors(self):
self._1_just_get()
self._2_get_posts()
self._3_get_posts_of_certain_user()
self._4_get_comments_on_post()
self._5_add_into_posts()
self._6_delete_from_posts()
| apache-2.0 | 2,061,219,892,558,965,800 | 34.315068 | 105 | 0.611715 | false |
jureslak/racunalniske-delavnice | fmf/python_v_divjini/projekt/test/test_game.py | 1 | 1623 | from tictactoe import game, player
import unittest
from unittest import mock
class GameTest(unittest.TestCase):
def setUp(self):
self.num_of_players = 2
self.width = 3
self.height = 3
self.game = game.Game(2, 3, 3)
def test_init(self):
self.assertEqual(self.game.board, None)
self.assertEqual(self.game.width, self.width)
self.assertEqual(self.game.height, self.height)
self.assertEqual(self.game.num_of_players, self.num_of_players)
self.assertEqual(self.game.players, [])
self.assertEqual(self.game.round_counter, 0)
self.assertEqual(self.game.on_turn, 0)
def test_setup(self):
input_seq = ['Luke', 'x', 'Leia', 'o']
with mock.patch('builtins.input', side_effect=input_seq):
self.game.setup()
expected = [('Luke', 'x'), ('Leia', 'o')]
for e, p in zip(expected, self.game.players):
self.assertEqual(p.name, e[0])
self.assertEqual(p.symbol, e[1])
def test_play_round(self):
# setup
input_seq = ['Luke', 'x', 'Leia', 'o']
with mock.patch('builtins.input', side_effect=input_seq):
self.game.setup()
input_seq = ['2', '5', '3', '1', '9', '6', '7', '4']
with mock.patch('builtins.input', side_effect=input_seq):
self.game.play_round()
finished, winner = self.game.board.finished()
self.assertTrue(finished)
self.assertEqual(winner, 1)
expected_board = [[1, 0, 0], [1, 1, 1], [0, None, 0]]
self.assertEqual(self.game.board.grid, expected_board)
| gpl-2.0 | -3,384,628,318,615,924,700 | 35.066667 | 71 | 0.583487 | false |
bengosney/romrescue.org | team/migrations/0005_auto_20161029_1857.py | 1 | 1667 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-29 18:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import image_cropping.fields
class Migration(migrations.Migration):
dependencies = [
('team', '0004_teammember_job'),
]
operations = [
migrations.CreateModel(
name='DogPhoto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='uploads/teamdogs')),
('thumbnail', image_cropping.fields.ImageRatioField('image', '150x150', adapt_rotation=False, allow_fullsize=False, free_crop=False, help_text=None, hide_image_field=False, size_warning=False, verbose_name='thumbnail')),
('position', models.PositiveIntegerField(default=0)),
],
options={
'ordering': ('position',),
'verbose_name': 'Photo',
'verbose_name_plural': 'Photos',
},
),
migrations.AlterField(
model_name='teammember',
name='cropped',
field=image_cropping.fields.ImageRatioField('image', '400x400', adapt_rotation=False, allow_fullsize=False, free_crop=False, help_text=None, hide_image_field=False, size_warning=False, verbose_name='cropped'),
),
migrations.AddField(
model_name='dogphoto',
name='TeamMember',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='team.TeamMember'),
),
]
| gpl-3.0 | 2,396,542,858,550,056,400 | 39.658537 | 236 | 0.604079 | false |
stvstnfrd/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/definition_lazy_loader.py | 1 | 1619 | # lint-amnesty, pylint: disable=missing-module-docstring
import copy
from opaque_keys.edx.locator import DefinitionLocator
class DefinitionLazyLoader(object):
"""
A placeholder to put into an xblock in place of its definition which
when accessed knows how to get its content. Only useful if the containing
object doesn't force access during init but waits until client wants the
definition. Only works if the modulestore is a split mongo store.
"""
def __init__(self, modulestore, course_key, block_type, definition_id, field_converter):
"""
Simple placeholder for yet-to-be-fetched data
:param modulestore: the pymongo db connection with the definitions
:param definition_locator: the id of the record in the above to fetch
"""
self.modulestore = modulestore
self.course_key = course_key
self.definition_locator = DefinitionLocator(block_type, definition_id)
self.field_converter = field_converter
def fetch(self):
"""
Fetch the definition. Note, the caller should replace this lazy
loader pointer with the result so as not to fetch more than once
"""
# get_definition may return a cached value perhaps from another course or code path
# so, we copy the result here so that updates don't cross-pollinate nor change the cached
# value in such a way that we can't tell that the definition's been updated.
definition = self.modulestore.get_definition(self.course_key, self.definition_locator.definition_id)
return copy.deepcopy(definition)
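# Illustrative sketch (placeholder names, not part of the modulestore API): a
# block's field data can hold a DefinitionLazyLoader instead of the definition
# itself and only pay the fetch cost on first access, e.g.
#   lazy = DefinitionLazyLoader(modulestore, course_key, 'problem', definition_id,
#                               field_converter=None)
#   definition = lazy.fetch()  # caller replaces the loader with this result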
| agpl-3.0 | -1,342,814,386,339,033,600 | 45.257143 | 108 | 0.704756 | false |
Antergos/Cnchi | src/widgets/webcam_widget.py | 1 | 5550 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# webcam_widget.py
#
# Copyright © 2013-2018 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Widget that shows a web camera feed """
import logging
import os
import gi
gi.require_version('Gst', '1.0')
gi.require_version('Gtk', '3.0')
from gi.repository import GObject, Gst, Gtk
# Needed for window.get_xid(), xvimagesink.set_window_handle(), respectively:
gi.require_version('GstVideo', '1.0')
from gi.repository import GdkX11, GstVideo
class WebcamWidget(Gtk.DrawingArea):
""" Webcam widget """
__gtype_name__ = 'WebcamWidget'
def __init__(self, width=160, height=90):
Gtk.DrawingArea.__init__(self)
self.pipeline = None
self.xid = None
self.bus = None
self.error = False
if not os.path.exists("/dev/video0"):
logging.warning("Cannot find any camera. Camera widget won't be used")
self.error = True
self.destroy()
return
self.set_size_request(width, height)
# Initialize GStreamer
Gst.init(None)
self.create_video_pipeline(width, height)
self.connect('destroy', self.on_destroy)
def create_video_pipeline(self, width, height):
""" Create GStreamer pipeline """
# Create pipeline
self.pipeline = Gst.Pipeline.new()
# Create bus to get events from GStreamer pipeline
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.connect('message::error', self.on_error)
# This is needed to make the video output in our DrawingArea:
self.bus.enable_sync_message_emission()
self.bus.connect('sync-message::element', self.on_sync_message)
# Create GStreamer elements
self.source = Gst.ElementFactory.make('autovideosrc', 'source')
self.sink = Gst.ElementFactory.make('autovideosink', 'sink')
if self.source and self.sink:
#fmt_str = 'video/x-raw, format=(string)YV12, '
fmt_str = 'video/x-raw, '
fmt_str += 'width=(int){0}, height=(int){1}, '.format(width, height)
fmt_str += 'pixel-aspect-ratio=(fraction)1/1, '
fmt_str += 'interlace-mode=(string)progressive, '
fmt_str += 'framerate=(fraction){ 30/1, 24/1, 20/1, 15/1, 10/1, 15/2, 5/1 }'
caps = Gst.caps_from_string(fmt_str)
# Add elements to the pipeline
self.pipeline.add(self.source)
self.pipeline.add(self.sink)
self.source.link_filtered(self.sink, caps)
logging.debug("Camera found. Video pipeline created.")
else:
logging.debug("Cannot initialize camera.")
self.error = True
def show_all(self):
""" You need to get the XID after window.show_all(). You shouldn't get it
in the on_sync_message() handler because threading issues will cause
segfaults there. """
self.xid = self.get_property('window').get_xid()
if self.pipeline:
# Start playing
self.pipeline.set_state(Gst.State.PLAYING)
def on_destroy(self, _data):
""" Widget is destroyed. Stop playing """
if self.pipeline:
# Stop playing
self.pipeline.set_state(Gst.State.NULL)
self.destroy()
def on_sync_message(self, _bus, msg):
""" This is needed to make the video output in our DrawingArea """
if msg.get_structure().get_name() == 'prepare-window-handle':
msg.src.set_property('force-aspect-ratio', True)
msg.src.set_window_handle(self.xid)
@staticmethod
def on_error(_bus, msg):
""" A gst error has occurred """
logging.error(msg.parse_error())
def clicked(self, _event_box, _event_button):
""" User clicks on camera widget """
pass
GObject.type_register(WebcamWidget)
def test_module():
""" function to test this module """
window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
window.set_title("Webcam test")
window.set_default_size(160, 90)
window.connect("destroy", Gtk.main_quit, "WM destroy")
vbox = Gtk.VBox()
window.add(vbox)
overlay = Gtk.Overlay.new()
overlay.show()
webcam = WebcamWidget()
event_box = Gtk.EventBox.new()
event_box.connect(
'button-press-event',
webcam.clicked)
overlay.add_overlay(event_box)
event_box.add(webcam)
webcam.set_halign(Gtk.Align.START)
webcam.set_valign(Gtk.Align.START)
vbox.add(overlay)
window.show_all()
webcam.show_all()
GObject.threads_init()
Gtk.main()
if __name__ == '__main__':
test_module()
| gpl-3.0 | -5,490,614,462,187,439,000 | 30.890805 | 88 | 0.631465 | false |
NPPC-UK/wireless_sensors | setup_logger/setup_logger.py | 1 | 7621 | #!/usr/bin/env python
# Copyright (C) 2017 Aberystwyth University
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# setup_logger.py
# Author: Katie Awty-Carroll ([email protected])
#
import serial
import MySQLdb
import sys
import ConfigParser
from datetime import datetime
"""
setup_logger.py - Python 2.7.6 script for setting up nodes.
This script is designed to fetch OneWire sensor ID's from a node, and add them to the database. The node must be running setup_node.ino.
This script has very little error checking at the moment so be careful.
"""
print "Setting up..."
#Read settings from the "config" file in the current directory
parser = ConfigParser.SafeConfigParser()
parser.read("config")
#Fetch database details from config file
db_host = parser.get("db_connect", "host")
db_user = parser.get("db_connect", "user")
db_pass = parser.get("db_connect", "password")
db_schema = parser.get("db_connect", "schema")
#Clear input buffer, because menu will print
def clear_menu(serial_conn):
for i in range(0, 6):
serial_conn.readline()
#Set up the SQL statement to add a sensor location
def get_location_data(node_id, sensor_id, sensor_order):
loc_statement = None
print "Sensor X location: "
x_loc = raw_input()
if x_loc.isdigit():
print "X location is OK"
print "Sensor Y location: "
y_loc = raw_input()
if y_loc.isdigit():
print "Y location is OK"
print "Sensor Z location: "
z_loc = raw_input()
if z_loc.isdigit():
print "Z location is OK"
print "Sensor compartment: "
comp = raw_input()
if comp.isdigit():
print "Compartment number is OK"
print "Sensor network ID: "
network_id = raw_input()
if network_id.isdigit():
print "Network ID is OK"
loc_statement = "INSERT INTO locations (location_sensor_id, x_location, y_location, z_location, compartment, node_id, node_order, network) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');" % (sensor_id, x_loc, y_loc, z_loc, comp, node_id, sensor_order, network_id)
else:
print "Network ID is not numeric"
else:
print "Compartment number is not numeric"
else:
print "Z location is not numeric"
else:
print "Y location is not numeric"
else:
print "X location is not numeric"
return loc_statement
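# Example of the statement built above (illustrative values): for node 3, sensor 17,
# order 1 and inputs x=2, y=5, z=1, compartment 4, network 10 the INSERT becomes:
# INSERT INTO locations (location_sensor_id, x_location, y_location, z_location,
# compartment, node_id, node_order, network) VALUES ('17', '2', '5', '1', '4', '3', '1', '10');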
#Set up the SQL statement to add a sensor calibration record
def get_calibration_data(sensor_id):
cal_statement = None
print "Calibration date (YYYY-MM-DD): "
cal_date = raw_input()
try:
val_date = datetime.strptime(cal_date, '%Y-%m-%d')
print "Equation (e.g. x*2/(12-1.5)): "
cal_equation = raw_input()
try:
x = 1
equation_res = eval(cal_equation)
print "Instrument used to calibrate: "
cal_instrument = raw_input()
print "Who performed the calibration: "
cal_person = raw_input()
cal_statement = "INSERT INTO calibration_data VALUES (equation_sensor_id, calibration_date, equation, calibration_instrument, who_calibrated) VALUES ('%s', '%s', '%s', '%s', '%s')" % (sensor_id, cal_date, cal_equation, cal_instrument, cal_person)
except SyntaxError:
print "Equation cannot be evaluated - check your syntax"
except ValueError:
print "Date needs to be in YYYY-MM-DD format"
return cal_statement
def main():
calibration_flag = "NO"
#Connect to serial port so that we can communicate with the Moteino
try:
serial_conn = serial.Serial("/dev/ttyUSB0", 115200)
print "Connected to serial port"
clear_menu(serial_conn)
except OSError as e:
print "ERROR: Could not open serial port: %s" % (e)
sys.exit(1)
try:
#Connect to database
db = MySQLdb.connect(db_host, db_user, db_pass, db_schema)
#Set up cursor to fetch data
cursor = db.cursor()
print "Connected to database"
print "Fetching Node ID..."
serial_conn.write('6')
node_id = serial_conn.readline()
print "Node ID is " + node_id
#Check that the node ID is within range
if int(node_id) > 1 and int(node_id) < 255:
clear_menu(serial_conn)
print "Fetching OneWire sensors from node..."
#Send instruction to get OneWire sensor data
serial_conn.write('7')
#Fetch reply
num_sensors = serial_conn.read(1)
print "Number of sensors in EEPROM: " + num_sensors
#Check that the number of sensors is within range
if int(num_sensors) > 0 and int(num_sensors) <= 5:
for i in range(1, int(num_sensors)+1):
sensor_addr = serial_conn.readline()
print "Received address of device " + str(i)
sensor_addr = sensor_addr.strip()
print sensor_addr
print "Date of purchase for this sensor (YYYY-MM-DD): "
dop = raw_input()
print "Has this sensor been calibrated? (y/n) "
if_calibrated = raw_input()
if if_calibrated == "y":
calibration_flag = "YES"
print "Preparing sensor SQL statement..."
add_sensor = "INSERT INTO sensors (manufacturer, calibrated, sensor_type, measurement_unit, date_purchased, serial_number) VALUES ('Dallas OneWire', '%s', 'temperature', 'degrees celsius', '%s', '%s');" % (calibration_flag, dop, sensor_addr)
cursor.execute(add_sensor)
#Commit the change so that we can then fetch the sensor ID
db.commit()
#The ID of the sensor we just added will be the highest value in the auto incrementing column
cursor.execute("SELECT sensor_id FROM sensors ORDER BY sensor_id DESC LIMIT 1;")
sensor_id = cursor.fetchone()
#Add location data
print "Add location data? (y/n) "
if_location = raw_input()
if if_location == "y":
#Add location data
location_record = get_location_data(node_id, sensor_id[0], i)
if location_record != None:
print "Adding location data"
cursor.execute(location_record)
else:
print "Invalid location data"
if calibration_flag == "YES":
#Calibration flag has been set to YES, so add calibration data
"Calibration data needs to be added for this sensor"
calibration_record = get_calibration_data(sensor_id)
if calibration_record != None:
print "Adding calibration data"
cursor.execute(calibration_record)
else:
#User entered values are probably incorrect. Check if the user wants to change the calibration flag to NO
print "Invalid calibration data. Set calibrated field to NO? (y/n) "
if_reset = raw_input()
if if_reset == "y":
                                update_cal = "UPDATE sensors SET calibrated = 'NO' WHERE sensor_id = '%s'" % (sensor_id[0])
cursor.execute(update_cal)
else:
print "Warning: Calibrated flag is set to YES, but no calibration data has been added"
#Commit calibration and location data
db.commit()
print "Changes to database have been committed"
print "Done"
else:
print "Invalid number of sensors"
else:
print "Node ID is invalid or has not been set"
#Catch any errors associated with accessing the database
except MySQLdb.Error as e:
print "***ERROR***: Database error: {} {}" % (e[0], e[1])
db.rollback()
finally:
db.close()
main()
| gpl-3.0 | -9,138,549,356,838,000,000 | 27.436567 | 278 | 0.668023 | false |
googleads/googleads-python-lib | examples/ad_manager/v202105/custom_field_service/deactivate_all_line_item_custom_fields.py | 1 | 2682 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example deactivates all active line items custom fields.
To determine which custom fields exist, run get_all_custom_fields.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
custom_field_service = client.GetService(
'CustomFieldService', version='v202105')
# Create statement to select only active custom fields that apply to
# line items.
statement = (ad_manager.StatementBuilder(version='v202105')
.Where('entityType = :entityType and isActive = :isActive')
.WithBindVariable('entityType', 'LINE_ITEM')
.WithBindVariable('isActive', True))
custom_fields_deactivated = 0
# Get custom fields by statement.
while True:
response = custom_field_service.getCustomFieldsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
# Display results.
for custom_field in response['results']:
print('Custom field with ID "%s" and name "%s" will'
' be deactivated.' % (custom_field['id'], custom_field['name']))
result = custom_field_service.performCustomFieldAction(
{'xsi_type': 'DeactivateCustomFields'}, statement.ToStatement())
if result and int(result['numChanges']) > 0:
custom_fields_deactivated += int(result['numChanges'])
statement.offset += statement.limit
else:
break
if custom_fields_deactivated > 0:
print('Number of custom fields deactivated: %s' % custom_fields_deactivated)
else:
print('No custom fields were deactivated.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| apache-2.0 | 8,414,907,480,595,389,000 | 35.739726 | 80 | 0.709918 | false |
Rogentos/argent-anaconda | installclasses/awesome.py | 1 | 2687 | #
# awesome.py
#
# Copyright (C) 2010 Fabio Erculiani
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from installclass import BaseInstallClass
from constants import *
from product import *
from flags import flags
import os, types
import iutil
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import installmethod
from sabayon import Entropy
from sabayon.livecd import LiveCDCopyBackend
class InstallClass(BaseInstallClass):
id = "sabayon_awesome"
name = N_("Rogentos Awesome")
_pixmap_dirs = os.getenv("PIXMAPPATH", "/usr/share/pixmaps").split(":")
for _pix_dir in _pixmap_dirs:
_pix_path = os.path.join(_pix_dir, "awesome.png")
if os.path.isfile(_pix_path):
pixmap = _pix_path
dmrc = "awesome"
if Entropy().is_sabayon_steambox():
dmrc = "steambox"
_description = N_("Select this installation type for a default installation "
"with the Awesome desktop environment. "
"A small lightweight and functional working environment at your service.")
_descriptionFields = (productName,)
sortPriority = 10000
if not Entropy().is_installed("x11-wm/awesome"):
hidden = 1
def configure(self, anaconda):
BaseInstallClass.configure(self, anaconda)
BaseInstallClass.setDefaultPartitioning(self,
anaconda.storage, anaconda.platform)
def setSteps(self, anaconda):
BaseInstallClass.setSteps(self, anaconda)
anaconda.dispatch.skipStep("welcome", skip = 1)
#anaconda.dispatch.skipStep("network", skip = 1)
def getBackend(self):
return LiveCDCopyBackend
def productMatches(self, oldprod):
if oldprod is None:
return False
if oldprod.startswith(productName):
return True
return False
def versionMatches(self, oldver):
try:
oldVer = float(oldver)
newVer = float(productVersion)
except ValueError:
return True
return newVer >= oldVer
def __init__(self):
BaseInstallClass.__init__(self)
| gpl-2.0 | 2,908,679,965,218,311,700 | 28.527473 | 83 | 0.679196 | false |
jgyates/genmon | gentankdiy.py | 1 | 6642 | #!/usr/bin/env python
#-------------------------------------------------------------------------------
# FILE: gentankdiy.py
# PURPOSE: gentankdiy.py add enhanced external tank data to genmon
#
# AUTHOR: jgyates
# DATE: 06-18-2019
#
# MODIFICATIONS:
#-------------------------------------------------------------------------------
import datetime, time, sys, signal, os, threading, collections, json, ssl
import atexit, getopt, requests
try:
from genmonlib.myclient import ClientInterface
from genmonlib.mylog import SetupLogger
from genmonlib.myconfig import MyConfig
from genmonlib.mysupport import MySupport
from genmonlib.mycommon import MyCommon
from genmonlib.mythread import MyThread
from genmonlib.program_defaults import ProgramDefaults
from genmonlib.gaugediy import GaugeDIY1, GaugeDIY2
import smbus
except Exception as e1:
print("\n\nThis program requires the modules located in the genmonlib directory in the github repository.\n")
print("Please see the project documentation at https://github.com/jgyates/genmon.\n")
print("Error: " + str(e1))
sys.exit(2)
#------------ GenTankData class ------------------------------------------------
class GenTankData(MySupport):
#------------ GenTankData::init---------------------------------------------
def __init__(self,
log = None,
loglocation = ProgramDefaults.LogPath,
ConfigFilePath = MyCommon.DefaultConfPath,
host = ProgramDefaults.LocalHost,
port = ProgramDefaults.ServerPort,
console = None):
super(GenTankData, self).__init__()
self.LogFileName = os.path.join(loglocation, "gentankdiy.log")
self.AccessLock = threading.Lock()
self.log = log
self.console = console
self.MonitorAddress = host
configfile = os.path.join(ConfigFilePath, 'gentankdiy.conf')
try:
if not os.path.isfile(configfile):
self.LogConsole("Missing config file : " + configfile)
self.LogError("Missing config file : " + configfile)
sys.exit(1)
self.config = MyConfig(filename = configfile, section = 'gentankdiy', log = self.log)
self.gauge_type = self.config.ReadValue('gauge_type', return_type = int, default = 1)
self.nb_tanks = self.config.ReadValue('nb_tanks', return_type = int, default = 1)
if self.MonitorAddress == None or not len(self.MonitorAddress):
self.MonitorAddress = ProgramDefaults.LocalHost
except Exception as e1:
self.LogErrorLine("Error reading " + configfile + ": " + str(e1))
self.LogConsole("Error reading " + configfile + ": " + str(e1))
sys.exit(1)
try:
if self.gauge_type == 1:
self.gauge = GaugeDIY1(self.config, log = self.log, console = self.console)
elif self.gauge_type == 2:
self.gauge = GaugeDIY2(self.config, log = self.log, console = self.console)
else:
self.LogError("Invalid gauge type: " + str(self.gauge_type))
sys.exit(1)
if not self.nb_tanks in [1,2]:
self.LogError("Invalid Number of tanks (nb_tanks), 1 or 2 accepted: " + str(self.nb_tanks))
sys.exit(1)
self.debug = self.gauge.debug
self.Generator = ClientInterface(host = self.MonitorAddress, port = port, log = self.log)
            # start thread to periodically read the gauge and report tank data
self.Threads["TankCheckThread"] = MyThread(self.TankCheckThread, Name = "TankCheckThread", start = False)
if not self.gauge.InitADC():
self.LogError("InitADC failed, exiting")
sys.exit(1)
self.Threads["TankCheckThread"].Start()
signal.signal(signal.SIGTERM, self.SignalClose)
signal.signal(signal.SIGINT, self.SignalClose)
except Exception as e1:
self.LogErrorLine("Error in GenTankData init: " + str(e1))
self.console.error("Error in GenTankData init: " + str(e1))
sys.exit(1)
#---------- GenTankData::SendCommand --------------------------------------
def SendCommand(self, Command):
if len(Command) == 0:
return "Invalid Command"
try:
with self.AccessLock:
data = self.Generator.ProcessMonitorCommand(Command)
except Exception as e1:
self.LogErrorLine("Error calling ProcessMonitorCommand: " + str(Command))
data = ""
return data
# ---------- GenTankData::TankCheckThread-----------------------------------
def TankCheckThread(self):
time.sleep(1)
while True:
try:
dataforgenmon = {}
tankdata = self.gauge.GetGaugeData()
if tankdata != None:
dataforgenmon["Tank Name"] = "External Tank"
dataforgenmon["Capacity"] = 0
dataforgenmon["Percentage"] = tankdata
if self.nb_tanks == 2:
tankdata2 = self.gauge.GetGaugeData(tanktwo = True)
if tankdata2 != None:
dataforgenmon["Percentage2"] = tankdata2
retVal = self.SendCommand("generator: set_tank_data=" + json.dumps(dataforgenmon))
self.LogDebug(retVal)
if self.WaitForExit("TankCheckThread", float(self.gauge.PollTime * 60)):
return
except Exception as e1:
self.LogErrorLine("Error in TankCheckThread: " + str(e1))
if self.WaitForExit("TankCheckThread", float(self.gauge.PollTime * 60)):
return
# ----------GenTankData::SignalClose----------------------------------------
def SignalClose(self, signum, frame):
self.Close()
sys.exit(1)
# ----------GenTankData::Close----------------------------------------------
def Close(self):
self.KillThread("TankCheckThread")
self.gauge.Close()
self.Generator.Close()
#-------------------------------------------------------------------------------
if __name__ == "__main__":
console, ConfigFilePath, address, port, loglocation, log = MySupport.SetupAddOnProgram("gentankdiy")
GenTankDataInstance = GenTankData(log = log, loglocation = loglocation, ConfigFilePath = ConfigFilePath, host = address, port = port, console = console)
while True:
time.sleep(0.5)
sys.exit(1)
| gpl-2.0 | 5,548,805,097,713,764,000 | 36.954286 | 156 | 0.549082 | false |
Jazende/ProjectEuler | problem_026.py | 1 | 2481 | import math
def go_until_repeat_remainder(nom, den, cur_max=1000):
remainders = []
cycles = 0
while True:
if nom < den:
nom*=10
cycles += 1
if nom == den:
break
if nom > den:
remainder = nom%den
if remainder in remainders:
cycles += 1
break
remainders.append(remainder)
nom*=10
cycles += 1
return cycles
def problem_026(max_=1000):
cur_max = 0
cur_value = 0
for x in range(2, 1000)[::-1]:
new_value = go_until_repeat_remainder(1, x, cur_max)
if new_value > cur_max:
cur_max = new_value
cur_value = x
return cur_value
print(problem_026(1000))
def long_division(nom, den, max_count=100000000):
result = "0."
nom *= 10
count = 0
while True:
if find_recurring(result):
temp = float(result)
if den*0.9 < int(1 / temp) < den *1.1:
break
if nom % den == 0:
result += str(nom//den)
break
elif nom > den:
result += str(nom//den)
nom = nom%den
nom *= 10
continue
elif nom < den:
result += "0"
nom *= 10
continue
count += 1
if count == max_count:
break
return result
def find_recurring(text):
rev_text = text[::-1]
for i in range(1, len(text)//2+1)[::-1]:
if rev_text[:i] == rev_text[i:i*2] == rev_text[i*2:i*3] == rev_text[i*3:i*4] and not int(rev_text[:i]) == 0:
return True
return False
def get_recurring(text):
rev_text = text[::-1]
for i in range(1, len(text)//2+1)[::-1]:
if rev_text[:i] == rev_text[i:i*2] == rev_text[i*2:i*3] == rev_text[i*3:i*4] and not int(rev_text[:i]) == 0:
return rev_text[:i]
def get_recurring_length(nom, den):
division = long_division(nom, den)
if find_recurring(division):
return len(get_recurring(division))
else:
return 0
def problem_26(target):
# fractions = {x: get_recurring_length(1, x) for x in range(2, target+1)}
fractions = []
for x in range(2, target+1):
fractions.append([x, get_recurring_length(1, x)])
fractions = sorted(fractions, key=lambda x: x[1], reverse=True)
print(fractions[:10])
return fractions[0]
problem_26(1000)
#print(long_division(1, 261))
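# Illustrative alternative (not part of the original solutions above): for d coprime to 10,
# the recurring-cycle length of 1/d equals the multiplicative order of 10 modulo d.
# The helper below is only a sketch for cross-checking the answers produced above.
def cycle_length(d):
    # factors of 2 and 5 only contribute a non-repeating prefix, so strip them first
    while d % 2 == 0:
        d //= 2
    while d % 5 == 0:
        d //= 5
    if d == 1:
        return 0
    k, r = 1, 10 % d
    while r != 1:
        r = (r * 10) % d
        k += 1
    return k
#print(max(range(2, 1000), key=cycle_length))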
| gpl-3.0 | -6,844,096,892,728,083,000 | 26.566667 | 116 | 0.512696 | false |
leewinder/tslerp | automation/prepare_distribution_package.py | 1 | 2629 | """ Builds up a release package ready to be built or distributed by NPM. The distributable content
is taken from the development folder to make it easier to strip out unneeded package content. """
#!/usr/bin/python
# Imports
import os
import shutil
import fnmatch
import distutils.dir_util
import cli
#
# Finds all files with a specific extension
#
def remove_all_files(directory, extension):
""" Finds all files with a specific extension """
# Delete everything in the source folders
for root, _, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, extension):
file_path = os.path.join(root, filename)
os.remove(file_path)
#
# Removes all the build files so we can do a clean build
#
def clean_build_files():
""" Removes all the build files so we can do a clean build """
# Get our path
source_folder = cli.get_project_root() + '/development/src'
remove_all_files(source_folder, '*.js')
remove_all_files(source_folder, '*.js.map')
remove_all_files(source_folder, '*.d.ts')
#
# Builds the Typescript project
#
def build_project():
""" Builds the Typescript project """
config_root = cli.get_project_root() + '/development/'
return_code, _, _ = cli.run_command_line(config_root, "tsc", ['-p', 'tsconfig-ci.json'])
if return_code != 0:
exit(return_code)
#
# Gets the main package folder
#
def create_package_folder():
""" Gets the main package folder """
# Get the path to the distribution package
root_path = cli.get_project_root() + '/release'
if os.path.exists(root_path):
shutil.rmtree(root_path)
distribution_folder = '/{}/package'.format(root_path)
os.makedirs(distribution_folder)
# Send it back with the root folder
return cli.get_project_root() + '/', distribution_folder + '/'
#
# Main entry function
#
def main():
""" Main entry function """
# Clean up our current build files
clean_build_files()
# Build the project
build_project()
# Get our folder
root_folder, distribution_folder = create_package_folder()
# Copy over the root content
shutil.copyfile(root_folder + 'LICENSE', distribution_folder + 'LICENSE')
shutil.copyfile(root_folder + 'README.md', distribution_folder + 'README.md')
# Package content
shutil.copyfile(root_folder + 'development/package.json', distribution_folder + 'package.json')
# Copy over all the source files
distutils.dir_util.copy_tree(root_folder + 'development/src/lib', distribution_folder)
#
# Main entry point
#
if __name__ == "__main__":
main()
| mit | 491,645,362,754,421,000 | 26.103093 | 99 | 0.669836 | false |
zigazupancic/sat-solver | boolean.py | 1 | 4960 | class Formula:
def __ne__(self, other):
return not (self == other)
def flatten(self):
return self
def getVariable(self, mapping):
if self not in mapping:
mapping[self] = freshVariable()
return mapping[self]
class Variable(Formula):
def __init__(self, x):
self.x = x
def __str__(self, parentheses=False):
return str(self.x)
def __hash__(self):
return hash(self.x)
def __eq__(self, other):
if isinstance(other, Formula):
return isinstance(other, Variable) and self.x == other.x
else:
return self.x == other
def __lt__(self, other):
return self.__str__() < other.__str__()
def evaluate(self, values):
return values[self.x]
def simplify(self):
return self
def tseytin(self, mapping):
return self
def equiv(self, variable):
return And(Or(variable, Not(self)), Or(Not(variable), self))
def listing(self):
return [self.x]
class Not(Formula):
def __init__(self, x):
self.x = makeFormula(x)
def __str__(self, parentheses=False):
return "!" + self.x.__str__(True)
def __hash__(self):
return hash(("!", self.x))
def __eq__(self, other):
return isinstance(other, Not) and self.x == other.x
def __lt__(self, other):
        return self.__str__() < other.__str__()
def evaluate(self, values):
return not self.x.evaluate(values)
def flatten(self):
if isinstance(self.x, Not):
return self.x.x
else:
return self
def simplify(self):
if isinstance(self.x, And):
return Or(*(Not(y) for y in self.x.terms)).simplify()
elif isinstance(self.x, Or):
return And(*(Not(y) for y in self.x.terms)).simplify()
elif isinstance(self.x, Variable):
return self
else:
return self.flatten().simplify()
def tseytin(self, mapping):
return Not(self.x.tseytin(mapping)).getVariable(mapping)
def equiv(self, variable):
return And(Or(variable, self.x), Or(Not(variable), self))
def listing(self):
return [self.flatten().simplify()]
class Multi(Formula):
def __init__(self, *args):
self.terms = frozenset(makeFormula(x) for x in args)
def __str__(self, parentheses = False):
if len(self.terms) == 0:
return self.empty
elif len(self.terms) == 1:
return next(iter(self.terms)).__str__(parentheses)
out = self.connective.join(x.__str__(True) for x in self.terms)
if parentheses:
return "(%s)" % out
else:
return out
def __hash__(self):
return hash((self.connective, self.terms))
def __eq__(self, other):
return isinstance(other, self.getClass()) \
and self.terms == other.terms
def evaluate(self, values):
return self.fun(x.evaluate(values) for x in self.terms)
def flatten(self):
this = self.getClass()
terms = (x.flatten() for x in self.terms)
out = this(*sum([list(x.terms) if isinstance(x, this)
else [x] for x in terms], []))
if len(out.terms) == 1:
return next(iter(out.terms))
else:
return out
def simplify(self):
terms = [x.simplify() for x in self.terms]
const = self.getDualClass()()
if const in terms:
return const
if len(terms) == 1:
return terms[0]
return self.getClass()(*terms).flatten()
def tseytin(self, mapping):
return self.getClass()(*(x.tseytin(mapping)
for x in self.terms)).getVariable(mapping)
def listing(self):
return [y.flatten().simplify() for y in self.terms]
class And(Multi):
empty = "T"
connective = r" & "
fun = all
def getClass(self):
return And
def getDualClass(self):
return Or
def equiv(self, variable):
return And(Or(variable, *(Not(x).flatten() for x in self.terms)),
*(Or(Not(variable), x) for x in self.terms))
class Or(Multi):
empty = "F"
connective = r" | "
fun = any
def getClass(self):
return Or
def getDualClass(self):
return And
def equiv(self, variable):
return And(Or(Not(variable), *self.terms),
*(Or(variable, Not(x)) for x in self.terms))
T = And()
F = Or()
def makeFormula(x):
if isinstance(x, Formula):
return x
else:
return Variable(x)
counter = 0
def freshVariable():
global counter
counter += 1
return Variable("x{}".format(counter))
def tseytin(formula, mapping=None):
if mapping is None:
mapping = {}
f = formula.tseytin(mapping)
return And(f, *(k.equiv(v) for k, v in mapping.items())).flatten()
| mit | 3,495,161,264,798,552,000 | 23.554455 | 73 | 0.55 | false |
mozillazg/lark | lark/lark/settings.py | 1 | 1098 | """
Django settings for lark project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
from .base import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')i4@2vfr##+zd3cn8ckw#!lebya1mk2sg@yq9boog+=ofi@hf9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', 'fm.3sd.me']
SECRET_KEY = os.environ['LARK_SECRET_KEY']
ADMIN_URL = os.environ['LARK_ADMIN_URL']
DB_NAME = os.environ['LARK_DB_NAME']
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, DB_NAME),
}
}
| mit | 7,931,855,443,845,683,000 | 27.153846 | 71 | 0.716758 | false |
bodacea/datasciencefordevelopment | Code_examples/python/linear_regression.py | 1 | 1984 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses the only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis]
diabetes_X_temp = diabetes_X[:, :, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X_temp[:-20]
diabetes_X_test = diabetes_X_temp[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean square error
print("Residual sum of squares: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show() | cc0-1.0 | 6,711,675,007,412,280,000 | 28.191176 | 77 | 0.700101 | false |
bi4o4ek/yandex-loganalytics | loganalytics.py | 1 | 3271 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This script calculates the 95th percentile of request handling time and shows the top 10 request IDs with the longest send time to customers
#
# Start example:
# ./loganalytics.py /path-to-log/input.txt > /path-to-some-dir/output.txt
# then you can complete analysis by running 2nd script
# ./granalytics.py /path-to-log/input.txt >> /path-to-some-dir/output.txt
#
# If you do not set path to log, then default location will be used (default_log_path)
import sys
import math
import heapq
__author__ = 'Vladimir Bykanov'
default_log_path = '/home/bi4o4ek/yaTest/input.txt'
# Pure python func for percentile calculation
# http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
def percentile(N, percent, key=lambda x: x):
"""
Find the percentile of a list of values.
@parameter N - is a list of values. Note N MUST BE already sorted.
@parameter percent - a float value from 0.0 to 1.0.
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values
"""
if not N:
return None
k = (len(N) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c - k)
d1 = key(N[int(c)]) * (k - f)
return d0 + d1
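# Example (illustrative numbers): for the sorted list [1, 2, 3, 4, 5],
# percentile([1, 2, 3, 4, 5], 0.5) returns 3, and percentile([1, 2, 3, 4, 5], 0.95)
# interpolates between the two largest values and returns 4.8.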
# Dict with id:timestamp of type StartRequest
start_stamps = {}
# Dict with id:timestamp of type StartSendResult
send_stamps = {}
# Dict with id:timestamp of type FinishRequest
finish_stamps = {}
# List with send time of each request
send_times = {}
# List with full time of each request
req_times = []
# Read log path from 1st script parameter
if len(sys.argv) > 1:
log_path = sys.argv[1]
else:
log_path = default_log_path
# Parse log and harvest info into start_stamps, send_stamps and finish_stamps
with open(log_path) as log_handler:
for line in log_handler:
line_elements = line.split()
req_stamp, req_id, req_type = line_elements[:3]
if req_type == 'StartRequest':
start_stamps[req_id] = int(req_stamp)
elif req_type == 'StartSendResult':
send_stamps[req_id] = int(req_stamp)
elif req_type == 'FinishRequest':
finish_stamps[req_id] = int(req_stamp)
# Numbers of StartRequest, StartSendResult and FinishRequest must be equal
if len(start_stamps) != len(finish_stamps) or len(finish_stamps) != len(send_stamps) :
print 'Numbers of StartRequest, StartSendResult and FinishRequest are not equal each other'
exit(3)
# Compute full times of requests and send times to customer
for req_id in start_stamps:
# Full times
req_time = int(finish_stamps[req_id]) - int(start_stamps[req_id])
req_times.append(req_time)
# Send times
send_time = int(finish_stamps[req_id]) - int(send_stamps[req_id])
send_times[req_id] = send_time
req_times.sort()
print "95-й перцентиль времени работы:", percentile(req_times, 0.95)
send_times_top10 = heapq.nlargest(10, send_times, key = send_times.get)
print "Идентификаторы запросов с самой долгой фазой отправки результатов пользователю:"
print ' ', ', '.join(map(str, send_times_top10))
| apache-2.0 | -2,882,829,045,544,299,000 | 32.442105 | 118 | 0.678628 | false |
e-gun/HipparchiaBuilder | builder/parsers/regexsubstitutions.py | 1 | 29163 | # -*- coding: utf-8 -*-
"""
HipparchiaBuilder: compile a database of Greek and Latin texts
Copyright: E Gunderson 2016-21
License: GNU GENERAL PUBLIC LICENSE 3
(see LICENSE in the top level directory of the distribution)
"""
from string import punctuation
from typing import List
import configparser
import re
from builder.parsers.betacodeescapedcharacters import percentsubstitutes, quotesubstitutesa, quotesubstitutesb
from builder.parsers.betacodefontshifts import latinauthorandshiftparser
from builder.parsers.citationbuilder import citationbuilder
from builder.parsers.swappers import bitswapchars, hextohighunicode, highunicodetohex, hutohxgrouper
config = configparser.ConfigParser()
config.read('config.ini', encoding='utf8')
if config['buildoptions']['warnings'] == 'y':
warnings = True
else:
warnings = False
# [nb: some regex happens in dbloading.py as prep for loading]
def earlybirdsubstitutions(texttoclean):
"""
try to get out in front of some of the trickiest bits
note that you can't use quotation marks in here
:param texttoclean:
:return:
"""
if config['buildoptions']['smartsinglequotes'] == 'y':
# 'smart' single quotes; but this produces an intial elision problem for something like ’κείνων which will be ‘κείνων instead
supplement = [
(r'\s\'', r' ‘'),
(r'\'( |\.|,|;)', r'’\1')
]
else:
# single quotes are a problem because OTOH, we have elision at the first letter of the word and, OTOH, we have plain old quotes
# full width variant for now
supplement = [(r'\'', r''')]
betacodetuples = [
(r'<(?!\d)', r'‹'), # '<': this one is super-dangerous: triple-check
(r'>(?!\d)', u'›'), # '>': this one is super-dangerous: triple-check
(r'_', u' \u2014 '), # doing this without spaces was producing problems with giant 'hyphenated' line ends
(r'\\\{', r'❴'),
(r'\\\}', r'❵'),
# the papyri exposed an interesting problem with '?'
# let's try to deal with this at earlybirdsubstitutions() because if you let '?' turn into '\u0323' it seems impossible to undo that
#
# many papyrus lines start like: '[ &c ? ]$' (cf. '[ &c ? $TO\ PRA=]GMA')
# this will end up as: '[ <hmu_latin_normal>c ̣ ]</hmu_latin_normal>'
# the space after '?' is not always there
# '[ &c ?]$! KEKEI/NHKA DI/KH PERI\ U(/BREWS [4!!!!!!!!!![ &c ?]4 ]$'
# also get a version of the pattern that does not have '[' early because we are not starting a line:
# '&{10m4}10 [ c ? ]$IASNI#80 *)EZIKEH\ M[ARTURW= &c ? ]$'
# this one also fails to have '&c' because the '&' came earlier
# here's hoping there is no other way to achieve this pattern...
(r'&c\s\?(.*?)\$', r'&c ﹖\1$'), # the question mark needs to be preserved, so we substitute a small question mark
(r'\[\sc\s\?(.*?)\$', r'[ c ﹖\1$'), # try to catch '&{10m4}10 [ c ? ]$I' without doing any damage
(r'&\?(.*?)\](.*?)\$', r'&﹖\1]\2$') # some stray lonely '?' cases remain
]
betacodetuples += supplement
for i in range(0, len(betacodetuples)):
texttoclean = re.sub(betacodetuples[i][0], betacodetuples[i][1], texttoclean)
return texttoclean
def replacequotationmarks(texttoclean):
"""
purge " markup
:param texttoclean:
:return:
"""
quotes = re.compile(r'\"(\d{1,2})')
texttoclean = re.sub(quotes, quotesubstitutesa, texttoclean)
texttoclean = re.sub(r'\"(.*?)\"', r'“\1”', texttoclean)
quotes = re.compile(r'QUOTE(\d)(.*?)QUOTE(\d)')
texttoclean = re.sub(quotes, quotesubstitutesb, texttoclean)
return texttoclean
def lastsecondsubsitutions(texttoclean):
"""
regex work that for some reason or other needs to be put off until the very last second
:param texttoclean:
:return:
"""
# gr2762 and chr0012 will fail the COPY TO command because of '\\'
texttoclean = texttoclean.replace('\\', '')
betacodetuples = (
# a format shift code like '[3' if followed by a number that is supposed to print has an intervening ` to stop the TLG parser
# if you do this prematurely you will generate spurious codes by joining numbers that should be kept apart
(r'`(\d)', r'\1'),
(r'\\\(', r'('),
(r'\\\)', r')'),
)
for i in range(0, len(betacodetuples)):
texttoclean = re.sub(betacodetuples[i][0], betacodetuples[i][1], texttoclean)
if config['buildoptions']['simplifybrackets'] != 'n':
tosimplify = re.compile(r'[❨❩❴❵⟦⟧⟪⟫《》‹›⦅⦆₍₎]')
texttoclean = re.sub(tosimplify, bracketsimplifier, texttoclean)
# change:
# <span class="latin smallerthannormal">Gnom. Vatic. 743 [</span>
# into:
# <span class="latin smallerthannormal">Gnom. Vatic. 743 </span>[
bracketandspan = re.compile(r'([❨❩❴❵⟦⟧⟪⟫《》‹›⦅⦆₍₎⟨⟩\[\](){}])(</span>)')
texttoclean = re.sub(bracketandspan, r'\2\1', texttoclean)
spanandbracket = re.compile(r'(<span class="[^"]*?">)([❨❩❴❵⟦⟧⟪⟫《》‹›⦅⦆₍₎⟨⟩\[\](){}])')
texttoclean = re.sub(spanandbracket, r'\2\1', texttoclean)
# be careful not to delete whole lines: [^"]*? vs .*?
voidspan = re.compile(r'<span class="[^"]*?"></span> ')
texttoclean = re.sub(voidspan, r'', texttoclean)
# combining double inverted breve is misplaced: <3 >3
# combining breve below is misplaced: <4 >4
# combining breve (035d) ?: <5 >5
swaps = re.compile(u'(.)([\u035c\u035d\u0361])')
texttoclean = re.sub(swaps, r'\2\1', texttoclean)
# misbalanced punctuation in something like ’αὐλῶνεϲ‘: a trivial issue that will add a lot of time to builds if you do all of the variants
# easy enough to turn this off
if config['buildoptions']['smartsinglequotes'] == 'y':
# if you enable the next a problem arises with initial elision: ‘κείνων instead of ’κείνων
texttoclean = re.sub(r'(\W)’(\w)', r'\1‘\2', texttoclean)
# now we try to undo the mess we just created by looking for vowel+space+quote+char
# the assumption is that an actual quotation will have a punctuation mark that will invalidate this check
# Latin is a mess, and you will get too many bad mathces: De uerbo ’quiesco’
# but the following will still be wrong: τὰ ϲπέρματα· ‘κείνων γὰρ
# it is unfixable? how do I know that a proper quote did not just start?
previousendswithvowel = re.compile(r'([aeiouαειουηωᾳῃῳᾶῖῦῆῶάέίόύήώὰὲὶὸὺὴὼἂἒἲὂὒἢὢᾃᾓᾣᾂᾒᾢ]\s)‘(\w)')
texttoclean = re.sub(previousendswithvowel, r'\1’\2', texttoclean)
resized = re.compile(r'[﹖﹡/﹗│﹦﹢﹪﹠﹕']')
texttoclean = re.sub(resized, makepunctuationnormalsized, texttoclean)
texttoclean = re.sub(r'([\w.,;])‘([\W])', r'\1’\2', texttoclean)
texttoclean = re.sub(r'(\W)”(\w)', r'\1“\2', texttoclean)
texttoclean = re.sub(r'([\w.,;])“([\W])', r'\1”\2', texttoclean)
# ['‵', '′'], # reversed prime and prime (for later fixing)
texttoclean = re.sub(r'([\w.,])‵([\W])', r'\1′\2', texttoclean)
texttoclean = re.sub(r'(\W)′(\w)', r'\1‵\2', texttoclean)
texttoclean = re.sub(r'‵', r'‘', texttoclean)
texttoclean = re.sub(r'′', r'’', texttoclean)
return texttoclean
def makepunctuationnormalsized(match):
"""
swap a normal and (﹠) for a little one (&), etc.
:param match:
:return:
"""
val = match.group(0)
substitutions = {
'﹖': '?',
'﹡': '*',
'/': '/',
'﹗': '!',
'│': '|',
'﹦': '=',
'﹢': '+',
'﹪': '%',
'﹠': '&',
'﹕': ':',
''': u'\u0027', # simple apostrophe
}
try:
substitute = substitutions[val]
except KeyError:
substitute = ''
return substitute
def makepunctuationsmall(val):
"""
swap a little and (﹠) for a big one (&), etc.
:param val:
:return:
"""
substitutions = {
'?': '﹖',
'*': '﹡',
'/': '/',
'!': '﹗',
'|': '│',
'=': '﹦',
'+': '﹢',
'%': '﹪',
'&': '﹠',
':': '﹕',
u'\u0027': ''' # simple apostrophe
}
try:
substitute = substitutions[val]
except KeyError:
substitute = ''
return substitute
def bracketsimplifier(match):
"""
lots of brackets are out there; converge upon a smaller set
note that most of them were chosen to avoid confusing the parser, so restoring these puts us
more in line with the betacode manual
comment some of these out to restore biodiversity
    :param match:
:return:
"""
val = match.group(0)
substitutions = {
'❨': '(',
'❩': ')',
'❴': '{',
'❵': '}',
'⟦': '[',
'⟧': ']',
'⦅': '(',
'⦆': ')',
'⸨': '(',
'⸩': ')',
# '₍': '(', # '[11' (enclose missing letter dots (!), expressing doubt whether there is a letter there at all)
# '₎': ')', # '11]'
# various angled brackets all set to 'mathematical left/right angle bracket' (u+27e8, u+27e9)
# alternately one could consider small versions instead of the full-sized versions (u+fe64, u+fe65)
# the main issue is that '<' and '>' are being kept out of the text data because of the HTML problem
# '⟪': '⟨', # but these are all asserted in the betacode
# '⟫': '⟩', # but these are all asserted in the betacode
'《': '⟨',
'》': '⟩',
'‹': '⟨',
'›': '⟩'
}
try:
substitute = substitutions[val]
except KeyError:
substitute = val
return substitute
def swapregexbrackets(val):
"""
get rid of [](){}
insert safe substitutes
currently unused
    :param val:
:return:
"""
substitutions = {
'(': '❨',
')': '❩',
'{': '❴',
'}': '❵',
'[': '⟦',
']': '⟧',
}
try:
substitute = substitutions[val]
except KeyError:
substitute = val
return substitute
def debughostilesubstitutions(texttoclean):
"""
all sorts of things will be hard to figure out if you run this suite
but it does make many things 'look better' even if there are underlying problems.
see latinfontlinemarkupparser() for notes on what the problems are/look like
if the $ is part of an irrational 'on-without-off' Greek font toggle, then we don't care
it is anything that does not fit that pattern that is the problem
the hard part is churning through lots of texts looking for ones that do not fit that pattern
at the moment few texts seem to have even the benign toggle issue; still looking for places
where there is a genuine problem
:param texttoclean:
:return:
"""
if config['buildoptions']['hideknownblemishes'] != 'y':
return texttoclean
betacodetuples = [(r'[\$]', r''),]
# note that '&' will return to the text via the hexrunner: it can be embedded in the annotations
# and you will want it later in order to format that material when it hits HipparchiaServer:
# in 'Gel. &3N.A.& 20.3.2' the '&3' turns on italics and stripping & leaves you with 3N.A. (which is hard to deal with)
# $ is still a problem:
# e.g., 0085:
# Der Antiatt. p. 115, 3 Bekk.: ‘ὑδρηλοὺϲ’ $πίθουϲ καὶ ‘οἰνηροὺϲ’
# @&Der Antiatt. p. 115, 3 Bekk.%10 $8’U(DRHLOU\S‘ $PI/QOUS KAI\ $8’OI)NHROU\S‘$
for i in range(0, len(betacodetuples)):
texttoclean = re.sub(betacodetuples[i][0], betacodetuples[i][1], texttoclean)
return texttoclean
def cleanuplingeringmesses(texttoclean):
"""
we've made it to the bitter end but there is something ugly in the results
here we can clean things up that we are too lazy/stupid/afraid-of-worse to prevent from ending up at this end
:param texttoclean:
:return:
"""
return texttoclean
def bracketspacer(matchgroup):
"""
this is not good:
'[ <hmu_latin_normal>c 27 </hmu_latin_normal>π]όλεωϲ χ⦅αίρειν⦆. ὁμολογῶ'
it should be:
'[(spaces)c 27(spaces)π]όλεωϲ χ⦅αίρειν⦆. ὁμολογῶ'
not too hard to get the spaces right; but they will only display in a compacted manner if sent out as
so you should substitute u'\u00a0' (no-break space)
:param matchgroup:
:return:
"""
grpone = re.sub(r'\s', u'\u00a0', matchgroup.group(1))
grptwo = re.sub(r'\s', u'\u00a0', matchgroup.group(2))
grpthree = re.sub(r'\s', u'\u00a0', matchgroup.group(3))
substitute = '[{x}c{y}]{z}'.format(x=grpone, y=grptwo, z=grpthree)
return substitute
#
# fix problems with the original data
#
def fixhmuoragnizationlinebyline(txt: List[str]) -> List[str]:
"""
the original data has improper nesting of some tags; try to fix that
this is meaningless if you have set htmlifydatabase to 'y' since the 'spanning' will hide the phenomenon
:param txt:
:return:
"""
try:
htmlify = config['buildoptions']['htmlifydatabase']
except KeyError:
htmlify = 'y'
try:
rationalizetags = config['buildoptions']['rationalizetags']
except KeyError:
rationalizetags = 'n'
if htmlify == 'y' or rationalizetags == 'n':
pass
else:
txt = [fixhmuirrationaloragnization(x) for x in txt]
return txt
def fixhmuirrationaloragnization(worlkine: str):
"""
Note the irrationality (for HTML) of the following (which is masked by the 'spanner'):
[have 'EX_ON' + 'SM_ON' + 'EX_OFF' + 'SM_OFF']
[need 'EX_ON' + 'SM_ON' + 'SM_OFF' + 'EX_OFF' + 'SM_ON' + 'SM_OFF' ]
hipparchiaDB=# SELECT index, marked_up_line FROM gr0085 where index = 14697;
index | marked_up_line
-------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------
14697 | <hmu_span_expanded_text><hmu_fontshift_greek_smallerthannormal>τίϲ ἡ τάραξιϲ</hmu_span_expanded_text> τοῦ βίου; τί βάρβιτοϲ</hmu_fontshift_greek_smallerthannormal>
(1 row)
hipparchiaDB=> SELECT index, marked_up_line FROM gr0085 where index = 14697;
index | marked_up_line
-------+---------------------------------------------------------------------------------------------------------------
14697 | <span class="expanded_text"><span class="smallerthannormal">τίϲ ἡ τάραξιϲ</span> τοῦ βίου; τί βάρβιτοϲ</span>
(1 row)
fixing this is an interesting question; it seems likely that I have missed some way of doing it wrong...
but note 'b' below: this is pretty mangled and the output is roughly right...
invalidline = '<hmu_span_expanded_text><hmu_fontshift_greek_smallerthannormal>τίϲ ἡ τάραξιϲ</hmu_span_expanded_text> τοῦ βίου; τί βάρβιτοϲ</hmu_fontshift_greek_smallerthannormal>'
openspans {0: 'span_expanded_text', 24: 'fontshift_greek_smallerthannormal'}
closedspans {76: 'span_expanded_text', 123: 'fontshift_greek_smallerthannormal'}
balancetest [(False, False, True)]
validline = ' <hmu_fontshift_latin_smallcapitals>errantes</hmu_fontshift_latin_smallcapitals><hmu_fontshift_latin_normal> pascentes, ut alibi “mille meae Siculis</hmu_fontshift_latin_normal>'
openspans {36: 'fontshift_latin_smallcapitals', 115: 'fontshift_latin_normal'}
closedspans {79: 'fontshift_latin_smallcapitals', 183: 'fontshift_latin_normal'}
balancetest [(False, True, False)]
# need a third check: or not (open[okeys[x]] == closed[ckeys[x]])
z = ' <hmu_fontshift_latin_normal>II 47.</hmu_fontshift_latin_normal><hmu_fontshift_latin_italic> prognosticorum causas persecuti sunt et <hmu_span_latin_expanded_text>Boëthus Stoicus</hmu_span_latin_expanded_text>,</hmu_fontshift_latin_italic>'
openspans {18: 'fontshift_latin_normal', 81: 'fontshift_latin_italic', 150: 'span_latin_expanded_text'}
closedspans {52: 'fontshift_latin_normal', 195: 'span_latin_expanded_text', 227: 'fontshift_latin_italic'}
balancetest [(False, True, False), (True, False, True)]
a = '[]κακ<hmu_span_superscript>η</hmu_span_superscript> βου<hmu_span_superscript>λ</hmu_span_superscript>'
openspans {5: 'span_superscript', 55: 'span_superscript'}
closedspans {28: 'span_superscript', 78: 'span_superscript'}
balancetest [(False, True, False)]
b = []κακ<hmu_span_superscript>η</hmu_span_superscript> β<hmu_span_x>ο<hmu_span_y>υab</hmu_span_x>c<hmu_span_superscript>λ</hmu_span_y></hmu_span_superscript>
testresult (False, True, False)
testresult (False, False, True)
testresult (False, False, True)
balanced to:
[]κακ<hmu_span_superscript>η</hmu_span_superscript> β<hmu_span_x>ο<hmu_span_y>υab</hmu_span_y></hmu_span_x><hmu_span_y>c<hmu_span_superscript>λ</hmu_span_superscript></hmu_span_y><hmu_span_superscript></hmu_span_superscript>
"""
opener = re.compile(r'<hmu_(span|fontshift)_(.*?)>')
closer = re.compile(r'</hmu_(span|fontshift)_(.*?)>')
openings = list(re.finditer(opener, worlkine))
openspans = {x.span()[0]: '{a}_{b}'.format(a=x.group(1), b=x.group(2)) for x in openings}
closings = list(re.finditer(closer, worlkine))
closedspans = {x.span()[0]: '{a}_{b}'.format(a=x.group(1), b=x.group(2)) for x in closings}
balancetest = list()
invalidpattern = (False, False, True)
if len(openspans) == len(closedspans) and len(openspans) > 1:
# print('openspans', openspans)
# print('closedspans', closedspans)
rng = range(len(openspans) - 1)
okeys = sorted(openspans.keys())
ckeys = sorted(closedspans.keys())
# test 1: a problem if the next open ≠ this close and next open position comes before this close position
# open: {0: 'span_expanded_text', 24: 'fontshift_greek_smallerthannormal'}
# closed: {76: 'span_expanded_text', 123: 'fontshift_greek_smallerthannormal'}
# test 2: succeed if the next open comes after the this close AND the this set of tags match
# open {18: 'fontshift_latin_normal', 81: 'fontshift_latin_italic', 150: 'span_latin_expanded_text'}
# closed {52: 'fontshift_latin_normal', 195: 'span_latin_expanded_text', 227: 'fontshift_latin_italic'}
# test 3: succeed if the next open comes before the previous close
testone = [not (openspans[okeys[x + 1]] != closedspans[ckeys[x]]) and (okeys[x + 1] < ckeys[x]) for x in rng]
testtwo = [okeys[x + 1] > ckeys[x] and openspans[okeys[x]] == closedspans[ckeys[x]] for x in rng]
testthree = [okeys[x + 1] < ckeys[x] for x in rng]
balancetest = [(testone[x], testtwo[x], testthree[x]) for x in rng]
# print('balancetest', balancetest)
if invalidpattern in balancetest:
# print('{a} needs balancing:\n\t{b}'.format(a=str(), b=worlkine))
modifications = list()
balancetest.reverse()
itemnumber = 0
while balancetest:
testresult = balancetest.pop()
if testresult == invalidpattern:
needinsertionat = ckeys[itemnumber]
insertionreopentag = openings[itemnumber + 1].group(0)
insertionclosetag = re.sub(r'<', r'</', openings[itemnumber + 1].group(0))
modifications.append({'item': itemnumber,
'position': needinsertionat,
'closetag': insertionclosetag,
'opentag': insertionreopentag})
itemnumber += 1
newline = str()
placeholder = 0
for m in modifications:
item = m['item']
newline += worlkine[placeholder:m['position']]
newline += m['closetag']
newline += closings[item].group(0)
newline += m['opentag']
placeholder = m['position'] + len(closings[item].group(0))
newline += worlkine[placeholder:]
# print('{a} balanced to:\n\t{b}'.format(a=str(), b=newline))
worlkine = newline
return worlkine
#
# cleanup of the cleaned up: generative citeable texts
#
def totallemmatization(parsedtextfile: List[str]) -> List[str]:
"""
will use decoded hex commands to build a citation value for every line in the text file
can produce a formatted line+citation, but really priming us for the move to the db
note the potential gotcha: some authors have a first work that is not 001 but instead 002+
:param parsedtextfile:
:return: tuples that levelmap+the line
"""
levelmapper = {
# be careful about re returning '1' and not 1
0: 1,
1: 1,
2: 1,
3: 1,
4: 1,
5: 1
}
dbready = list()
work = 1
setter = re.compile(r'<hmu_set_level_(\d)_to_(.*?)\s/>')
adder = re.compile(r'<hmu_increment_level_(\d)_by_1\s')
wnv = re.compile(r'<hmu_cd_assert_work_number betacodeval="(\d{1,3})')
for line in parsedtextfile:
gotwork = re.search(wnv, line)
if gotwork:
work = int(gotwork.group(1))
for l in range(0, 6):
levelmapper[l] = 1
gotsetting = re.search(setter, line)
if gotsetting:
level = int(gotsetting.group(1))
setting = gotsetting.group(2)
# Euripides (0006) has <hmu_set_level_0_to_post 961 /> after πῶς οὖν ἔτ’ ἂν θνήισκοιμ’ ἂν ἐνδίκως, πόσι,
# 'post 961' becomes a problem: you need to add one to 961, but you will fail 'str(int(setting)'
# slicing at the whitespace will fix this (sort of)
# but then you get a new problem: UPZ (DDP0155) and its new documents '<hmu_set_level_5_to_2 rp />'
# the not so pretty solution of the hour is to build a quasi-condition that is seldom met
# it is almost never true that the split will yield anything other than the original item
# it also is not clear how many other similar cases are out there: 'after 1001', etc.
levelmapper[level] = setting.split('post ')[-1]
if level > 0:
for l in range(0, level):
levelmapper[l] = 1
gotincrement = re.search(adder, line)
# if you don't reset the lower counters, then you will get something like 'line 10' when you first initialize a new section
if gotincrement:
level = int(gotincrement.group(1))
setting = 1
try:
# are we adding integers?
levelmapper[level] = str(int(setting) + int(levelmapper[level]))
except ValueError:
# ok, we are incrementing a letter; hope it's not z+1
# can handle multicharacter strings, but how often is it not "a --> b"?
lastchar = levelmapper[level][-1]
newlastchar = chr(ord(lastchar) + setting)
levelmapper[level] = levelmapper[level][:-1] + newlastchar
# if you increment lvl 1, you need to reset lvl 0
# this is a bit scary because sometimes you get an 0x81 and sometimes you don't
if level > 0:
for l in range(0, level):
levelmapper[l] = 1
# db version: list of tuples + the line
tups = [('0', str(levelmapper[0])), ('1', str(levelmapper[1])), ('2', str(levelmapper[2])), ('3', str(levelmapper[3])), ('4', str(levelmapper[4])), ('5', str(levelmapper[5]))]
dbready.append([str(work), tups, line])
return dbready
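# Illustrative shape of a single dbready entry (the values here are invented):
#   ['2', [('0', '15'), ('1', '3'), ('2', '1'), ('3', '1'), ('4', '1'), ('5', '1')], '<parsed line>']
# i.e. the work number as a string, the six citation-level tuples, and the line itself.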
def addcdlabels(texttoclean, authornumber):
"""
not totally necessary and a potential source of problems
emerged before hexrunner worked right and not always in agreement with it?
the CD would re-initilize values every block; this turns that info into human-readable info
:param texttoclean:
:param authornumber:
:return:
"""
# cd blocks end 0xf3 + 0x0
# the newline lets you reset levels right?
search = r'(█ⓕⓔ\s(█⓪\s){1,})'
replace = '\n<hmu_end_of_cd_block_re-initialize_key_variables />'
texttoclean = re.sub(search, replace, texttoclean)
template = '█ⓔⓕ █⑧⓪ █ⓑ{one} █ⓑ{two} █ⓑ{three} █ⓑ{four} █ⓕⓕ '
authornumber = hextohighunicode(authornumber)
digits = re.match(r'(.)(.)(.)(.)', authornumber)
search = template.format(one=digits.group(1), two=digits.group(2), three=digits.group(3), four=digits.group(4))
replace = '<hmu_cd_assert_author_number value=\"{v}\"/>'.format(v=highunicodetohex(authornumber))
texttoclean = re.sub(search, replace, texttoclean)
# 'primary level (81)' info stored in a run of 6 bytes:
# 0xef 0x81 0xb0 0xb0 0xb1 0xff
# the NEWLINE here has subtle implications: might need to play with it...
# if you do not then you can include the last ilne of one work in the next...
search = r'(█ⓔⓕ █⑧① █ⓑ(.) █ⓑ(.) █ⓑ(.) █ⓕⓕ )'
replace = r'\n<hmu_cd_assert_work_number betacodeval="\2\3\4"/>'
texttoclean = re.sub(search, replace, texttoclean)
# 'secondary level (82)' info stored in a run of bytes whose length varies: add 127 to them and you get an ascii value
# compare geasciistring() in idt file reader: '& int('7f',16))'
# 0xef 0x82 0xc1 0xf0 0xef 0xec 0xff
# 0xef 0x82 0xcd 0xf5 0xee 0xff
search = r'(█ⓔⓕ\s█⑧②\s((█..\s){1,}?)█ⓕⓕ) '
replace = r'<hmu_cd_assert_work_abbreviation betacodeval="\2"/>'
texttoclean = re.sub(search, replace, texttoclean)
# 'tertiary level (83)' info stored in a run of bytes whose length varies: add 127 to them and you get an ascii value
# 0xef 0x83 0xc1 0xf0 0xf5 0xec 0xff
search = r'(█ⓔⓕ\s█⑧③\s((█..\s){1,}?)█ⓕⓕ) '
replace = r'<hmu_cd_assert_author_abbrev betacodeval="\2"/>'
texttoclean = re.sub(search, replace, texttoclean)
# now reparse
search = r'<hmu_cd_assert_work_number betacodeval="..."/>'
texttoclean = re.sub(search, hutohxgrouper, texttoclean)
search = r'(<hmu_cd_assert_work_abbreviation betacodeval=")(.*?)\s("/>)'
texttoclean = re.sub(search, converthextoascii, texttoclean)
search = r'(<hmu_cd_assert_author_abbrev betacodeval=")(.*?)\s("/>)'
texttoclean = re.sub(search, converthextoascii, texttoclean)
# next comes something terrifying: after the author_abbrev we get 4 - 6 hex values
# try to handle it with the citationbuilder
search = r'(<hmu_cd_assert_author_abbrev betacodeval="(.*?)" />)((█[⓪①②③④⑤⑥⑦⑧⑨ⓐⓑⓒⓓⓔⓕ]{1,2}\s){2,})'
texttoclean = re.sub(search, citationbuilder, texttoclean)
return texttoclean
def hexrunner(texttoclean):
"""
First you find the hex runs.
Then you send these to the citation builder to be read/decoded
All of the heavy lifting happens there
:param texttoclean:
:return: texttoclean
"""
# re.sub documentation: if repl is a function, it is called for every non-overlapping occurrence of pattern. The function takes a single match object argument, and returns the replacement string
search = r'((█[⓪①②③④⑤⑥⑦⑧⑨ⓐⓑⓒⓓⓔⓕ]{1,2}\s){1,})'
texttoclean = re.sub(search, citationbuilder, texttoclean)
return texttoclean
#
# misc little tools
#
# some of these functions done similarly in idtfiles parsing
# refactor to consolidate if you care
#
def converthextoascii(hextoasciimatch):
"""
undo the human readability stuff so you can decode the raw data
:param hextoasciimatch:
:return:
"""
asciilevel = ''
hexlevel = hextoasciimatch.group(2)
hexlevel = highunicodetohex(hexlevel)
hexvals = re.split(r'█', hexlevel)
del hexvals[0]
asciilevel = bitswapchars(hexvals)
a = hextoasciimatch.group(1) + asciilevel + hextoasciimatch.group(3)
return a
def cleanworkname(betacodeworkname):
"""
turn a betacode workname into a 'proper' workname
:param betacodeworkname:
:return:
"""
if '*' in betacodeworkname and '$' not in betacodeworkname:
        betacodeworkname = re.sub(r'\*', r'$*', betacodeworkname)
percents = re.compile(r'%(\d{1,3})')
workname = re.sub(percents, percentsubstitutes, betacodeworkname)
ands = re.compile(r'&(\d+)(.*?)')
workname = re.sub(ands, latinauthorandshiftparser, workname)
workname = re.sub(r'\[2(.*?)]2', r'⟨\1⟩', workname)
workname = re.sub(r'<.*?>', '', workname)
workname = re.sub(r'&\d+(`|)', '', workname) # e.g.: IG I&4`2&
workname = re.sub(r'&', '', workname)
workname = re.sub(r'`', '', workname)
# nb latin diacriticals still potentially here
return workname
def colonshift(txt):
"""
colon to middot
:param txt:
:return:
"""
return re.sub(r':', '·', txt)
def insertnewlines(txt):
"""
break up the file into something you can walk through line-by-line
:param txt:
:return:
"""
txt = re.sub(r'(<hmu_set_level)', r'\n\1', txt)
txt = txt.split('\n')
return txt
def tidyupterm(word: str, punct=None) -> str:
"""
remove gunk that should not be present in a cleaned line
pass punct if you do not feel like compiling it 100k times
:param word:
:param punct:
:return:
"""
if not punct:
elidedextrapunct = '\′‵‘·̆́“”„—†⌈⌋⌊⟫⟪❵❴⟧⟦(«»›‹⟨⟩⸐„⸏⸖⸎⸑–⏑–⏒⏓⏔⏕⏖⌐∙×⁚̄⁝͜‖͡⸓͝'
extrapunct = elidedextrapunct + '’'
punct = re.compile('[{s}]'.format(s=re.escape(punctuation + extrapunct)))
# hard to know whether or not to do the editorial insertions stuff: ⟫⟪⌈⌋⌊
# word = re.sub(r'\[.*?\]','', word) # '[o]missa' should be 'missa'
word = re.sub(r'[0-9]', '', word)
word = re.sub(punct, '', word)
invals = u'jv'
outvals = u'iu'
word = word.translate(str.maketrans(invals, outvals))
return word
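# Illustrative behaviour (the example word is invented): tidyupterm('ju7dex,') returns
# 'iudex' once the digit and the trailing comma are stripped and j/v map to i/u.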
def capitalvforcapitalu(thetext: str) -> str:
"""
Latin texts have "Ubi" instead of "Vbi"
Livy and Justinian even have Ualerius instead of Valerius
you need to do this right away before any markup, etc appears
a problem: Greek inside a Roman author will get mangled: "PARADOXON II: Ὅτι αϝ)τάρκηϲ ἡ ἀρετὴ πρὸϲ εϝ)δαιμονίαν."
This arises from: $*(/OTI AV)TA/RKHS H( A)RETH\ PRO\S EV)DAIMONI/AN.&}1
:param thetext:
:return:
"""
# print('applying U -> V transformation to {a}'.format(a=thisauthor))
thetext = re.sub(r'U', 'V', thetext)
lookingfor = re.compile(r'\$(.*?)&')
uswap = lambda x: '$' + re.sub(r'V', r'U', x.group(1)) + '&'
thetext = re.sub(lookingfor, uswap, thetext)
return thetext | gpl-3.0 | 2,887,905,583,091,091,000 | 32.451843 | 264 | 0.656619 | false |
limiear/soyprice | tests/chicago_test.py | 1 | 1161 | import unittest
import abstract
from soyprice.model import database as db
from soyprice.model.soy import Chicago
import os
import datetime
import requests
class TestChicago(abstract.TestCase):
def setUp(self):
self.remove('cache*')
self.cache = db.open()
self.var = Chicago(self.cache)
self.date_list = [datetime.date(2014,10,8) + datetime.timedelta(days=i)
for i in range(3)]
self.values = [None, None, None]
def tearDown(self):
db.close(self.cache)
def test_scrap(self):
self.assertEquals(self.var.scrap(self.date_list),
list(set(self.values)))
def test_get(self):
        # It queries an old date_list and obtains all None values (because
        # the cache is empty).
values = self.var.get(self.date_list)
self.assertEquals(values, zip(self.date_list, self.values))
        # It asks for today's value, and it returns a real value.
date_list = [datetime.datetime.now()]
values = self.var.get(date_list)
self.assertGreater(values[0][1], 0.)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -8,152,060,232,558,465,000 | 28.025 | 79 | 0.613264 | false |
rspavel/spack | var/spack/repos/builtin/packages/ibm-java/package.py | 1 | 3917 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import platform
import os
class IbmJava(Package):
"""Binary distribution of the IBM Java Software Development Kit
for big and little-endian powerpc (power7, 8 and 9). Note: IBM
is fairly aggressive about taking down old versions, so old
(and even current) versions may not be available."""
homepage = "https://developer.ibm.com/javasdk/"
maintainers = ['mwkrentel']
# Note: IBM is fairly aggressive about taking down old versions,
# so we may need to update this frequently. Also, old revs may
# not be available for download.
version_list = [
('8.0.6.11', 'ppc64', '6fd17a6b9a34bb66e0db37f6402dc1b7612d54084c94b859f4a42f445fd174d4'),
('8.0.6.11', 'ppc64le', 'd69ff7519e32e89db88a9a4d4d88d1881524073ac940f35d3860db2c6647be2e'),
('8.0.6.10', 'ppc64', 'ff5151ead88f891624eefe33d80d56c325ca0aa4b93bd96c135cad326993eda2'),
('8.0.6.10', 'ppc64le', 'ea99ab28dd300b08940882d178247e99aafe5a998b1621cf288dfb247394e067'),
('8.0.6.7', 'ppc64', 'a1accb461a039af4587ea86511e317fea1d423e7f781459a17ed3947afed2982'),
('8.0.6.7', 'ppc64le', '9ede76a597af28c7f10c6f8a68788cc2dcd39fdab178c9bac56df8b3766ac717'),
('8.0.6.0', 'ppc64', 'e142746a83e47ab91d71839d5776f112ed154ae180d0628e3f10886151dad710'),
('8.0.6.0', 'ppc64le', '18c2eccf99225e6e7643141d8da4110cacc39f2fa00149fc26341d2272cc0102'),
('8.0.5.30', 'ppc64', 'd39ce321bdadd2b2b829637cacf9c1c0d90235a83ff6e7dcfa7078faca2f212f'),
('8.0.5.30', 'ppc64le', 'dec6434d926861366c135aac6234fc28b3e7685917015aa3a3089c06c3b3d8f0'),
]
# There are separate tar files for big and little-endian machine
# types. And no, this won't work cross platform.
for (ver, mach, sha) in version_list:
if mach == platform.machine():
version(ver, sha256=sha, expand=False)
provides('java@8')
conflicts('target=x86_64:', msg='ibm-java is only available for ppc64 and ppc64le')
# This assumes version numbers are 4-tuples: 8.0.5.30
def url_for_version(self, version):
# Convert 8.0.5.30 to 8.0-5.30 for the file name.
dash = '{0}.{1}-{2}.{3}'.format(*(str(version).split('.')))
url = ('http://public.dhe.ibm.com/ibmdl/export/pub/systems/cloud'
'/runtimes/java/{0}/linux/{1}/ibm-java-sdk-{2}-{1}'
'-archive.bin').format(version, platform.machine(), dash)
return url
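    # Illustrative note (not part of the original recipe): for version 8.0.6.11 on a
    # ppc64le host, the method above would build a URL of the form
    #   http://public.dhe.ibm.com/ibmdl/export/pub/systems/cloud/runtimes/java/8.0.6.11/linux/ppc64le/ibm-java-sdk-8.0-6.11-ppc64le-archive.bin
    # i.e. the dots between the last two version components are rewritten with a dash.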
@property
def home(self):
return self.prefix
@property
def libs(self):
return find_libraries(['libjvm'], root=self.home, recursive=True)
def setup_run_environment(self, env):
env.set('JAVA_HOME', self.home)
def setup_dependent_build_environment(self, env, dependent_spec):
env.set('JAVA_HOME', self.home)
def setup_dependent_package(self, module, dependent_spec):
self.spec.home = self.home
def install(self, spec, prefix):
archive = os.path.basename(self.stage.archive_file)
# The archive.bin file is quite fussy and doesn't work as a
# symlink.
if os.path.islink(archive):
targ = os.readlink(archive)
os.unlink(archive)
copy(targ, archive)
# The properties file is how we avoid an interactive install.
prop = 'properties'
with open(prop, 'w') as file:
file.write('INSTALLER_UI=silent\n')
file.write('USER_INSTALL_DIR=%s\n' % prefix)
file.write('LICENSE_ACCEPTED=TRUE\n')
# Running the archive file installs everything.
set_executable(archive)
inst = Executable(join_path('.', archive))
inst('-f', prop)
return
| lgpl-2.1 | -2,857,033,170,495,520,000 | 38.969388 | 100 | 0.66071 | false |
EmanueleCannizzaro/scons | test/option-b.py | 1 | 1515 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/option-b.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', "")
test.run(arguments = '-b .',
stderr = "Warning: ignoring -b option\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 2,320,631,516,134,854,700 | 34.232558 | 90 | 0.747855 | false |
gt-ros-pkg/hrl_autobed_dev | autobed_pose_estimator/src/visualize_pressure_mat_3d.py | 1 | 1438 | import numpy as np
import roslib; roslib.load_manifest('hrl_msgs')
import rospy
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from hrl_msgs.msg import FloatArrayBare
class Visualize3D():
def __init__(self):
rospy.Subscriber("/fsascan", FloatArrayBare, self.pressure_map_callback)
self.fig = plt.figure()
#self.ax = self.fig.add_subplot(111, projection='3d')
#self.ax = self.fig.gca(projection='3d')
self.ax = self.fig.add_subplot(111, projection='3d')
a=np.linspace(0, 3.14, 64)
b=np.linspace(0, 3.14, 27)
self.physical_pressure_map = np.zeros((64, 27))
def pressure_map_callback(self, data):
'''This callback accepts incoming pressure map from
the Vista Medical Pressure Mat and sends it out.
Remember, this array needs to be binarized to be used'''
self.physical_pressure_map = np.resize(np.asarray(data.data), (64, 27))
def run(self):
x, y=np.meshgrid(np.linspace(0, 63, 64), np.linspace(0, 26, 27));
z=self.physical_pressure_map
self.ax.plot_wireframe(x, y, z, rstride=10, cstride=10)
plt.show()
if __name__ == "__main__":
a=Visualize3D()
rospy.init_node('visualize_pressure_3D', anonymous=True)
rate=rospy.Rate(5)
while not rospy.is_shutdown():
a.run()
rate.sleep()
| mit | 3,793,219,297,732,556,300 | 30.955556 | 80 | 0.625869 | false |
jeffmarcom/checkbox | plainbox/plainbox/abc.py | 1 | 6650 | # This file is part of Checkbox.
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <[email protected]>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.abc` -- abstract base classes
============================================
Those classes are actually implemented in the plainbox.impl package. This
module is here so that the essential API concepts are in a single spot and are
easier to understand (by not being mixed with additional source code).
.. note::
This module has API stability guarantees. We are not going to break or
introduce backwards incompatible interfaces here without following our API
deprecation policy. All existing features will be retained for at least
three releases. All deprecated symbols will warn when they will cease to be
available.
"""
from abc import ABCMeta, abstractproperty, abstractmethod
class IJobDefinition(metaclass=ABCMeta):
"""
Job definition that contains a mixture of meta-data and executable
information that can be consumed by the job runner to produce results.
"""
# XXX: All IO methods to save/load this would be in a helper class/function
# that would also handle format detection, serialization and validation.
@abstractproperty
def plugin(self):
"""
Name of the job interpreter.
Various interpreters are provided by the job runner.
"""
@abstractproperty
def name(self):
"""
Name of the job
"""
@abstractproperty
def requires(self):
"""
List of expressions that need to be true for this job to be available
This value can be None
"""
@abstractproperty
def command(self):
"""
The shell command to execute to perform the job.
The return code, standard output and standard error streams are
automatically recorded and processed, depending on the plugin type.
This value can be None
"""
@abstractproperty
def description(self):
"""
Human-readable description of the job.
This field is typically used to include execution and verification
steps for manual and human-assisted tests.
This value can be None
"""
@abstractproperty
def depends(self):
"""
Comma-delimited dependency expression
        This field can be used to express job dependencies. If a job depends on
        another job, it can only start if the other job has run and succeeded.
This is the original data as provided when constructed. Use
get_direct_dependencies() to obtain the parsed equivalent.
This value can be None
"""
class IJobResult(metaclass=ABCMeta):
"""
Class for representing results from a single job
"""
# XXX: We could also store stuff like job duration and other meta-data but
# I wanted to avoid polluting this proposal with mundane details
@abstractproperty
def job(self):
"""
Definition of the job
The object implements IJobDefinition
"""
@abstractproperty
def outcome(self):
"""
Outcome of the test.
The result of either automatic or manual verification. Depending on the
plugin (test type). Available values are defined as class properties
above.
"""
@abstractproperty
def comments(self):
"""
The comment that was added by the user, if any
"""
@abstractproperty
def io_log(self):
"""
A sequence of tuples (delay, stream-name, data) where delay is the
        delay since the previous message, in seconds (typically a fractional
number), stream name is either 'stdout' or 'stderr' and data is the
bytes object that was obtained from that stream.
"""
# XXX: it could also encode 'stdin' if the user was presented with a
# console to type in and we sent that to the process.
# XXX: This interface is low-level but captures everything that has
# occurred and is text-safe. You can call an utility function to
# convert that to a text string that most closely represents what a
# user would see, having ran this command in the terminal.
@abstractproperty
def return_code(self):
"""
Command return code.
This is the return code of the process started to execute the command
from the job definition. It can also encode the signal that the
process was killed with, if any.
"""
class IJobRunner(metaclass=ABCMeta):
"""
Something that can run a job definition and produce results.
You can run many jobs with one runner, each time you'll get additional
result object. Typically you will need to connect the runner to a user
interface but headless mode is also possible.
"""
@abstractmethod
def run_job(self, job):
"""
Run the specified job.
Calling this method may block for arbitrary amount of time. User
interfaces should ensure that it runs in a separate thread.
The return value is a JobResult object that contains all the data that
was captured during the execution of the job. Some jobs may not return
a JobResult value.
"""
# XXX: threads suck, could we make this fully asynchronous? The only
# thing that we really want is to know when the command has stopped
# executing. We could expose the underlying process mechanics so that
# QT/GTK applications could tie that directly into their event loop.
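# The snippet below is not part of the checkbox API; it is an illustrative,
# hypothetical sketch showing how the interfaces documented above could be
# satisfied. All concrete values are made up for the example.
class _ExampleShellJobDefinition(IJobDefinition):
    """Minimal, hypothetical IJobDefinition implementation."""

    plugin = "shell"
    name = "example/true"
    requires = None
    command = "true"
    description = "Runs the 'true' command and always succeeds."
    depends = None


# A value matching the io_log contract documented in IJobResult, for a
# hypothetical job that wrote one line to stdout and later one line to stderr:
_example_io_log = [
    (0.0, 'stdout', b'starting test\n'),
    (1.5, 'stderr', b'warning: slow disk\n'),
]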
class IUserInterfaceIO(metaclass=ABCMeta):
"""
Base class that allows job runner to interact with the user interface.
"""
@abstractmethod
def get_manual_verification_outcome(self):
"""
Get the outcome of the manual verification, as according to the user
May raise NotImplementedError if the user interface cannot provide this
answer.
"""
| gpl-3.0 | -5,495,530,179,601,678,000 | 31.281553 | 79 | 0.671278 | false |
kriberg/stationspinner | stationspinner/accounting/urls.py | 1 | 1078 | from django.conf.urls import url, include
from rest_framework import routers
from stationspinner.accounting.views import APIKeyViewset, LoginView, \
CapsulerViewset, LogoutView, MissingTrainingViewset, RevalidateKeyView, \
ObtainAuthTokenView, CheckAuthTokenView, RefreshAuthTokenView
router = routers.DefaultRouter()
router.register(r'capsuler', CapsulerViewset, 'capsuler')
router.register(r'missing-training', MissingTrainingViewset, 'missing-training')
router.register(r'apikeys', APIKeyViewset, 'apikeys')
urlpatterns = [
url(r'^obtaintoken/$', ObtainAuthTokenView.as_view(), name='accounting_obtaintoken'),
url(r'^checktoken/$', CheckAuthTokenView.as_view(), name='accounting_checktoken'),
url(r'^refreshtoken/$', RefreshAuthTokenView.as_view(), name='accounting_refreshtoken'),
url(r'^logout/$', LogoutView.as_view(), name='accounting_logout'),
url(r'^login/$', LoginView.as_view(), name='accounting_login'),
url(r'^revalidate-key/$', RevalidateKeyView.as_view(), name='accounting_revalidate_key'),
url(r'^', include(router.urls)),
] | agpl-3.0 | -8,888,957,358,530,266,000 | 52.95 | 93 | 0.751391 | false |
robert-7/gambit | src/python/gambit/tests/test_games/personal_test_games/read_and_solve.py | 1 | 1668 | import gambit, time, os, sys
from utils import compute_time_of
'''
Read GAME_FILE in SAVED_GAMES_DIRECTORY and create a tree from it.
'''
def create_tree(args):
os.chdir(SAVED_GAMES_DIRECTORY)
g = gambit.Game.read_game(GAME_FILE)
os.chdir(PARENT_DIRECTORY)
return g
'''
Solve the game.
'''
def solve_game(args):
# create solver
solver = gambit.nash.ExternalEnumMixedSolver()
# solve game
solutions = solver.solve(g)
return solutions
'''
Create a solutions directory and save the solutions there.
'''
def print_solutions(args):
# create directory and cd in
os.mkdir(SOLUTIONS_DIRECTORY)
os.chdir(SOLUTIONS_DIRECTORY)
# create file
file_name = "{}-PSP.nfg".format(time.strftime("%Y-%m-%d %H:%M:%S"))
target_file = open(file_name, 'w')
# print solutions
for solution in solutions:
target_file.write("{}\n".format(str(solution)))
# go back out
os.chdir(PARENT_DIRECTORY)
if __name__ == '__main__':
# directory names
PARENT_DIRECTORY = ".."
SAVED_GAMES_DIRECTORY = "saved"
SOLUTIONS_DIRECTORY = "Solutions-for-PSP-Games-{}".format(time.strftime("%Y-%m-%d %H:%M:%S"))
# get file name
if len(sys.argv) != 2:
print("ERROR: Please supply a filename in the {} directory".format(SAVED_GAMES_DIRECTORY))
sys.exit(2)
else:
GAME_FILE = sys.argv[1]
# read file and create game tree
g = compute_time_of(1, "Creating Tree", create_tree)
# solve the game
solutions = compute_time_of(2, "Solving Game", solve_game)
# print the solutions to a file
compute_time_of(3, "Printing Solutions", print_solutions)
| gpl-2.0 | 2,718,975,047,332,783,600 | 24.272727 | 98 | 0.642086 | false |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/core/basics/map.py | 1 | 3291 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.basics.map Contains the Map class, a dictionary-like object that provides
# access to its values by using the 'dot' notation.
# Import standard modules
import warnings
# -----------------------------------------------------------------
class Map(dict):
"""
With this class you can use the Map object like another dictionary (including json serialization) or with the dot notation.
Credit: 'epool' (http://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary)
Example:
m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.iteritems():
self[k] = v
if kwargs:
for k, v in kwargs.iteritems():
self[k] = v
# -----------------------------------------------------------------
def __getattr__(self, attr):
if attr.startswith("__") and attr.endswith("__"): raise AttributeError
return self.get(attr)
# -----------------------------------------------------------------
def __setattr__(self, key, value):
if key.startswith("__") and key.endswith("__"): super(Map, self).__setattr__(key, value)
self.__setitem__(key, value)
# -----------------------------------------------------------------
def __setitem__(self, key, value):
super(Map, self).__setitem__(key, value)
self.__dict__.update({key: value})
# -----------------------------------------------------------------
def __delattr__(self, item):
self.__delitem__(item)
# -----------------------------------------------------------------
def __delitem__(self, key):
super(Map, self).__delitem__(key)
del self.__dict__[key]
# -----------------------------------------------------------------
def set_items(self, items):
"""
This function allows setting multiple items in the Map from a dictionary (or dictionary-like)
:param items:
:return:
"""
# Loop over all the items in the 'items' dictionary
for key in items:
# Check whether an item with this key exists in this Map
if key in self:
# Check if the item is composed of other options (i.e. this is a nested Map), or if it is just a simple variable
if isinstance(self[key], Map): self[key].set_items(items[key])
# If it is a simple variable, just set the corresponding item of this Map
else: self[key] = items[key]
# If the key is not present, show a warning
else: warnings.warn("An item with the key '" + key + "' is not present in the Map")
# -----------------------------------------------------------------
| mit | 8,885,819,028,416,556,000 | 35.555556 | 128 | 0.452888 | false |
legalsylvain/oca-custom | __unported__/oca_freestore/models/github_organization.py | 1 | 5227 | # -*- coding: utf-8 -*-
# Copyright (C) 2016-Today: Odoo Community Association (OCA)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api
class GithubOrganization(models.Model):
_name = 'github.organization'
_inherit = ['github.connector']
_order = 'name'
# Columns Section
name = fields.Char(string='Organization Name', required=True)
billing_email = fields.Char(string='Billing Email', readonly=True)
image = fields.Binary(string='Image', readonly=True)
github_login = fields.Char(
string='Github Name', required=True, help="The technical name of"
" your organization on github.\n\nShould be organization_name if the"
" url of your organization is https://github.com/organization_name/")
description = fields.Char(string='Description', readonly=True)
email = fields.Char(string='Email', readonly=True)
website_url = fields.Char(string='Website URL', readonly=True)
location = fields.Char(string='Location', readonly=True)
public_member_ids = fields.Many2many(
string='Members', comodel_name='res.partner',
relation='github_organization_partner_rel', column1='organization_id',
column2='partner_id', readonly=True)
public_member_qty = fields.Integer(
string='Members Quantity', compute='compute_public_member_qty',
store=True)
repository_ids = fields.Many2many(
string='Repositories', comodel_name='github.repository',
relation='github_organization_repository_rel',
column1='organization_id', column2='repository_id', readonly=True)
repository_qty = fields.Integer(
string='Repositories Quantity', compute='compute_repository_qty',
store=True)
organization_serie_ids = fields.One2many(
string='Organization Series',
comodel_name='github.organization.serie',
inverse_name='organization_id')
ignore_repository_names = fields.Text(
        string='Ignored Repositories', help="Set here repository names you"
        " want to ignore. One repository per line. Example:\n"
"odoo-community.org\n"
"OpenUpgrade\n")
# Compute Section
@api.multi
@api.depends('public_member_ids')
def compute_public_member_qty(self):
for organization in self:
organization.public_member_qty =\
len(organization.public_member_ids)
@api.multi
@api.depends('repository_ids', 'repository_ids.organization_id')
def compute_repository_qty(self):
for organization in self:
organization.repository_qty =\
len(organization.repository_ids)
# Custom Section
def github_2_odoo(self, data):
return {
'name': data['name'],
'description': data['description'],
'location': data['location'],
'website_url': data['blog'],
'email': data['email'],
'billing_email': data['billing_email'],
'image': self.get_base64_image_from_github(data['avatar_url']),
}
# Action Section
@api.multi
def button_full_synchronize(self):
return self.button_synchronize(True)
@api.multi
def button_light_synchronize(self):
return self.button_synchronize(False)
@api.multi
def button_synchronize(self, full):
partner_obj = self.env['res.partner']
repository_obj = self.env['github.repository']
team_obj = self.env['github.team']
for organization in self:
# Get organization data
data = self.get_data_from_github(
'organization', [organization.github_login])
organization.write(self.github_2_odoo(data))
# Get Members datas
member_ids = []
for data in self.get_datalist_from_github(
'organization_members', [organization.github_login]):
partner = partner_obj.create_or_update_from_github(data, full)
member_ids.append(partner.id)
organization.public_member_ids = member_ids
# Get Repositories datas
repository_ids = []
ignored_list = organization.ignore_repository_names and\
organization.ignore_repository_names.split("\n") or []
for data in self.get_datalist_from_github(
'organization_repositories', [organization.github_login]):
if data['name'] not in ignored_list:
repository = repository_obj.create_or_update_from_github(
organization.id, data, full)
repository_ids.append(repository.id)
organization.repository_ids = repository_ids
# Get Teams datas
team_ids = []
for data in self.get_datalist_from_github(
'organization_teams', [organization.github_login]):
team = team_obj.create_or_update_from_github(
organization.id, data, full)
team_ids.append(team.id)
organization.team_ids = team_ids
| agpl-3.0 | 4,811,223,040,879,998,000 | 36.604317 | 78 | 0.62005 | false |
Edern76/MatchsticksGame | name.py | 1 | 11395 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
import threading, time
from tkinter import *
from tkinter.messagebox import *
status = '' #Variable used both to indicate whether the program may continue (i.e. whether a valid name (or several, depending on the situation) has been entered and OK clicked) and to retrieve the entered name(s).
class simpleGUI(threading.Thread):
    '''
    Class associated with the interface that asks for a single name.
    See the description of the GUI class in game.py for the explanation about the Thread (this is indeed a similar situation, where another program has to run in parallel with the interface)
    '''
def __init__(self, attachTo = None, rootConsole = None):
threading.Thread.__init__(self)
self.attachTo = attachTo
self.rootConsole = None
def run(self):
if self.attachTo is None:
mainCon = Tk()
else:
            mainCon = self.attachTo #Same remark as for the game.py interface: an application can only have one window created with the Tk() function
global status
        nom = StringVar("") #Variables bound to Tkinter entries are not of the same type as traditional Python variables. For character strings, they are instances of the StringVar class (defined in the Tkinter module)
        status = '' #Reset the status variable
titleFrame = Frame(mainCon)
        title = Label(titleFrame, text = 'Veuillez entrer votre nom.', justify = CENTER) #Create the message asking for the name
        title.pack(fill = X, expand = Y, anchor = CENTER) #Make the label fill all the horizontal space of the window, so that the text is centred
        titleFrame.grid(row = 0, column = 0, columnspan = 8) #The grid method places elements according to a table, which works somewhat like a spreadsheet. Here, titleFrame is placed in 'cell' (0,0) and made to span 8 columns.
        field = Entry(mainCon, textvariable = nom, justify = CENTER) #Create the text field in which the name will be entered
field.grid(row = 1, column = 2, columnspan = 4)
def cancel():
            '''
            Function called when the Cancel button is clicked
            '''
global status
status = None
mainCon.destroy()
def confirm():
            '''
            Function called when the OK button is clicked
            '''
            global status
            #NB: To convert a StringVar into a 'classic' character string, the get() method must be called. Otherwise, reading the StringVar's value directly yields 'PY_VAR0'
if nom.get() == "" or " " in nom.get():
showerror('Saisie invalide', "Le nom ne doit pas contenir d'espace ou être vide")
status = ''
elif len(nom.get()) > 12:
showerror('Saisie invalide', 'Le nom ne doit pas excéder 12 caractères')
status = ''
            elif nom.get() == "None": #Development artefact: older versions used the character string 'None' rather than the special None 'symbol' for cancellation, because of a problem in the askSimpleName function. That problem has since been solved, so in theory having the string 'None' as a name should no longer cause trouble, but we preferred to keep this check just in case.
showerror('Saisie invalide', 'Le nom ne doit pas être "None"')
status = ''
            elif '\n' in nom.get() or chr(92) in nom.get(): #The chr(92) function returns the 92nd character of the ASCII table, i.e. the backslash (\). It cannot be written directly here because it is a symbol reserved by Python (combined with a letter it alters character strings, for instance adding line breaks, and we do not want such things in our names so as not to cause display errors)
showerror('Saisie invalide', "Le nom ne doit pas contenir d'antislash")
            else: #If no anomaly was encountered
                status = nom.get() #The status variable takes the value entered in the text field
mainCon.destroy()
buttonOk = Button(mainCon, text = 'OK', width = 5, command = confirm)
buttonCancel = Button(mainCon, text = 'Annuler', width = 5, command = cancel)
buttonOk.grid(row = 5, column = 1, columnspan = 3)
buttonCancel.grid(row = 5, column = 4, columnspan = 3)
        colNum, rowNum = mainCon.grid_size() #Get the size of the console in which the interface is displayed, in terms of rows and columns (as used by the grid() method)
        for x in range(colNum):
            mainCon.grid_columnconfigure(x, minsize = 25) #Make every column, even empty ones, at least 25 pixels wide. This prevents empty columns from being invisible.
        for y in range(rowNum):
            mainCon.grid_rowconfigure(y, minsize = 5) #Same principle as above, but with rows and a minimum height of 5 pixels
if self.attachTo is None:
mainCon.mainloop()
else:
            mainCon.update() #Development artefact: should no longer be necessary in theory (it dates from when we tried to have several windows created with Tk(), which causes a great many errors and problems, even after adding this line), but kept just in case.
        if self.rootConsole is not None:
            self.rootConsole.update() #Same remark as for mainCon.update().
class multiGUI(threading.Thread):
    '''
    Class associated with the interface that asks for two names.
    Since it is very similar to the simpleGUI class, most of the redundant lines are not commented again in this class.
    '''
def __init__(self, attachTo = None, rootConsole = None):
threading.Thread.__init__(self)
self.attachTo = attachTo
self.rootConsole = None
def run(self):
if self.attachTo is None:
mainCon = Tk()
else:
mainCon = self.attachTo
global status
nom1 = StringVar("")
nom2 = StringVar("")
status = ''
titleFrame = Frame(mainCon)
title = Label(titleFrame, text = 'Veuillez entrer vos noms.', justify = CENTER)
title.pack(fill = X, expand = Y, anchor = CENTER)
titleFrame.grid(row = 0, column = 0, columnspan = 8)
        P1label = Label(mainCon, text = "Joueur 1 :") #Label to the left of the text field for player 1's name
        P1field = Entry(mainCon, textvariable = nom1, justify = CENTER) #Text field for player 1's name
        P1label.grid(row = 2, column = 0, columnspan = 2)
        P1field.grid(row = 2, column = 2, columnspan = 6)
        P2label = Label(mainCon, text = "Joueur 2 :") #Label to the left of the text field for player 2's name
        P2field = Entry(mainCon, textvariable = nom2, justify = CENTER) #Text field for player 2's name
P2label.grid(row = 3, column = 0, columnspan = 2)
P2field.grid(row = 3, column = 2, columnspan = 6)
def cancel():
global status
status = None
mainCon.destroy()
def confirm():
global status
if (nom1.get() == "" or " " in nom1.get()) or (nom2.get() == "" or " " in nom2.get()):
showerror('Saisie invalide', "Le nom ne doit pas contenir d'espace ou être vide")
status = ''
elif (len(nom1.get()) > 12) or (len(nom2.get()) > 12) :
showerror('Saisie invalide', 'Le nom ne doit pas excéder 12 caractères')
status = ''
elif (nom1.get() == "None") or (nom2.get() == "None"):
showerror('Saisie invalide', 'Le nom ne doit pas être "None"')
status = ''
elif nom1.get() == nom2.get():
showerror('Saisie invalide', 'Les deux noms ne doivent pas être identiques')
            elif ('\n' in nom1.get() or chr(92) in nom1.get()) or ('\n' in nom2.get() or chr(92) in nom2.get()): #The chr(92) function returns the 92nd character of the ASCII table, i.e. the backslash (\). It cannot be written directly here because it is a symbol reserved by Python (combined with a letter it alters character strings, for instance adding line breaks, and we do not want such things in our names so as not to cause display errors)
showerror('Saisie invalide', "Le nom ne doit pas contenir d'antislash")
else:
                status = (nom1.get(), nom2.get()) #The status variable takes the value of a tuple (an object similar to a list, but immutable) containing player 1's and player 2's names
mainCon.destroy()
buttonOk = Button(mainCon, text = 'OK', width = 5, command = confirm)
buttonCancel = Button(mainCon, text = 'Annuler', width = 5, command = cancel)
buttonOk.grid(row = 5, column = 1, columnspan = 3)
buttonCancel.grid(row = 5, column = 4, columnspan = 3)
colNum, rowNum = mainCon.grid_size()
for x in range(colNum):
mainCon.grid_columnconfigure(x, minsize = 25)
for y in range(rowNum):
mainCon.grid_rowconfigure(y, minsize = 5)
if self.attachTo is None:
mainCon.mainloop()
else:
mainCon.update()
if self.rootConsole is not None:
self.rootConsole.update()
def askSimpleName(attachTo = None, rootConsole = None):
    '''
    Function used to ask for a single name
    '''
global status
status = ''
    asker = simpleGUI(attachTo, rootConsole) #Create an instance of simpleGUI
    asker.start() #Start the interface Thread
    time.sleep(3) #Give the interface time to load
    while True:
        if status == '': #If no name has been entered yet
            continue #Keep looping (i.e. the function does nothing as long as the interface has not returned a name)
        else:
            break #Otherwise, exit the loop
    return status #Return the entered name
def askMultipleNames(attachTo = None, rootConsole = None):
    '''
    Function used to ask for several names
    Almost identical to the askSimpleName function; the redundant parts are not commented again
    '''
global status
status = ''
asker = multiGUI(attachTo, rootConsole)
asker.start()
time.sleep(3)
while True:
if status == "":
continue
else:
break
    return status #Return the names. Note that this is no longer a character string as in askSimpleName, but a tuple made of two character strings.
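# Illustrative usage sketch (not part of the original file): another module of
# the game would typically call these helpers roughly as follows, where
# `window` is an existing Tk root or Toplevel widget (hypothetical name):
#
#   name = askSimpleName(attachTo = window)        # str, or None if cancelled
#   names = askMultipleNames(attachTo = window)    # (str, str) tuple, or None if cancelled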
if __name__ == '__main__':
from tkinter.messagebox import *
showerror('Erreur', 'Veuillez lancer main.pyw pour démarrer le programme')
| mit | -1,213,387,413,839,428,000 | 59.473118 | 523 | 0.639459 | false |
DataONEorg/d1_python | gmn/src/d1_gmn/app/management/commands/diag-restore-sciobj.py | 1 | 8217 | # This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Attempt to restore missing local Science Objects from replicas.
A DataONE Science Object is a block of bytes with an associated System Metadata XML
doc.
GMN stores the bytes in a directory hierarchy on the local filesystem and the System
Metadata in a Postgres database.
This will attempt to restore objects that have been lost or damaged due to data
corruption or loss in the filesystem or database.
This procedure should be able to always restore system metadata. However, restore of
object bytes depends on a valid replica being available on the CN or another MN.
The procedure is as follows:
- For the purposes of this command, "damaged" and "lost" data are equivalents. Both are
handled with the same software procedure, where an attempt is made to completely
replace the data with a recovered version. So this documentation uses "lost" to
describe both lost and damaged data.
- The CN is queried for a list of PIDs of objects for which this GMN is registered as
either the authoritative source, or holder of a replica.
- For each PID, both the System Metadata and the object bytes are checked to be
available and undamaged on this GMN.
- System Metadata is checked by fully generating the System Metadata document from
the database, then validating it against the XMLSchema for the DataONE types. The
System metadata is considered to be lost if any step of the procedure cannot be
completed.
- Object bytes are checked by recalculating the checksum from the currently stored
bytes (if any) and comparing it with the correct checksum, stored in the System
Metadata. The object is considered to be lost if unable to generate a checksum or
if the checksum does not match the checksum stored for the object.
- Proxy objects are checked in the same way, except that the checksum is recalculated
on the object bytes as streamed from its location on the 3rd party server.
- Lost System Metadata is always restored from the CN, which holds a copy of system
metadata for all objects that are known to the CN, which will include the objects for
which the CN returned the PIDs in the initial query that this procedure performed.
- For lost object bytes, the restore process depends on the type of storage used for
the object bytes, which is either local filesystem or proxy from 3rd party server.
- The bytes for objects stored in the filesystem, which is the most common situation,
are restored by querying the CN for a list of known locations for the object. If this
GMN, where the object bytes are known to be lost, is the only available location
listed, the object bytes cannot be restored by this command. If the object bytes are
not available elsewhere, the object will have to be considered as lost by DataONE. It
should be set as archived in the CN system metadata, so that it is not listed in any
further search results. To help prevent this from happening, make sure that all
objects on this GMN have a replication policy allowing replicas to be distributed to
other MNs in the DataONE federation.
- Proxy objects are objects where the bytes are stored on a 3rd party server instead of
on the local filesystem, and GMN stores only a URL reference to the location. Support
for proxy objects is a vendor specific GMN feature, so the URL is not part of the
official system metadata. As the URL is stored together with the system metadata in
the database, lost system metadata will mean lost object reference URL as well. Since
the URL is not in the system metadata, restoring the system metadata from the CN will
not restore the URL and so will not recover the actual location.
- Since object bytes for proxy objects are not stored locally, lost object bytes will
either have been caused by lost URL reference, which is handled as described above,
or by the 3rd party server no longer returning the object bytes at the URL reference
location. In both cases,the only remaining option for a fully automated restore of
the object depends on a valid replica being available on the CN or another MN, in
which case GMN can restore the object as a regular local object from the replica.
However, this converts the object from a proxy object to a local object. Depending on
the available hardware vs. the added storage space that will be required, this may
not be desirable, so the option to convert proxy objects to local if required for
automated restore is disabled by default. See --help for how to set this option.
- See the documentation for ``audit-proxy-sciobj`` for information on how to repair
proxy objects that could not be restored automatically by this command.
"""
import d1_gmn.app.did
import d1_gmn.app.mgmt_base
import d1_gmn.app.sysmeta
class Command(d1_gmn.app.mgmt_base.GMNCommandBase):
def __init__(self, *args, **kwargs):
super().__init__(__doc__, __name__, *args, **kwargs)
def add_components(self, parser):
self.using_single_instance(parser)
def handle_serial(self):
pass
# async def fix(self, async_client, url):
# self.log.info("Processing: {}".format(url))
# proxy_tracker = self.tracker("Create missing proxy objects")
#
# sysmeta_pyxb = await async_client.get_system_metadata(url)
#
# sysmeta_checksum_pyxb = sysmeta_pyxb.checksum
# # print(d1_common.checksum.format_checksum(calculated_checksum_pyxb))
# calculated_checksum_pyxb = await self.calculate_checksum(
# async_client, url, sysmeta_checksum_pyxb.algorithm
# )
# # print(d1_common.checksum.format_checksum(sysmeta_checksum_pyxb))
# if not d1_common.checksum.are_checksums_equal(
# sysmeta_checksum_pyxb, calculated_checksum_pyxb
# ):
# proxy_tracker.event(
# "Skipped: Checksum mismatch", f'url="{url}"', is_error=True
# )
#
# d1_gmn.app.sysmeta.create_or_update(sysmeta_pyxb, url)
#
# proxy_tracker.event("Fixed", f'url="{url}"')
#
# async def is_online(self, async_client, url):
# try:
# async with await async_client.session.head(url) as response:
# # Handle redirect responses as well, as redirects are not followed for
# # HEAD requests.
# return response.status in (200, 300, 301, 302, 303, 307, 308)
# except aiohttp.ClientError:
# return False
#
# async def calculate_checksum(self, async_client: t.D1Client, url: str, checksum_algo: str) -> t.Checksum:
# """Calculate the checksum on proxy object stored on a 3rd party server.
#
# The objected is calculated on the stream, without bytes being buffered in memory
# or stored locally.
#
# Returns:
# A DataONE Checksum PyXB type.
#
# """
# checksum_calculator = d1_common.checksum.get_checksum_calculator_by_dataone_designator(
# checksum_algo
# )
# async with await async_client.session.get(url) as response:
# async for chunk_str, _ in response.content.iter_chunks():
# checksum_calculator.update(chunk_str)
#
# checksum_pyxb = d1_common.types.dataoneTypes.checksum(
# checksum_calculator.hexdigest()
# )
# checksum_pyxb.algorithm = checksum_algo
# return checksum_pyxb
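# The helper below is not part of GMN; it is an illustrative, self-contained
# sketch of the core check described in the module docstring: recompute a
# checksum over locally stored object bytes and compare it against the value
# recorded in the system metadata. The function name, its arguments and the
# default algorithm are assumptions made for this example only.
def _example_sciobj_bytes_match_sysmeta(sciobj_path, sysmeta_checksum_hex, algorithm="SHA-1"):
    """Return True if the bytes at ``sciobj_path`` still match the recorded checksum."""
    import hashlib

    # DataONE designators such as "SHA-1" map onto hashlib names such as "sha1".
    hasher = hashlib.new(algorithm.replace("-", "").lower())
    with open(sciobj_path, "rb") as f:
        # Stream the file in chunks so that large objects are not read into memory at once.
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            hasher.update(chunk)
    return hasher.hexdigest().lower() == sysmeta_checksum_hex.lower()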
| apache-2.0 | -1,802,804,323,378,339,300 | 48.203593 | 111 | 0.720579 | false |