repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
polyaxon/polyaxon | core/polyaxon/deploy/schemas/service.py | 1 | 18141 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import EXCLUDE, fields
from polyaxon.deploy.schemas.celery import CelerySchema
from polyaxon.k8s import k8s_schemas
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.fields.swagger import SwaggerField
class ServiceSchema(BaseCamelSchema):
enabled = fields.Bool(allow_none=True)
image = fields.Str(allow_none=True)
image_tag = fields.Str(allow_none=True)
image_pull_policy = fields.Str(allow_none=True)
replicas = fields.Int(allow_none=True)
concurrency = fields.Int(allow_none=True)
resources = SwaggerField(cls=k8s_schemas.V1ResourceRequirements, allow_none=True)
class Meta:
unknown = EXCLUDE
@staticmethod
def schema_config():
return Service
class Service(BaseConfig):
SCHEMA = ServiceSchema
REDUCED_ATTRIBUTES = [
"enabled",
"image",
"imageTag",
"imagePullPolicy",
"replicas",
"concurrency",
"resources",
]
def __init__(
self,
enabled=None,
image=None,
image_tag=None,
image_pull_policy=None,
replicas=None,
concurrency=None,
resources=None,
):
self.enabled = enabled
self.image = image
self.image_tag = image_tag
self.image_pull_policy = image_pull_policy
self.replicas = replicas
self.concurrency = concurrency
self.resources = resources
class WorkerServiceSchema(ServiceSchema):
celery = fields.Nested(CelerySchema, allow_none=True)
@staticmethod
def schema_config():
return WorkerServiceConfig
class WorkerServiceConfig(Service):
SCHEMA = WorkerServiceSchema
REDUCED_ATTRIBUTES = Service.REDUCED_ATTRIBUTES + ["celery"]
def __init__(
self,
enabled=None,
image=None,
image_tag=None,
image_pull_policy=None,
replicas=None,
concurrency=None,
resources=None,
celery=None,
):
super().__init__(
enabled=enabled,
image=image,
image_tag=image_tag,
image_pull_policy=image_pull_policy,
replicas=replicas,
concurrency=concurrency,
resources=resources,
)
self.celery = celery
class HelperServiceSchema(ServiceSchema):
sleep_interval = fields.Int(allow_none=True)
sync_interval = fields.Int(allow_none=True)
@staticmethod
def schema_config():
return HelperServiceConfig
class HelperServiceConfig(Service):
SCHEMA = HelperServiceSchema
REDUCED_ATTRIBUTES = Service.REDUCED_ATTRIBUTES + [
"sleepInterval",
"syncInterval",
]
def __init__(
self,
enabled=None,
image=None,
image_tag=None,
image_pull_policy=None,
replicas=None,
concurrency=None,
resources=None,
sleep_interval=None,
sync_interval=None,
):
super().__init__(
enabled=enabled,
image=image,
image_tag=image_tag,
image_pull_policy=image_pull_policy,
replicas=replicas,
concurrency=concurrency,
resources=resources,
)
self.sleep_interval = sleep_interval
self.sync_interval = sync_interval
class AgentServiceSchema(ServiceSchema):
instance = fields.String(allow_none=True)
token = fields.String(allow_none=True)
is_replica = fields.Bool(allow_none=True)
compressed_logs = fields.Bool(allow_none=True)
@staticmethod
def schema_config():
return AgentServiceConfig
class AgentServiceConfig(Service):
SCHEMA = AgentServiceSchema
REDUCED_ATTRIBUTES = Service.REDUCED_ATTRIBUTES + [
"instance",
"token",
"isReplica",
"compressedLogs",
]
def __init__(
self,
enabled=None,
image=None,
image_tag=None,
image_pull_policy=None,
replicas=None,
concurrency=None,
resources=None,
instance=None,
token=None,
is_replica=None,
compressed_logs=None,
):
super().__init__(
enabled=enabled,
image=image,
image_tag=image_tag,
image_pull_policy=image_pull_policy,
replicas=replicas,
concurrency=concurrency,
resources=resources,
)
self.instance = instance
self.token = token
self.is_replica = is_replica
self.compressed_logs = compressed_logs
class OperatorServiceSchema(ServiceSchema):
skip_crd = fields.Bool(allow_none=True, data_key="skipCRD")
use_crd_v1beta1 = fields.Bool(allow_none=True, data_key="useCRDV1Beta1")
@staticmethod
def schema_config():
return OperatorServiceConfig
class OperatorServiceConfig(Service):
SCHEMA = OperatorServiceSchema
REDUCED_ATTRIBUTES = Service.REDUCED_ATTRIBUTES + ["skipCRD", "useCRDV1Beta1"]
def __init__(
self,
enabled=None,
image=None,
image_tag=None,
image_pull_policy=None,
replicas=None,
concurrency=None,
resources=None,
skip_crd=None,
use_crd_v1beta1=None,
):
super().__init__(
enabled=enabled,
image=image,
image_tag=image_tag,
image_pull_policy=image_pull_policy,
replicas=replicas,
concurrency=concurrency,
resources=resources,
)
self.skip_crd = skip_crd
self.use_crd_v1beta1 = use_crd_v1beta1
class ApiServiceSchema(ServiceSchema):
service = fields.Dict(allow_none=True)
@staticmethod
def schema_config():
return ApiServiceConfig
class ApiServiceConfig(Service):
SCHEMA = ApiServiceSchema
def __init__(
self,
enabled=None,
image=None,
image_tag=None,
image_pull_policy=None,
replicas=None,
concurrency=None,
resources=None,
service=None,
):
super().__init__(
enabled=enabled,
image=image,
image_tag=image_tag,
image_pull_policy=image_pull_policy,
replicas=replicas,
concurrency=concurrency,
resources=resources,
)
self.service = service
class HooksSchema(ServiceSchema):
load_fixtures = fields.Bool(allow_none=True)
@staticmethod
def schema_config():
return HooksConfig
class HooksConfig(Service):
SCHEMA = HooksSchema
REDUCED_ATTRIBUTES = Service.REDUCED_ATTRIBUTES + ["loadFixtures"]
def __init__(
self,
enabled=None,
image=None,
image_tag=None,
image_pull_policy=None,
replicas=None,
concurrency=None,
resources=None,
load_fixtures=None,
):
super().__init__(
enabled=enabled,
image=image,
image_tag=image_tag,
image_pull_policy=image_pull_policy,
replicas=replicas,
concurrency=concurrency,
resources=resources,
)
self.load_fixtures = load_fixtures
class ThirdPartyServiceSchema(ServiceSchema):
enabled = fields.Bool(allow_none=True)
persistence = fields.Dict(allow_none=True)
node_selector = fields.Dict(allow_none=True)
affinity = fields.Dict(allow_none=True)
tolerations = fields.List(fields.Dict(allow_none=True), allow_none=True)
@staticmethod
def schema_config():
return ThirdPartyService
class ThirdPartyService(Service):
SCHEMA = ThirdPartyServiceSchema
REDUCED_ATTRIBUTES = [
"enabled",
"image",
"imageTag",
"imagePullPolicy",
"replicas",
"concurrency",
"resources",
"persistence",
"nodeSelector",
"affinity",
"tolerations",
]
def __init__(
self,
enabled=None,
image=None,
image_tag=None,
image_pull_policy=None,
replicas=None,
resources=None,
persistence=None,
node_selector=None,
affinity=None,
tolerations=None,
):
super().__init__(
image=image,
image_tag=image_tag,
image_pull_policy=image_pull_policy,
replicas=replicas,
resources=resources,
)
self.enabled = enabled
self.persistence = persistence
self.node_selector = node_selector
self.affinity = affinity
self.tolerations = tolerations
class PostgresqlSchema(ThirdPartyServiceSchema):
postgres_user = fields.Str(allow_none=True)
postgres_password = fields.Str(allow_none=True)
postgres_database = fields.Str(allow_none=True)
conn_max_age = fields.Int(allow_none=True)
@staticmethod
def schema_config():
return PostgresqlConfig
class PostgresqlConfig(ThirdPartyService):
SCHEMA = PostgresqlSchema
REDUCED_ATTRIBUTES = ThirdPartyService.REDUCED_ATTRIBUTES + [
"postgresUser",
"postgresPassword",
"postgresDatabase",
"connMaxAge",
]
def __init__(
self,
enabled=None,
postgres_user=None,
postgres_password=None,
postgres_database=None,
conn_max_age=None,
image=None,
image_tag=None,
image_pull_policy=None,
replicas=None,
resources=None,
persistence=None,
node_selector=None,
affinity=None,
tolerations=None,
):
super().__init__(
enabled=enabled,
image=image,
image_tag=image_tag,
image_pull_policy=image_pull_policy,
replicas=replicas,
resources=resources,
persistence=persistence,
node_selector=node_selector,
affinity=affinity,
tolerations=tolerations,
)
self.postgres_user = postgres_user
self.postgres_password = postgres_password
self.postgres_database = postgres_database
self.conn_max_age = conn_max_age
class RedisSchema(ThirdPartyServiceSchema):
image = fields.Raw(allow_none=True)
non_broker = fields.Bool(allow_none=True)
use_password = fields.Bool(allow_none=True)
password = fields.Str(allow_none=True)
@staticmethod
def schema_config():
return RedisConfig
class RedisConfig(ThirdPartyService):
SCHEMA = RedisSchema
REDUCED_ATTRIBUTES = ThirdPartyService.REDUCED_ATTRIBUTES + [
"nonBroker",
"usePassword",
"password",
]
def __init__(
self,
enabled=None,
non_broker=None,
use_password=None,
password=None,
image=None,
image_tag=None,
image_pull_policy=None,
replicas=None,
resources=None,
persistence=None,
node_selector=None,
affinity=None,
tolerations=None,
):
super().__init__(
enabled=enabled,
image=image,
image_tag=image_tag,
image_pull_policy=image_pull_policy,
replicas=replicas,
resources=resources,
persistence=persistence,
node_selector=node_selector,
affinity=affinity,
tolerations=tolerations,
)
self.non_broker = non_broker
self.use_password = use_password
self.password = password
class RabbitmqSchema(ThirdPartyServiceSchema):
rabbitmq_username = fields.Str(allow_none=True)
rabbitmq_password = fields.Str(allow_none=True)
@staticmethod
def schema_config():
return RabbitmqConfig
class RabbitmqConfig(ThirdPartyService):
SCHEMA = RabbitmqSchema
REDUCED_ATTRIBUTES = ThirdPartyService.REDUCED_ATTRIBUTES + [
"rabbitmqUsername",
"rabbitmqPassword",
]
def __init__(
self,
enabled=None,
rabbitmq_username=None,
rabbitmq_password=None,
image=None,
image_tag=None,
image_pull_policy=None,
replicas=None,
resources=None,
persistence=None,
node_selector=None,
affinity=None,
tolerations=None,
):
super().__init__(
enabled=enabled,
image=image,
image_tag=image_tag,
image_pull_policy=image_pull_policy,
replicas=replicas,
resources=resources,
persistence=persistence,
node_selector=node_selector,
affinity=affinity,
tolerations=tolerations,
)
self.rabbitmq_username = rabbitmq_username
self.rabbitmq_password = rabbitmq_password
class ExternalServiceSchema(BaseCamelSchema):
user = fields.Str(allow_none=True)
password = fields.Str(allow_none=True)
host = fields.Str(allow_none=True)
port = fields.Int(allow_none=True)
database = fields.Str(allow_none=True)
use_password = fields.Bool(allow_none=True)
conn_max_age = fields.Int(allow_none=True)
pgbouncer = fields.Dict(allow_none=True)
options = fields.Dict(allow_none=True)
@staticmethod
def schema_config():
return ExternalService
class ExternalService(BaseConfig):
SCHEMA = ExternalServiceSchema
REDUCED_ATTRIBUTES = [
"user",
"password",
"host",
"port",
"database",
"usePassword",
"connMaxAge",
"pgbouncer",
"options",
]
def __init__(
self,
user=None,
password=None,
host=None,
port=None,
database=None,
use_password=None,
conn_max_age=None,
pgbouncer=None,
options=None,
):
self.user = user
self.password = password
self.host = host
self.port = port
self.database = database
self.use_password = use_password
self.conn_max_age = conn_max_age
self.pgbouncer = pgbouncer
self.options = options
class ExternalBackendSchema(BaseCamelSchema):
enabled = fields.Bool(allow_none=True)
backend = fields.Str(allow_none=True)
options = fields.Dict(allow_none=True)
@staticmethod
def schema_config():
return ExternalBackend
class ExternalBackend(BaseConfig):
SCHEMA = ExternalBackendSchema
REDUCED_ATTRIBUTES = [
"enabled",
"backend",
"options",
]
def __init__(
self,
enabled=None,
backend=None,
options=None,
):
self.enabled = enabled
self.backend = backend
self.options = options
class AuthServicesSchema(BaseCamelSchema):
github = fields.Nested(ExternalBackendSchema, allow_none=True)
gitlab = fields.Nested(ExternalBackendSchema, allow_none=True)
bitbucket = fields.Nested(ExternalBackendSchema, allow_none=True)
google = fields.Nested(ExternalBackendSchema, allow_none=True)
saml = fields.Nested(ExternalBackendSchema, allow_none=True)
@staticmethod
def schema_config():
return AuthServicesConfig
class AuthServicesConfig(BaseConfig):
SCHEMA = AuthServicesSchema
REDUCED_ATTRIBUTES = [
"github",
"gitlab",
"bitbucket",
"google",
"saml",
]
def __init__(
self,
github=None,
gitlab=None,
bitbucket=None,
google=None,
saml=None,
):
self.github = github
self.gitlab = gitlab
self.bitbucket = bitbucket
self.google = google
self.saml = saml
class ExternalServicesSchema(BaseCamelSchema):
redis = fields.Nested(ExternalServiceSchema, allow_none=True)
rabbitmq = fields.Nested(ExternalServiceSchema, allow_none=True)
postgresql = fields.Nested(ExternalServiceSchema, allow_none=True)
gateway = fields.Nested(ExternalServiceSchema, allow_none=True)
api = fields.Nested(ExternalServiceSchema, allow_none=True)
transactions = fields.Nested(ExternalBackendSchema, allow_none=True)
analytics = fields.Nested(ExternalBackendSchema, allow_none=True)
metrics = fields.Nested(ExternalBackendSchema, allow_none=True)
errors = fields.Nested(ExternalBackendSchema, allow_none=True)
auth = fields.Nested(AuthServicesSchema, allow_none=True)
allowed_versions = fields.List(fields.Str(), allow_none=True)
@staticmethod
def schema_config():
return ExternalServicesConfig
class ExternalServicesConfig(BaseConfig):
SCHEMA = ExternalServicesSchema
REDUCED_ATTRIBUTES = [
"redis",
"rabbitmq",
"postgresql",
"gateway",
"api",
"transactions",
"analytics",
"metrics",
"errors",
"auth",
"allowedVersions",
]
def __init__(
self,
redis=None,
rabbitmq=None,
postgresql=None,
gateway=None,
api=None,
transactions=None,
analytics=None,
metrics=None,
errors=None,
auth=None,
allowed_versions=None,
):
self.redis = redis
self.rabbitmq = rabbitmq
self.postgresql = postgresql
self.gateway = gateway
self.api = api
self.transactions = transactions
self.analytics = analytics
self.metrics = metrics
self.errors = errors
self.auth = auth
self.allowed_versions = allowed_versions
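# Usage sketch (illustrative addition, not part of the upstream file): the
# values below are made up, and the schema round-trip is only assumed to
# follow standard marshmallow semantics with the camelCase keys declared by
# BaseCamelSchema and the EXCLUDE policy set in ServiceSchema.Meta.
#
#   service = Service(enabled=True, image="polyaxon/polyaxon-api",
#                     image_tag="1.0.0", replicas=2)
#   # Only the fields listed in Service.REDUCED_ATTRIBUTES are expected to
#   # survive serialization through ServiceSchema.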
| apache-2.0 | -6,594,309,741,235,618,000 | 25.253256 | 85 | 0.607023 | false |
clovertrail/cloudinit-bis | cloudinit/config/cc_ntp.py | 1 | 4063 | # vi: ts=4 expandtab
#
# Copyright (C) 2016 Canonical Ltd.
#
# Author: Ryan Harper <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
NTP
---
**Summary:** enable and configure ntp
Handle ntp configuration. If ntp is not installed on the system and ntp
configuration is specified, ntp will be installed. If there is a default ntp
config file in the image or one is present in the distro's ntp package, it will
be copied to ``/etc/ntp.conf.dist`` before any changes are made. A list of ntp
pools and ntp servers can be provided under the ``ntp`` config key. If no ntp
servers or pools are provided, 4 pools will be used in the format
``{0-3}.{distro}.pool.ntp.org``.
**Internal name:** ``cc_ntp``
**Module frequency:** per instance
**Supported distros:** centos, debian, fedora, opensuse, ubuntu
**Config keys**::
ntp:
pools:
- 0.company.pool.ntp.org
- 1.company.pool.ntp.org
- ntp.myorg.org
servers:
- my.ntp.server.local
- ntp.ubuntu.com
- 192.168.23.2
"""
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import templater
from cloudinit import type_utils
from cloudinit import util
import os
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
NR_POOL_SERVERS = 4
distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
def handle(name, cfg, cloud, log, _args):
"""
Enable and configure ntp
ntp:
pools: ['0.{{distro}}.pool.ntp.org', '1.{{distro}}.pool.ntp.org']
servers: ['192.168.2.1']
"""
ntp_cfg = cfg.get('ntp', {})
if not isinstance(ntp_cfg, (dict)):
raise RuntimeError(("'ntp' key existed in config,"
" but not a dictionary type,"
" is a %s %instead"), type_utils.obj_name(ntp_cfg))
if 'ntp' not in cfg:
LOG.debug("Skipping module named %s,"
"not present or disabled by cfg", name)
return True
install_ntp(cloud.distro.install_packages, packages=['ntp'],
check_exe="ntpd")
rename_ntp_conf()
write_ntp_config_template(ntp_cfg, cloud)
def install_ntp(install_func, packages=None, check_exe="ntpd"):
if util.which(check_exe):
return
if packages is None:
packages = ['ntp']
install_func(packages)
def rename_ntp_conf(config=NTP_CONF):
if os.path.exists(config):
util.rename(config, config + ".dist")
def generate_server_names(distro):
names = []
for x in range(0, NR_POOL_SERVERS):
name = "%d.%s.pool.ntp.org" % (x, distro)
names.append(name)
return names
def write_ntp_config_template(cfg, cloud):
servers = cfg.get('servers', [])
pools = cfg.get('pools', [])
if len(servers) == 0 and len(pools) == 0:
LOG.debug('Adding distro default ntp pool servers')
pools = generate_server_names(cloud.distro.name)
params = {
'servers': servers,
'pools': pools,
}
template_fn = cloud.get_template_filename('ntp.conf.%s' %
(cloud.distro.name))
if not template_fn:
template_fn = cloud.get_template_filename('ntp.conf')
if not template_fn:
raise RuntimeError(("No template found, "
"not rendering %s"), NTP_CONF)
templater.render_to_file(template_fn, NTP_CONF, params)
| gpl-3.0 | -5,945,354,172,292,923,000 | 28.442029 | 79 | 0.624169 | false |
lexotero/try-it | apps/congress/admin.py | 1 | 1496 | from django.contrib import admin
from apps.congress.models import Edition, Company, Speaker, Tag, Track, ActivityFormat, Activity
class TagAdmin(admin.ModelAdmin):
search_fields = ["name"]
class EditionAdmin(admin.ModelAdmin):
list_display = ["start", "end", "name", "description"]
search_fields = ["name", "description"]
class TrackAdmin(admin.ModelAdmin):
list_display = ["name", "description"]
search_fields = ["name", "description"]
class CompanyAdmin(admin.ModelAdmin):
list_display = ["name"]
search_fields = ["name", "description"]
class SpeakerAdmin(admin.ModelAdmin):
list_display = ["first_name", "last_name"]
list_filter = ["company"]
search_fields = ["first_name", "last_name"]
class ActivityFormatAdmin(admin.ModelAdmin):
list_display = ["name", "description"]
search_fields = ["name", "description"]
class ActivityAdmin(admin.ModelAdmin):
filter_horizontal = ["tags", "speakers", "companies"]
list_display = ["id", "title", "start", "end", "format", "track"]
list_filter = ["format", "tags", "edition", "track"]
search_fields = ["title", "description", "tags", "format", "speakers", "companies"]
admin.site.register(Tag, TagAdmin)
admin.site.register(Edition, EditionAdmin)
admin.site.register(Track, TrackAdmin)
admin.site.register(Company, CompanyAdmin)
admin.site.register(Speaker, SpeakerAdmin)
admin.site.register(ActivityFormat, ActivityFormatAdmin)
admin.site.register(Activity, ActivityAdmin)
| apache-2.0 | -4,323,105,958,972,232,700 | 29.530612 | 96 | 0.701203 | false |
miing/mci_migo | identityprovider/tests/openid_server/per_version/test_openid_teams.py | 1 | 1669 | from identityprovider.const import LAUNCHPAD_TEAMS_NS
from identityprovider.tests.helpers import OpenIDTestCase
class OpenIDTeamsTestCase(OpenIDTestCase):
def test(self):
# = Launchpad OpenID Teams Extension =
# The Launchpad OpenID server implements a custom team membership
# extension. This allows a relying party to query whether the user is
# a member of one or more teams.
# Now perform an OpenID authentication request, querying membership in
# four team names:
# * one that the user is a member of
# * one that does not exist
# * one that does exist but the user is not a member of
# * one that is actually the user's name
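# A hedged sketch of the round trip (field names other than ``is_member``
# are assumptions for illustration; only ``is_member`` is asserted below):
#
#   request:  teams extension carries the comma-separated list, e.g.
#             query_membership=ubuntu-team,no-such-team,...
#   response: a signed field under LAUNCHPAD_TEAMS_NS, e.g.
#             is_member=ubuntu-team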
t = self.factory.make_team('ubuntu-team')
self.factory.add_account_to_team(self.account, t)
self.factory.make_team('launchpad-beta-testers')
teams = ('ubuntu-team,no-such-team,launchpad-beta-testers,%s' %
self.account.person.name)
response = self.do_openid_dance(self.claimed_id, teams=teams)
response = self.login(response)
# authorize sending team membership
response = self.yes_to_decide(response, teams=('ubuntu-team',))
info = self.complete_from_response(response)
self.assertEqual(info.status, 'success')
self.assertEqual(info.getSigned(LAUNCHPAD_TEAMS_NS, 'is_member'),
'ubuntu-team')
# The response reveals that the user is a member of the ubuntu-team.
# As specified, there is no difference in the response for non-existent
# teams and teams that the user is not a member of.
| agpl-3.0 | -9,369,407,362,418,992 | 40.725 | 79 | 0.65728 | false |
lixun910/pysal | pysal/viz/splot/_viz_esda_mpl.py | 1 | 44623 | import matplotlib.pyplot as plt
import matplotlib as mpl
import geopandas as gpd
import numpy as np
from pysal.lib.weights.contiguity import Queen
from pysal.lib.weights.spatial_lag import lag_spatial
import seaborn as sbn
from pysal.explore.esda.moran import (Moran_Local, Moran_Local_BV,
Moran, Moran_BV)
import warnings
from pysal.model.spreg import OLS
from matplotlib import patches, colors
from ._viz_utils import (mask_local_auto, moran_hot_cold_spots,
splot_colors)
"""
Lightweight visualizations for esda using Matplotlib and Geopandas
TODO
* geopandas plotting, change round shapes in legends to boxes
* prototype moran_facet using `seaborn.FacetGrid`
"""
__author__ = ("Stefanie Lumnitz <[email protected]>")
def _create_moran_fig_ax(ax, figsize):
"""
Creates matplotlib figure and axes instances
for plotting moran visualizations. Adds common viz design.
"""
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
ax.spines['left'].set_position(('axes', -0.05))
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position(('axes', -0.05))
ax.spines['top'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
return fig, ax
def moran_scatterplot(moran, zstandard=True, p=None, ax=None,
scatter_kwds=None, fitline_kwds=None):
"""
Moran Scatterplot
Parameters
----------
moran : esda.moran instance
Values of Moran's I Global, Bivariate and Local
Autocorrelation Statistics
zstandard : bool, optional
If True, Moran Scatterplot will show z-standardized attribute and
spatial lag values. Default =True.
p : float, optional
If given, the p-value threshold for significance
for Local Autocorrelation analysis. Points will be colored by
significance. By default it will not be colored.
Default =None.
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline.
Default =None.
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import (Moran, Moran_BV,
... Moran_Local, Moran_Local_BV)
>>> from pysal.viz.splot.esda import moran_scatterplot
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> x = gdf['Suicids'].values
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate esda.moran Objects
>>> moran = Moran(y, w)
>>> moran_bv = Moran_BV(y, x, w)
>>> moran_loc = Moran_Local(y, w)
>>> moran_loc_bv = Moran_Local_BV(y, x, w)
Plot
>>> fig, axs = plt.subplots(2, 2, figsize=(10,10),
... subplot_kw={'aspect': 'equal'})
>>> moran_scatterplot(moran, p=0.05, ax=axs[0,0])
>>> moran_scatterplot(moran_loc, p=0.05, ax=axs[1,0])
>>> moran_scatterplot(moran_bv, p=0.05, ax=axs[0,1])
>>> moran_scatterplot(moran_loc_bv, p=0.05, ax=axs[1,1])
>>> plt.show()
"""
if isinstance(moran, Moran):
if p is not None:
warnings.warn('`p` is only used for plotting `esda.moran.Moran_Local`\n'
'or `Moran_Local_BV` objects')
fig, ax = _moran_global_scatterplot(moran=moran, zstandard=zstandard,
ax=ax, scatter_kwds=scatter_kwds,
fitline_kwds=fitline_kwds)
elif isinstance(moran, Moran_BV):
if p is not None:
warnings.warn('`p` is only used for plotting `esda.moran.Moran_Local`\n'
'or `Moran_Local_BV` objects')
fig, ax = _moran_bv_scatterplot(moran_bv=moran, ax=ax,
scatter_kwds=scatter_kwds,
fitline_kwds=fitline_kwds)
elif isinstance(moran, Moran_Local):
fig, ax = _moran_loc_scatterplot(moran_loc=moran, zstandard=zstandard,
ax=ax, p=p, scatter_kwds=scatter_kwds,
fitline_kwds=fitline_kwds)
elif isinstance(moran, Moran_Local_BV):
fig, ax = _moran_loc_bv_scatterplot(moran_loc_bv=moran, ax=ax,
p=p, scatter_kwds=scatter_kwds,
fitline_kwds=fitline_kwds)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
return fig, ax
def _moran_global_scatterplot(moran, zstandard=True, ax=None,
scatter_kwds=None, fitline_kwds=None):
"""
Global Moran's I Scatterplot.
Parameters
----------
moran : esda.moran.Moran instance
Values of Moran's I Global Autocorrelation Statistics
zstandard : bool, optional
If True, Moran Scatterplot will show z-standardized attribute and
spatial lag values. Default =True.
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline.
Default =None.
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran
>>> from pysal.viz.splot.esda import moran_scatterplot
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate Global Moran
>>> moran = Moran(y, w)
plot
>>> moran_scatterplot(moran)
>>> plt.show()
customize plot
>>> fig, ax = moran_scatterplot(moran, zstandard=False,
... fitline_kwds=dict(color='#4393c3'))
>>> ax.set_xlabel('Donations')
>>> plt.show()
"""
# to set default as an empty dictionary that is later filled with defaults
if scatter_kwds is None:
scatter_kwds = dict()
if fitline_kwds is None:
fitline_kwds = dict()
# define customization defaults
scatter_kwds.setdefault('alpha', 0.6)
scatter_kwds.setdefault('color', splot_colors['moran_base'])
scatter_kwds.setdefault('s', 40)
fitline_kwds.setdefault('alpha', 0.9)
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
# get fig and ax
fig, ax = _create_moran_fig_ax(ax, figsize=(7, 7))
# set labels
ax.set_xlabel('Attribute')
ax.set_ylabel('Spatial Lag')
ax.set_title('Moran Scatterplot' +
' (' + str(round(moran.I, 2)) + ')')
# plot and set standards
if zstandard is True:
lag = lag_spatial(moran.w, moran.z)
fit = OLS(moran.z[:, None], lag[:, None])
# plot
ax.scatter(moran.z, lag, **scatter_kwds)
ax.plot(lag, fit.predy, **fitline_kwds)
# v- and hlines
ax.axvline(0, alpha=0.5, color='k', linestyle='--')
ax.axhline(0, alpha=0.5, color='k', linestyle='--')
else:
lag = lag_spatial(moran.w, moran.y)
b, a = np.polyfit(moran.y, lag, 1)
# plot
ax.scatter(moran.y, lag, **scatter_kwds)
ax.plot(moran.y, a + b*moran.y, **fitline_kwds)
# dashed vert at mean of the attribute
ax.vlines(moran.y.mean(), lag.min(), lag.max(), alpha=0.5,
linestyle='--')
# dashed horizontal at mean of lagged attribute
ax.hlines(lag.mean(), moran.y.min(), moran.y.max(), alpha=0.5,
linestyle='--')
return fig, ax
def plot_moran_simulation(moran, ax=None, fitline_kwds=None, **kwargs):
"""
Global Moran's I simulated reference distribution.
Parameters
----------
moran : esda.moran.Moran instance
Values of Moran's I Global Autocorrelation Statistics
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the
vertical moran fitline. Default =None.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the figure,
passed to seaborn.kdeplot.
Returns
-------
fig : Matplotlib Figure instance
Simulated reference distribution figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran
>>> from pysal.viz.splot.esda import plot_moran_simulation
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate Global Moran
>>> moran = Moran(y, w)
plot
>>> plot_moran_simulation(moran)
>>> plt.show()
customize plot
>>> plot_moran_simulation(moran, fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
# to set default as an empty dictionary that is later filled with defaults
if fitline_kwds is None:
fitline_kwds = dict()
figsize = kwargs.pop('figsize', (7, 7))
# get fig and ax
fig, ax = _create_moran_fig_ax(ax, figsize)
# plot distribution
shade = kwargs.pop('shade', True)
color = kwargs.pop('color', splot_colors['moran_base'])
sbn.kdeplot(moran.sim, shade=shade, color=color, ax=ax, **kwargs)
# customize plot
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
ax.vlines(moran.I, 0, 1, **fitline_kwds)
ax.vlines(moran.EI, 0, 1)
ax.set_title('Reference Distribution')
ax.set_xlabel('Moran I: ' + str(round(moran.I, 2)))
return fig, ax
def plot_moran(moran, zstandard=True, scatter_kwds=None,
fitline_kwds=None, **kwargs):
"""
Global Moran's I simulated reference distribution and scatterplot.
Parameters
----------
moran : esda.moran.Moran instance
Values of Moran's I Global Autocorrelation Statistics
zstandard : bool, optional
If True, Moran Scatterplot will show z-standardized attribute and
spatial lag values. Default =True.
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline
and vertical fitline. Default =None.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the figure,
passed to seaborn.kdeplot.
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot and reference distribution figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran
>>> from pysal.viz.splot.esda import plot_moran
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate Global Moran
>>> moran = Moran(y, w)
plot
>>> plot_moran(moran)
>>> plt.show()
customize plot
>>> plot_moran(moran, zstandard=False,
... fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
figsize = kwargs.pop('figsize', (10, 4))
fig, axs = plt.subplots(1, 2, figsize=figsize,
subplot_kw={'aspect': 'equal'})
plot_moran_simulation(moran, ax=axs[0], fitline_kwds=fitline_kwds, **kwargs)
moran_scatterplot(moran, zstandard=zstandard, ax=axs[1],
scatter_kwds=scatter_kwds, fitline_kwds=fitline_kwds)
axs[0].set(aspect="auto")
axs[1].set(aspect="auto")
return fig, axs
def _moran_bv_scatterplot(moran_bv, ax=None, scatter_kwds=None, fitline_kwds=None):
"""
Bivariate Moran Scatterplot.
Parameters
----------
moran_bv : esda.moran.Moran_BV instance
Values of Bivariate Moran's I Autocorrelation Statistics
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline.
Default =None.
Returns
-------
fig : Matplotlib Figure instance
Bivariate moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran_BV
>>> from pysal.viz.splot.esda import moran_scatterplot
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> x = gdf['Suicids'].values
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate Bivariate Moran
>>> moran_bv = Moran_BV(x, y, w)
plot
>>> moran_scatterplot(moran_bv)
>>> plt.show()
customize plot
>>> moran_scatterplot(moran_bv,
... fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
# to set default as an empty dictionary that is later filled with defaults
if scatter_kwds is None:
scatter_kwds = dict()
if fitline_kwds is None:
fitline_kwds = dict()
# define customization
scatter_kwds.setdefault('alpha', 0.6)
scatter_kwds.setdefault('color', splot_colors['moran_base'])
scatter_kwds.setdefault('s', 40)
fitline_kwds.setdefault('alpha', 0.9)
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
# get fig and ax
fig, ax = _create_moran_fig_ax(ax, figsize=(7,7))
# set labels
ax.set_xlabel('Attribute X')
ax.set_ylabel('Spatial Lag of Y')
ax.set_title('Bivariate Moran Scatterplot' +
' (' + str(round(moran_bv.I, 2)) + ')')
# plot and set standards
lag = lag_spatial(moran_bv.w, moran_bv.zy)
fit = OLS(moran_bv.zy[:, None], lag[:, None])
# plot
ax.scatter(moran_bv.zx, lag, **scatter_kwds)
ax.plot(lag, fit.predy, **fitline_kwds)
# v- and hlines
ax.axvline(0, alpha=0.5, color='k', linestyle='--')
ax.axhline(0, alpha=0.5, color='k', linestyle='--')
return fig, ax
def plot_moran_bv_simulation(moran_bv, ax=None, fitline_kwds=None, **kwargs):
"""
Bivariate Moran's I simulated reference distribution.
Parameters
----------
moran_bv : esda.moran.Moran_BV instance
Values of Bivariate Moran's I Autocorrelation Statistics
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the
vertical moran fitline. Default =None.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the figure,
passed to seaborn.kdeplot.
Returns
-------
fig : Matplotlib Figure instance
Bivariate moran reference distribution figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran_BV
>>> from pysal.viz.splot.esda import plot_moran_bv_simulation
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> x = gdf['Suicids'].values
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate Bivariate Moran
>>> moran_bv = Moran_BV(x, y, w)
plot
>>> plot_moran_bv_simulation(moran_bv)
>>> plt.show()
customize plot
>>> plot_moran_bv_simulation(moran_bv,
... fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
# to set default as an empty dictionary that is later filled with defaults
if fitline_kwds is None:
fitline_kwds = dict()
figsize = kwargs.pop('figsize', (7, 7))
# get fig and ax
fig, ax = _create_moran_fig_ax(ax, figsize)
# plot distribution
shade = kwargs.pop('shade', True)
color = kwargs.pop('color', splot_colors['moran_base'])
sbn.kdeplot(moran_bv.sim, shade=shade, color=color, ax=ax, **kwargs)
# customize plot
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
ax.vlines(moran_bv.I, 0, 1, **fitline_kwds)
ax.vlines(moran_bv.EI_sim, 0, 1)
ax.set_title('Reference Distribution')
ax.set_xlabel('Bivariate Moran I: ' + str(round(moran_bv.I, 2)))
return fig, ax
def plot_moran_bv(moran_bv, scatter_kwds=None, fitline_kwds=None, **kwargs):
"""
Bivariate Moran's I simulated reference distribution and scatterplot.
Parameters
----------
moran_bv : esda.moran.Moran_BV instance
Values of Bivariate Moran's I Autocorrelation Statistics
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline
and vertical fitline. Default =None.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the figure,
passed to seaborn.kdeplot.
Returns
-------
fig : Matplotlib Figure instance
Bivariate moran scatterplot and reference distribution figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran_BV
>>> from pysal.viz.splot.esda import plot_moran_bv
Load data and calculate weights
>>> link_to_data = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link_to_data)
>>> x = gdf['Suicids'].values
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
Calculate Bivariate Moran
>>> moran_bv = Moran_BV(x, y, w)
plot
>>> plot_moran_bv(moran_bv)
>>> plt.show()
customize plot
>>> plot_moran_bv(moran_bv, fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
figsize = kwargs.pop('figsize', (10, 4))
fig, axs = plt.subplots(1, 2, figsize=figsize,
subplot_kw={'aspect': 'equal'})
plot_moran_bv_simulation(moran_bv, ax=axs[0], fitline_kwds=fitline_kwds,
**kwargs)
moran_scatterplot(moran_bv, ax=axs[1],scatter_kwds=scatter_kwds,
fitline_kwds=fitline_kwds)
axs[0].set(aspect="auto")
axs[1].set(aspect="auto")
return fig, axs
def _moran_loc_scatterplot(moran_loc, zstandard=True, p=None,
ax=None, scatter_kwds=None, fitline_kwds=None):
"""
Moran Scatterplot with option of coloring of Local Moran Statistics
Parameters
----------
moran_loc : esda.moran.Moran_Local instance
Values of Moran's I Local Autocorrelation Statistics
p : float, optional
If given, the p-value threshold for significance. Points will
be colored by significance. By default it will not be colored.
Default =None.
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline.
Default =None.
Returns
-------
fig : Matplotlib Figure instance
Moran Local scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> import geopandas as gpd
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> from pysal.explore.esda.moran import Moran_Local
>>> from pysal.viz.splot.esda import moran_scatterplot
Load data and calculate Moran Local statistics
>>> link = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link)
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
>>> m = Moran_Local(y, w)
plot
>>> moran_scatterplot(m)
>>> plt.show()
customize plot
>>> moran_scatterplot(m, p=0.05,
... fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
# to set default as an empty dictionary that is later filled with defaults
if scatter_kwds is None:
scatter_kwds = dict()
if fitline_kwds is None:
fitline_kwds = dict()
if p is not None:
if not isinstance(moran_loc, Moran_Local):
raise ValueError("`moran_loc` is not a\n " +
"esda.moran.Moran_Local instance")
if 'color' in scatter_kwds or 'c' in scatter_kwds or 'cmap' in scatter_kwds:
warnings.warn('To change the color use cmap with a colormap of 5,\n' +
' color defines the LISA category')
# colors
spots = moran_hot_cold_spots(moran_loc, p)
hmap = colors.ListedColormap(['#bababa', '#d7191c', '#abd9e9',
'#2c7bb6', '#fdae61'])
# define customization
scatter_kwds.setdefault('alpha', 0.6)
scatter_kwds.setdefault('s', 40)
fitline_kwds.setdefault('alpha', 0.9)
# get fig and ax
fig, ax = _create_moran_fig_ax(ax, figsize=(7,7))
# set labels
ax.set_xlabel('Attribute')
ax.set_ylabel('Spatial Lag')
ax.set_title('Moran Local Scatterplot')
# plot and set standards
if zstandard is True:
lag = lag_spatial(moran_loc.w, moran_loc.z)
fit = OLS(moran_loc.z[:, None], lag[:, None])
# v- and hlines
ax.axvline(0, alpha=0.5, color='k', linestyle='--')
ax.axhline(0, alpha=0.5, color='k', linestyle='--')
if p is not None:
fitline_kwds.setdefault('color', 'k')
scatter_kwds.setdefault('cmap', hmap)
scatter_kwds.setdefault('c', spots)
ax.plot(lag, fit.predy, **fitline_kwds)
ax.scatter(moran_loc.z, fit.predy,
**scatter_kwds)
else:
scatter_kwds.setdefault('color', splot_colors['moran_base'])
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
ax.plot(lag, fit.predy, **fitline_kwds)
ax.scatter(moran_loc.z, fit.predy, **scatter_kwds)
else:
lag = lag_spatial(moran_loc.w, moran_loc.y)
b, a = np.polyfit(moran_loc.y, lag, 1)
# dashed vert at mean of the attribute
ax.vlines(moran_loc.y.mean(), lag.min(), lag.max(), alpha=0.5,
linestyle='--')
# dashed horizontal at mean of lagged attribute
ax.hlines(lag.mean(), moran_loc.y.min(), moran_loc.y.max(), alpha=0.5,
linestyle='--')
if p is not None:
fitline_kwds.setdefault('color', 'k')
scatter_kwds.setdefault('cmap', hmap)
scatter_kwds.setdefault('c', spots)
ax.plot(moran_loc.y, a + b*moran_loc.y, **fitline_kwds)
ax.scatter(moran_loc.y, lag, **scatter_kwds)
else:
scatter_kwds.setdefault('c', splot_colors['moran_base'])
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
ax.plot(moran_loc.y, a + b*moran_loc.y, **fitline_kwds)
ax.scatter(moran_loc.y, lag, **scatter_kwds)
return fig, ax
def lisa_cluster(moran_loc, gdf, p=0.05, ax=None,
legend=True, legend_kwds=None, **kwargs):
"""
Create a LISA Cluster map
Parameters
----------
moran_loc : esda.moran.Moran_Local or Moran_Local_BV instance
Values of Moran's Local Autocorrelation Statistic
gdf : geopandas dataframe instance
The Dataframe containing information to plot. Note that `gdf` will be
modified, so calling functions should use a copy of the user
provided `gdf`. (either using gdf.assign() or gdf.copy())
p : float, optional
The p-value threshold for significance. Points will
be colored by significance.
ax : matplotlib Axes instance, optional
Axes in which to plot the figure in multiple Axes layout.
Default = None
legend : boolean, optional
If True, legend for maps will be depicted. Default = True
legend_kwds : dict, optional
Dictionary to control legend formatting options. Example:
``legend_kwds={'loc': 'upper left', 'bbox_to_anchor': (0.92, 1.05)}``
Default = None
**kwargs : keyword arguments, optional
Keywords designing and passed to geopandas.GeoDataFrame.plot().
Returns
-------
fig : matplotlip Figure instance
Figure of LISA cluster map
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran_Local
>>> from pysal.viz.splot.esda import lisa_cluster
Data preparation and statistical analysis
>>> link = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link)
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
>>> moran_loc = Moran_Local(y, w)
Plotting
>>> fig = lisa_cluster(moran_loc, gdf)
>>> plt.show()
"""
# retrieve colors5 and labels from mask_local_auto
_, colors5, _, labels = mask_local_auto(moran_loc, p=p)
# define ListedColormap
hmap = colors.ListedColormap(colors5)
if ax is None:
figsize = kwargs.pop('figsize', None)
fig, ax = plt.subplots(1, figsize=figsize)
else:
fig = ax.get_figure()
gdf.assign(cl=labels).plot(column='cl', categorical=True,
k=2, cmap=hmap, linewidth=0.1, ax=ax,
edgecolor='white', legend=legend,
legend_kwds=legend_kwds, **kwargs)
ax.set_axis_off()
ax.set_aspect('equal')
return fig, ax
def plot_local_autocorrelation(moran_loc, gdf, attribute, p=0.05,
region_column=None, mask=None,
mask_color='#636363', quadrant=None,
legend=True, scheme='Quantiles',
cmap='YlGnBu', figsize=(15, 4),
scatter_kwds=None, fitline_kwds=None):
'''
Produce three-plot visualisation of Moran Scatterplot, LISA cluster
and Choropleth maps, with Local Moran region and quadrant masking
Parameters
----------
moran_loc : esda.moran.Moran_Local or Moran_Local_BV instance
Values of Moran's Local Autocorrelation Statistic
gdf : geopandas dataframe
The Dataframe containing information to plot the two maps.
attribute : str
Column name of attribute which should be depicted in Choropleth map.
p : float, optional
The p-value threshold for significance. Points and polygons will
be colored by significance. Default = 0.05.
region_column: string, optional
Column name containing mask region of interest. Default = None
mask: str, optional
Identifier or name of the region to highlight. Default = None
mask_color: str, optional
Color of mask. Default = '#636363'
quadrant : int, optional
Quadrant 1-4 in scatterplot masking values in LISA cluster and
Choropleth maps. Default = None
figsize: tuple, optional
W, h of figure. Default = (15,4)
legend: boolean, optional
If True, legend for maps will be depicted. Default = True
scheme: str, optional
Name of PySAL classifier to be used. Default = 'Quantiles'
cmap: str, optional
Name of matplotlib colormap used for plotting the Choropleth.
Default = 'YlGnBu'
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline
in the scatterplot. Default =None.
Returns
-------
fig : Matplotlib figure instance
Moran Scatterplot, LISA cluster map and Choropleth.
axs : list of Matplotlib axes
List of Matplotlib axes plotted.
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran_Local
>>> from pysal.viz.splot.esda import plot_local_autocorrelation
Data preparation and analysis
>>> link = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link)
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
>>> moran_loc = Moran_Local(y, w)
Plotting with quadrant mask and region mask
>>> fig = plot_local_autocorrelation(moran_loc, gdf, 'Donatns', p=0.05,
... region_column='Dprtmnt',
... mask=['Ain'], quadrant=1)
>>> plt.show()
'''
fig, axs = plt.subplots(1, 3, figsize=figsize,
subplot_kw={'aspect': 'equal'})
# Moran Scatterplot
moran_scatterplot(moran_loc, p=p, ax=axs[0],
scatter_kwds=scatter_kwds, fitline_kwds=fitline_kwds)
axs[0].set_aspect('auto')
# Lisa cluster map
# TODO: Fix legend_kwds: display boxes instead of points
lisa_cluster(moran_loc, gdf, p=p, ax=axs[1], legend=legend,
legend_kwds={'loc': 'upper left',
'bbox_to_anchor': (0.92, 1.05)})
axs[1].set_aspect('equal')
# Choropleth for attribute
gdf.plot(column=attribute, scheme=scheme, cmap=cmap,
legend=legend, legend_kwds={'loc': 'upper left',
'bbox_to_anchor': (0.92, 1.05)},
ax=axs[2], alpha=1)
axs[2].set_axis_off()
axs[2].set_aspect('equal')
# MASKING QUADRANT VALUES
if quadrant is not None:
# Quadrant masking in Scatterplot
mask_angles = {1: 0, 2: 90, 3: 180, 4: 270} # rectangle angles
# We don't want to change the axis data limits, so use the current ones
xmin, xmax = axs[0].get_xlim()
ymin, ymax = axs[0].get_ylim()
# We are rotating, so we start from 0 degrees and
# figured out the right dimensions for the rectangles for other angles
mask_width = {1: abs(xmax),
2: abs(ymax),
3: abs(xmin),
4: abs(ymin)}
mask_height = {1: abs(ymax),
2: abs(xmin),
3: abs(ymin),
4: abs(xmax)}
axs[0].add_patch(patches.Rectangle((0, 0), width=mask_width[quadrant],
height=mask_height[quadrant],
angle=mask_angles[quadrant],
color='#E5E5E5', zorder=-1, alpha=0.8))
# quadrant selection in maps
non_quadrant = ~(moran_loc.q == quadrant)
mask_quadrant = gdf[non_quadrant]
df_quadrant = gdf.iloc[~non_quadrant]
union2 = df_quadrant.unary_union.boundary
# LISA Cluster mask and cluster boundary
with warnings.catch_warnings(): # temporarily suppress geopandas warning
warnings.filterwarnings('ignore', category=UserWarning)
mask_quadrant.plot(column=attribute, scheme=scheme, color='white',
ax=axs[1], alpha=0.7, zorder=1)
gpd.GeoSeries([union2]).plot(linewidth=1, ax=axs[1], color='#E5E5E5')
# CHOROPLETH MASK
with warnings.catch_warnings(): # temporarily suppress geopandas warning
warnings.filterwarnings('ignore', category=UserWarning)
mask_quadrant.plot(column=attribute, scheme=scheme, color='white',
ax=axs[2], alpha=0.7, zorder=1)
gpd.GeoSeries([union2]).plot(linewidth=1, ax=axs[2], color='#E5E5E5')
# REGION MASKING
if region_column is not None:
# masking inside axs[0] or Moran Scatterplot
ix = gdf[region_column].isin(mask)
df_mask = gdf[ix]
x_mask = moran_loc.z[ix]
y_mask = lag_spatial(moran_loc.w, moran_loc.z)[ix]
axs[0].plot(x_mask, y_mask, color=mask_color, marker='o',
markersize=14, alpha=.8, linestyle="None", zorder=-1)
# masking inside axs[1] or Lisa cluster map
union = df_mask.unary_union.boundary
gpd.GeoSeries([union]).plot(linewidth=2, ax=axs[1], color=mask_color)
# masking inside axs[2] or Choropleth
gpd.GeoSeries([union]).plot(linewidth=2, ax=axs[2], color=mask_color)
return fig, axs
def _moran_loc_bv_scatterplot(moran_loc_bv, p=None,
ax=None, scatter_kwds=None, fitline_kwds=None):
"""
Moran Bivariate Scatterplot with option of coloring of Local Moran Statistics
Parameters
----------
moran_loc : esda.moran.Moran_Local_BV instance
Values of Moran's I Local Autocorrelation Statistics
p : float, optional
If given, the p-value threshold for significance. Points will
be colored by significance. By default it will not be colored.
Default =None.
ax : Matplotlib Axes instance, optional
If given, the Moran plot will be created inside this axis.
Default =None.
scatter_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points.
Default =None.
fitline_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline.
Default =None.
Returns
-------
fig : Matplotlib Figure instance
Bivariate Moran Local scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> import geopandas as gpd
>>> from pysal.lib.weights.contiguity import Queen
>>> from pysal.lib import examples
>>> from pysal.explore.esda.moran import Moran_Local_BV
>>> from pysal.viz.splot.esda import moran_scatterplot
Load data and calculate Moran Local statistics
>>> link = examples.get_path('Guerry.shp')
>>> gdf = gpd.read_file(link)
>>> x = gdf['Suicids'].values
>>> y = gdf['Donatns'].values
>>> w = Queen.from_dataframe(gdf)
>>> w.transform = 'r'
>>> m = Moran_Local_BV(x, y, w)
Plot
>>> moran_scatterplot(m)
>>> plt.show()
Customize plot
>>> moran_scatterplot(m, p=0.05,
... fitline_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
# to set default as an empty dictionary that is later filled with defaults
if scatter_kwds is None:
scatter_kwds = dict()
if fitline_kwds is None:
fitline_kwds = dict()
if p is not None:
if not isinstance(moran_loc_bv, Moran_Local_BV):
raise ValueError("`moran_loc_bv` is not a\n" +
"esda.moran.Moran_Local_BV instance")
if 'color' in scatter_kwds or 'cmap' in scatter_kwds:
warnings.warn("To change the color use cmap with a colormap of 5,\n" +
"c defines the LISA category, color will interfere with c")
# colors
spots_bv = moran_hot_cold_spots(moran_loc_bv, p)
hmap = colors.ListedColormap(['#bababa', '#d7191c', '#abd9e9',
'#2c7bb6', '#fdae61'])
# define customization
scatter_kwds.setdefault('alpha', 0.6)
scatter_kwds.setdefault('s', 40)
fitline_kwds.setdefault('alpha', 0.9)
# get fig and ax
fig, ax = _create_moran_fig_ax(ax, figsize=(7,7))
# set labels
ax.set_xlabel('Attribute')
ax.set_ylabel('Spatial Lag')
ax.set_title('Moran BV Local Scatterplot')
# plot and set standards
lag = lag_spatial(moran_loc_bv.w, moran_loc_bv.zy)
fit = OLS(moran_loc_bv.zy[:, None], lag[:, None])
# v- and hlines
ax.axvline(0, alpha=0.5, color='k', linestyle='--')
ax.axhline(0, alpha=0.5, color='k', linestyle='--')
if p is not None:
fitline_kwds.setdefault('color', 'k')
scatter_kwds.setdefault('cmap', hmap)
scatter_kwds.setdefault('c', spots_bv)
ax.plot(lag, fit.predy, **fitline_kwds)
ax.scatter(moran_loc_bv.zx, fit.predy,
**scatter_kwds)
else:
scatter_kwds.setdefault('color', splot_colors['moran_base'])
fitline_kwds.setdefault('color', splot_colors['moran_fit'])
ax.plot(lag, fit.predy, **fitline_kwds)
ax.scatter(moran_loc_bv.zy, fit.predy, **scatter_kwds)
return fig, ax
def moran_facet(moran_matrix, figsize=(16,12),
scatter_bv_kwds=None, fitline_bv_kwds=None,
scatter_glob_kwds=dict(color='#737373'), fitline_glob_kwds=None):
"""
Moran Facet visualization.
Includes BV Morans and Global Morans on the diagonal.
Parameters
----------
moran_matrix : esda.moran.Moran_BV_matrix instance
Dictionary of Moran_BV objects
figsize : tuple, optional
W, h of figure. Default =(16,12)
scatter_bv_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points of
off-diagonal Moran_BV plots.
Default =None.
fitline_bv_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline of
off-diagonal Moran_BV plots.
Default =None.
scatter_glob_kwds : keyword arguments, optional
Keywords used for creating and designing the scatter points of
diagonal Moran plots.
Default =None.
fitline_glob_kwds : keyword arguments, optional
Keywords used for creating and designing the moran fitline of
diagonal Moran plots.
Default =None.
Returns
-------
fig : Matplotlib Figure instance
Bivariate Moran Local scatterplot figure
axarr : matplotlib Axes instance
Axes in which the figure is plotted
Examples
--------
Imports
>>> import matplotlib.pyplot as plt
>>> import pysal.lib as lp
>>> import numpy as np
>>> import geopandas as gpd
>>> from pysal.explore.esda.moran import Moran_BV_matrix
>>> from pysal.viz.splot.esda import moran_facet
Load data and calculate Moran Local statistics
>>> f = gpd.read_file(lp.examples.get_path("sids2.dbf"))
>>> varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']
>>> vars = [np.array(f[var]) for var in varnames]
>>> w = lp.io.open(lp.examples.get_path("sids2.gal")).read()
>>> moran_matrix = Moran_BV_matrix(vars, w, varnames = varnames)
Plot
>>> fig, axarr = moran_facet(moran_matrix)
>>> plt.show()
Customize plot
>>> fig, axarr = moran_facet(moran_matrix,
... fitline_bv_kwds=dict(color='#4393c3'))
>>> plt.show()
"""
nrows = int(np.sqrt(len(moran_matrix))) + 1
ncols = nrows
fig, axarr = plt.subplots(nrows, ncols, figsize=figsize,
sharey=True, sharex=True)
fig.suptitle('Moran Facet')
for row in range(nrows):
for col in range(ncols):
if row == col:
global_m = Moran(moran_matrix[row, (row+1) % 4].zy,
moran_matrix[row, (row+1) % 4].w)
_moran_global_scatterplot(global_m, ax= axarr[row,col],
scatter_kwds=scatter_glob_kwds,
fitline_kwds=fitline_glob_kwds)
axarr[row, col].set_facecolor('#d9d9d9')
else:
_moran_bv_scatterplot(moran_matrix[row,col],
ax=axarr[row,col],
scatter_kwds=scatter_bv_kwds,
fitline_kwds=fitline_bv_kwds)
axarr[row, col].spines['bottom'].set_visible(False)
axarr[row, col].spines['left'].set_visible(False)
if row == nrows - 1:
axarr[row, col].set_xlabel(str(
moran_matrix[(col+1)%4, col].varnames['x']).format(col))
axarr[row, col].spines['bottom'].set_visible(True)
else:
axarr[row, col].set_xlabel('')
if col == 0:
axarr[row, col].set_ylabel(('Spatial Lag of '+str(
moran_matrix[row, (row+1)%4].varnames['y'])).format(row))
axarr[row, col].spines['left'].set_visible(True)
else:
axarr[row, col].set_ylabel('')
axarr[row, col].set_title('')
plt.tight_layout()
return fig, axarr
| bsd-3-clause | 2,072,672,532,014,773,200 | 33.943618 | 85 | 0.595164 | false |
alorenzo175/pvlib-python | pvlib/test/test_modelchain.py | 1 | 31545 | import sys
import numpy as np
import pandas as pd
from pvlib import iam, modelchain, pvsystem, temperature
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pvlib._deprecation import pvlibDeprecationWarning
from pandas.util.testing import assert_series_equal
import pytest
from conftest import fail_on_pvlib_version, requires_scipy, requires_tables
@pytest.fixture(scope='function')
def system(sapm_module_params, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
module = 'Canadian_Solar_CS5P_220M___2009_'
module_parameters = sapm_module_params.copy()
temp_model_params = sapm_temperature_cs5p_220m.copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=cec_inverter_parameters)
return system
@pytest.fixture
def cec_dc_snl_ac_system(cec_module_cs5p_220m, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
module_parameters = cec_module_cs5p_220m.copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
temp_model_params = sapm_temperature_cs5p_220m.copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module_parameters['Name'],
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=cec_inverter_parameters)
return system
@pytest.fixture
def cec_dc_native_snl_ac_system(cec_module_cs5p_220m, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
module_parameters = cec_module_cs5p_220m.copy()
temp_model_params = sapm_temperature_cs5p_220m.copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module_parameters['Name'],
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=cec_inverter_parameters)
return system
@pytest.fixture
def pvsyst_dc_snl_ac_system(pvsyst_module_params, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
module = 'PVsyst test module'
module_parameters = pvsyst_module_params
module_parameters['b'] = 0.05
temp_model_params = sapm_temperature_cs5p_220m.copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=cec_inverter_parameters)
return system
@pytest.fixture
def cec_dc_adr_ac_system(sam_data, cec_module_cs5p_220m,
sapm_temperature_cs5p_220m):
module_parameters = cec_module_cs5p_220m.copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
temp_model_params = sapm_temperature_cs5p_220m.copy()
inverters = sam_data['adrinverter']
inverter = inverters['Zigor__Sunzet_3_TL_US_240V__CEC_2011_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module_parameters['Name'],
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_snl_ac_system(cec_inverter_parameters):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=cec_inverter_parameters)
return system
@pytest.fixture(scope="function")
def pvwatts_dc_pvwatts_ac_system(sapm_temperature_cs5p_220m):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
temp_model_params = sapm_temperature_cs5p_220m.copy()
inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture(scope="function")
def system_no_aoi(cec_module_cs5p_220m, sapm_temperature_cs5p_220m,
cec_inverter_parameters):
module_parameters = cec_module_cs5p_220m.copy()
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
temp_model_params = sapm_temperature_cs5p_220m.copy()
inverter_parameters = cec_inverter_parameters.copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture
def location():
return Location(32.2, -111, altitude=700)
@pytest.fixture
def weather():
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
weather = pd.DataFrame({'ghi': [500, 0], 'dni': [800, 0], 'dhi': [100, 0]},
index=times)
return weather
def test_ModelChain_creation(system, location):
ModelChain(system, location)
@pytest.mark.parametrize('strategy, expected', [
(None, (32.2, 180)), ('None', (32.2, 180)), ('flat', (0, 180)),
('south_at_latitude_tilt', (32.2, 180))
])
def test_orientation_strategy(strategy, expected, system, location):
mc = ModelChain(system, location, orientation_strategy=strategy)
    # the `or` accounts for the coercion of 'None' to None
assert (mc.orientation_strategy == strategy or
mc.orientation_strategy is None)
assert system.surface_tilt == expected[0]
assert system.surface_azimuth == expected[1]
def test_run_model_with_irradiance(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(irradiance).ac
expected = pd.Series(np.array([187.80746494643176, -0.02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_times(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
with pytest.warns(pvlibDeprecationWarning):
mc.run_model(irradiance, times=times)
def test_prepare_inputs_times(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
with pytest.warns(pvlibDeprecationWarning):
mc.prepare_inputs(irradiance, times=times)
def test_prepare_inputs_no_irradiance(system, location):
mc = ModelChain(system, location)
weather = pd.DataFrame()
with pytest.raises(ValueError):
mc.prepare_inputs(weather)
@requires_tables
def test_complete_irradiance_times(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'ghi': 600., 'dhi': 150.}, index=times)
with pytest.warns(pvlibDeprecationWarning):
mc.complete_irradiance(irradiance, times=times)
def test_run_model_perez(system, location):
mc = ModelChain(system, location, transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(irradiance).ac
expected = pd.Series(np.array([187.94295642, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_gueymard_perez(system, location):
mc = ModelChain(system, location, airmass_model='gueymard1993',
transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(irradiance).ac
expected = pd.Series(np.array([187.94317405, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_with_weather(system, location, weather, mocker):
weather['wind_speed'] = 5
weather['temp_air'] = 10
# test with sapm cell temperature model
system.racking_model = 'open_rack'
system.module_type = 'glass_glass'
mc = ModelChain(system, location)
mc.temperature_model = 'sapm'
m_sapm = mocker.spy(system, 'sapm_celltemp')
mc.run_model(weather)
assert m_sapm.call_count == 1
# assert_called_once_with cannot be used with series, so need to use
# assert_series_equal on call_args
assert_series_equal(m_sapm.call_args[0][1], weather['temp_air']) # temp
assert_series_equal(m_sapm.call_args[0][2], weather['wind_speed']) # wind
assert not mc.ac.empty
# test with pvsyst cell temperature model
system.racking_model = 'freestanding'
system.temperature_model_parameters = \
temperature._temperature_model_params('pvsyst', 'freestanding')
mc = ModelChain(system, location)
mc.temperature_model = 'pvsyst'
m_pvsyst = mocker.spy(system, 'pvsyst_celltemp')
mc.run_model(weather)
assert m_pvsyst.call_count == 1
assert_series_equal(m_pvsyst.call_args[0][1], weather['temp_air'])
assert_series_equal(m_pvsyst.call_args[0][2], weather['wind_speed'])
assert not mc.ac.empty
def test_run_model_tracker(system, location, weather, mocker):
system = SingleAxisTracker(
module_parameters=system.module_parameters,
temperature_model_parameters=system.temperature_model_parameters,
inverter_parameters=system.inverter_parameters)
mocker.spy(system, 'singleaxis')
mc = ModelChain(system, location)
mc.run_model(weather)
assert system.singleaxis.call_count == 1
assert (mc.tracking.columns == ['tracker_theta', 'aoi', 'surface_azimuth',
'surface_tilt']).all()
assert mc.ac[0] > 0
assert np.isnan(mc.ac[1])
def poadc(mc):
mc.dc = mc.total_irrad['poa_global'] * 0.2
mc.dc.name = None # assert_series_equal will fail without this
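# For reference, poadc is passed below (test_dc_model_user_func) as a
# user-supplied dc model, for example:
#   ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model=poadc,
#              aoi_model='no_loss', spectral_model='no_loss')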
@pytest.mark.parametrize('dc_model', [
'sapm',
pytest.param('cec', marks=requires_scipy),
pytest.param('desoto', marks=requires_scipy),
pytest.param('pvsyst', marks=requires_scipy),
pytest.param('singlediode', marks=requires_scipy),
'pvwatts_dc'])
def test_infer_dc_model(system, cec_dc_snl_ac_system, pvsyst_dc_snl_ac_system,
pvwatts_dc_pvwatts_ac_system, location, dc_model,
weather, mocker):
dc_systems = {'sapm': system,
'cec': cec_dc_snl_ac_system,
'desoto': cec_dc_snl_ac_system,
'pvsyst': pvsyst_dc_snl_ac_system,
'singlediode': cec_dc_snl_ac_system,
'pvwatts_dc': pvwatts_dc_pvwatts_ac_system}
dc_model_function = {'sapm': 'sapm',
'cec': 'calcparams_cec',
'desoto': 'calcparams_desoto',
'pvsyst': 'calcparams_pvsyst',
'singlediode': 'calcparams_desoto',
'pvwatts_dc': 'pvwatts_dc'}
temp_model_function = {'sapm': 'sapm',
'cec': 'sapm',
'desoto': 'sapm',
'pvsyst': 'pvsyst',
'singlediode': 'sapm',
'pvwatts_dc': 'sapm'}
temp_model_params = {'sapm': {'a': -3.40641, 'b': -0.0842075, 'deltaT': 3},
'pvsyst': {'u_c': 29.0, 'u_v': 0}}
system = dc_systems[dc_model]
system.temperature_model_parameters = temp_model_params[
temp_model_function[dc_model]]
# remove Adjust from model parameters for desoto, singlediode
if dc_model in ['desoto', 'singlediode']:
system.module_parameters.pop('Adjust')
m = mocker.spy(system, dc_model_function[dc_model])
mc = ModelChain(system, location,
aoi_model='no_loss', spectral_model='no_loss',
temperature_model=temp_model_function[dc_model])
mc.run_model(weather)
assert m.call_count == 1
assert isinstance(mc.dc, (pd.Series, pd.DataFrame))
@pytest.mark.parametrize('dc_model', [
'sapm',
pytest.param('cec', marks=requires_scipy),
pytest.param('cec_native', marks=requires_scipy)])
def test_infer_spectral_model(location, system, cec_dc_snl_ac_system,
cec_dc_native_snl_ac_system, dc_model):
dc_systems = {'sapm': system,
'cec': cec_dc_snl_ac_system,
'cec_native': cec_dc_native_snl_ac_system}
system = dc_systems[dc_model]
mc = ModelChain(system, location,
orientation_strategy='None', aoi_model='physical')
assert isinstance(mc, ModelChain)
@pytest.mark.parametrize('temp_model', [
'sapm', pytest.param('pvsyst', marks=requires_scipy)])
def test_infer_temp_model(location, system, pvsyst_dc_snl_ac_system,
temp_model):
dc_systems = {'sapm': system,
'pvsyst': pvsyst_dc_snl_ac_system}
system = dc_systems[temp_model]
mc = ModelChain(system, location,
orientation_strategy='None', aoi_model='physical',
spectral_model='no_loss')
assert isinstance(mc, ModelChain)
@requires_scipy
def test_infer_temp_model_invalid(location, system):
system.temperature_model_parameters.pop('a')
with pytest.raises(ValueError):
ModelChain(system, location, orientation_strategy='None',
aoi_model='physical', spectral_model='no_loss')
@requires_scipy
def test_temperature_model_inconsistent(location, system):
with pytest.raises(ValueError):
ModelChain(system, location, orientation_strategy='None',
aoi_model='physical', spectral_model='no_loss',
temperature_model='pvsyst')
def test_dc_model_user_func(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(sys.modules[__name__], 'poadc')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model=poadc,
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather)
assert m.call_count == 1
assert isinstance(mc.ac, (pd.Series, pd.DataFrame))
assert not mc.ac.empty
def acdc(mc):
mc.ac = mc.dc
@pytest.mark.parametrize('ac_model', [
'snlinverter', pytest.param('adrinverter', marks=requires_scipy),
'pvwatts'])
def test_ac_models(system, cec_dc_adr_ac_system, pvwatts_dc_pvwatts_ac_system,
location, ac_model, weather, mocker):
ac_systems = {'snlinverter': system, 'adrinverter': cec_dc_adr_ac_system,
'pvwatts': pvwatts_dc_pvwatts_ac_system}
system = ac_systems[ac_model]
mc = ModelChain(system, location, ac_model=ac_model,
aoi_model='no_loss', spectral_model='no_loss')
if ac_model == 'pvwatts':
ac_model += '_ac'
m = mocker.spy(system, ac_model)
mc.run_model(weather)
assert m.call_count == 1
assert isinstance(mc.ac, pd.Series)
assert not mc.ac.empty
assert mc.ac[1] < 1
def test_ac_model_user_func(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(sys.modules[__name__], 'acdc')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, ac_model=acdc,
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather)
assert m.call_count == 1
assert_series_equal(mc.ac, mc.dc)
assert not mc.ac.empty
def constant_aoi_loss(mc):
mc.aoi_modifier = 0.9
@pytest.mark.parametrize('aoi_model', [
'sapm', 'ashrae', 'physical', 'martin_ruiz'
])
def test_aoi_models(system, location, aoi_model, weather, mocker):
mc = ModelChain(system, location, dc_model='sapm',
aoi_model=aoi_model, spectral_model='no_loss')
m = mocker.spy(system, 'get_iam')
mc.run_model(weather=weather)
assert m.call_count == 1
assert isinstance(mc.ac, pd.Series)
assert not mc.ac.empty
assert mc.ac[0] > 150 and mc.ac[0] < 200
assert mc.ac[1] < 1
def test_aoi_model_no_loss(system, location, weather):
mc = ModelChain(system, location, dc_model='sapm',
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather)
assert mc.aoi_modifier == 1.0
assert not mc.ac.empty
assert mc.ac[0] > 150 and mc.ac[0] < 200
assert mc.ac[1] < 1
def test_aoi_model_user_func(system, location, weather, mocker):
m = mocker.spy(sys.modules[__name__], 'constant_aoi_loss')
mc = ModelChain(system, location, dc_model='sapm',
aoi_model=constant_aoi_loss, spectral_model='no_loss')
mc.run_model(weather)
assert m.call_count == 1
assert mc.aoi_modifier == 0.9
assert not mc.ac.empty
assert mc.ac[0] > 140 and mc.ac[0] < 200
assert mc.ac[1] < 1
@pytest.mark.parametrize('aoi_model', [
'sapm', 'ashrae', 'physical', 'martin_ruiz'
])
def test_infer_aoi_model(location, system_no_aoi, aoi_model):
for k in iam._IAM_MODEL_PARAMS[aoi_model]:
system_no_aoi.module_parameters.update({k: 1.0})
mc = ModelChain(system_no_aoi, location,
orientation_strategy='None',
spectral_model='no_loss')
assert isinstance(mc, ModelChain)
def test_infer_aoi_model_invalid(location, system_no_aoi):
exc_text = 'could not infer AOI model'
with pytest.raises(ValueError, match=exc_text):
ModelChain(system_no_aoi, location, orientation_strategy='None',
spectral_model='no_loss')
def constant_spectral_loss(mc):
mc.spectral_modifier = 0.9
@requires_scipy
@pytest.mark.parametrize('spectral_model', [
'sapm', 'first_solar', 'no_loss', constant_spectral_loss
])
def test_spectral_models(system, location, spectral_model, weather):
# add pw to weather dataframe
weather['precipitable_water'] = [0.3, 0.5]
mc = ModelChain(system, location, dc_model='sapm',
aoi_model='no_loss', spectral_model=spectral_model)
spectral_modifier = mc.run_model(weather).spectral_modifier
assert isinstance(spectral_modifier, (pd.Series, float, int))
def constant_losses(mc):
mc.losses = 0.9
mc.dc *= mc.losses
def test_losses_models_pvwatts(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
age = 1
pvwatts_dc_pvwatts_ac_system.losses_parameters = dict(age=age)
m = mocker.spy(pvsystem, 'pvwatts_losses')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss',
losses_model='pvwatts')
mc.run_model(weather)
assert m.call_count == 1
m.assert_called_with(age=age)
assert isinstance(mc.ac, (pd.Series, pd.DataFrame))
assert not mc.ac.empty
# check that we're applying correction to dc
# GH 696
dc_with_loss = mc.dc
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss',
losses_model='no_loss')
mc.run_model(weather)
assert not np.allclose(mc.dc, dc_with_loss, equal_nan=True)
def test_losses_models_ext_def(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(sys.modules[__name__], 'constant_losses')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss',
losses_model=constant_losses)
mc.run_model(weather)
assert m.call_count == 1
assert isinstance(mc.ac, (pd.Series, pd.DataFrame))
assert mc.losses == 0.9
assert not mc.ac.empty
def test_losses_models_no_loss(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(pvsystem, 'pvwatts_losses')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss',
losses_model='no_loss')
assert mc.losses_model == mc.no_extra_losses
mc.run_model(weather)
assert m.call_count == 0
assert mc.losses == 1
def test_invalid_dc_model_params(system, cec_dc_snl_ac_system,
pvwatts_dc_pvwatts_ac_system, location):
kwargs = {'dc_model': 'sapm', 'ac_model': 'snlinverter',
'aoi_model': 'no_loss', 'spectral_model': 'no_loss',
'temperature_model': 'sapm', 'losses_model': 'no_loss'}
system.module_parameters.pop('A0') # remove a parameter
with pytest.raises(ValueError):
ModelChain(system, location, **kwargs)
kwargs['dc_model'] = 'singlediode'
cec_dc_snl_ac_system.module_parameters.pop('a_ref') # remove a parameter
with pytest.raises(ValueError):
ModelChain(cec_dc_snl_ac_system, location, **kwargs)
kwargs['dc_model'] = 'pvwatts'
kwargs['ac_model'] = 'pvwatts'
pvwatts_dc_pvwatts_ac_system.module_parameters.pop('pdc0')
with pytest.raises(ValueError):
ModelChain(pvwatts_dc_pvwatts_ac_system, location, **kwargs)
@pytest.mark.parametrize('model', [
'dc_model', 'ac_model', 'aoi_model', 'spectral_model',
'temperature_model', 'losses_model'
])
def test_invalid_models(model, system, location):
kwargs = {'dc_model': 'pvwatts', 'ac_model': 'pvwatts',
'aoi_model': 'no_loss', 'spectral_model': 'no_loss',
'temperature_model': 'sapm', 'losses_model': 'no_loss'}
kwargs[model] = 'invalid'
with pytest.raises(ValueError):
ModelChain(system, location, **kwargs)
def test_bad_get_orientation():
with pytest.raises(ValueError):
modelchain.get_orientation('bad value')
@fail_on_pvlib_version('0.8')
def test_deprecated_08():
# explicit system creation call because fail_on_pvlib_version
# does not support decorators.
# does not matter what the parameters are, just fake it until we make it
module_parameters = {'R_sh_ref': 1, 'a_ref': 1, 'I_o_ref': 1,
'alpha_sc': 1, 'I_L_ref': 1, 'R_s': 1}
# do not assign PVSystem.temperature_model_parameters
# leave out PVSystem.racking_model and PVSystem.module_type
system = PVSystem(module_parameters=module_parameters)
# deprecated temp_model kwarg
warn_txt = 'temp_model keyword argument is deprecated'
with pytest.warns(pvlibDeprecationWarning, match=warn_txt):
ModelChain(system, location, dc_model='desoto', aoi_model='no_loss',
spectral_model='no_loss', ac_model='snlinverter',
temp_model='sapm')
# provide both temp_model and temperature_model kwargs
warn_txt = 'Provide only one of temperature_model'
with pytest.warns(pvlibDeprecationWarning, match=warn_txt):
ModelChain(system, location, dc_model='desoto', aoi_model='no_loss',
spectral_model='no_loss', ac_model='snlinverter',
temperature_model='sapm', temp_model='sapm')
# conflicting temp_model and temperature_model kwargs
exc_text = 'Conflicting temperature_model'
with pytest.raises(ValueError, match=exc_text):
ModelChain(system, location, dc_model='desoto', aoi_model='no_loss',
spectral_model='no_loss', ac_model='snlinverter',
temperature_model='pvsyst', temp_model='sapm')
@requires_scipy
def test_basic_chain_required(sam_data, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32
longitude = -111
altitude = 700
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
temp_model_params = sapm_temperature_cs5p_220m.copy()
with pytest.raises(ValueError):
dc, ac = modelchain.basic_chain(
times, latitude, longitude, module_parameters, temp_model_params,
cec_inverter_parameters, altitude=altitude
)
@requires_scipy
def test_basic_chain_alt_az(sam_data, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
surface_tilt = 0
surface_azimuth = 0
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
temp_model_params = sapm_temperature_cs5p_220m.copy()
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, temp_model_params,
cec_inverter_parameters,
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth)
expected = pd.Series(np.array([115.40352679, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=1)
@requires_scipy
def test_basic_chain_strategy(sam_data, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
altitude = 700
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
temp_model_params = sapm_temperature_cs5p_220m.copy()
dc, ac = modelchain.basic_chain(
times, latitude, longitude, module_parameters, temp_model_params,
cec_inverter_parameters, orientation_strategy='south_at_latitude_tilt',
altitude=altitude)
expected = pd.Series(np.array([183.522449305, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=1)
@requires_scipy
def test_basic_chain_altitude_pressure(sam_data, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
altitude = 700
surface_tilt = 0
surface_azimuth = 0
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
temp_model_params = sapm_temperature_cs5p_220m.copy()
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, temp_model_params,
cec_inverter_parameters,
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth,
pressure=93194)
expected = pd.Series(np.array([116.595664887, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=1)
dc, ac = modelchain.basic_chain(times, latitude, longitude,
module_parameters, temp_model_params,
cec_inverter_parameters,
surface_tilt=surface_tilt,
surface_azimuth=surface_azimuth,
altitude=altitude)
expected = pd.Series(np.array([116.595664887, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=1)
@pytest.mark.parametrize('strategy, strategy_str', [
('south_at_latitude_tilt', 'south_at_latitude_tilt'),
(None, 'None')]) # GitHub issue 352
def test_ModelChain___repr__(system, location, strategy, strategy_str):
mc = ModelChain(system, location, orientation_strategy=strategy,
name='my mc')
expected = '\n'.join([
'ModelChain: ',
' name: my mc',
' orientation_strategy: ' + strategy_str,
' clearsky_model: ineichen',
' transposition_model: haydavies',
' solar_position_method: nrel_numpy',
' airmass_model: kastenyoung1989',
' dc_model: sapm',
' ac_model: snlinverter',
' aoi_model: sapm_aoi_loss',
' spectral_model: sapm_spectral_loss',
' temperature_model: sapm_temp',
' losses_model: no_extra_losses'
])
assert mc.__repr__() == expected
@requires_scipy
def test_complete_irradiance_clean_run(system, location):
"""The DataFrame should not change if all columns are passed"""
mc = ModelChain(system, location)
times = pd.date_range('2010-07-05 9:00:00', periods=2, freq='H')
i = pd.DataFrame(
{'dni': [2, 3], 'dhi': [4, 6], 'ghi': [9, 5]}, index=times)
mc.complete_irradiance(i)
assert_series_equal(mc.weather['dni'],
pd.Series([2, 3], index=times, name='dni'))
assert_series_equal(mc.weather['dhi'],
pd.Series([4, 6], index=times, name='dhi'))
assert_series_equal(mc.weather['ghi'],
pd.Series([9, 5], index=times, name='ghi'))
@requires_scipy
def test_complete_irradiance(system, location):
"""Check calculations"""
mc = ModelChain(system, location)
times = pd.date_range('2010-07-05 7:00:00-0700', periods=2, freq='H')
i = pd.DataFrame({'dni': [49.756966, 62.153947],
'ghi': [372.103976116, 497.087579068],
'dhi': [356.543700, 465.44400]}, index=times)
with pytest.warns(UserWarning):
mc.complete_irradiance(i[['ghi', 'dni']])
assert_series_equal(mc.weather['dhi'],
pd.Series([356.543700, 465.44400],
index=times, name='dhi'))
with pytest.warns(UserWarning):
mc.complete_irradiance(i[['dhi', 'dni']])
assert_series_equal(mc.weather['ghi'],
pd.Series([372.103976116, 497.087579068],
index=times, name='ghi'))
mc.complete_irradiance(i[['dhi', 'ghi']])
assert_series_equal(mc.weather['dni'],
pd.Series([49.756966, 62.153947],
index=times, name='dni'))
| bsd-3-clause | -8,124,117,412,908,788,000 | 39.184713 | 79 | 0.614265 | false |
rrrrrr8/vnpy | vnpy/api/lbank/test.py | 1 | 1536 | # encoding: UTF-8
from six.moves import input
from time import time
from vnlbank import LbankRestApi, LbankWebsocketApi
API_KEY = '132a36ce-ad1c-409a-b48c-09b7877ae49b'
SECRET_KEY = '319320BF875297E7F4050E1195B880E8'
#----------------------------------------------------------------------
def restTest():
""""""
    # Create the API object and initialise it
api = LbankRestApi()
api.init(API_KEY, SECRET_KEY)
api.start(1)
    # Test calls
#api.addReq('GET', '/currencyPairs.do', {}, api.onData)
#api.addReq('GET', '/accuracy.do', {}, api.onData)
#api.addReq('GET', '/ticker.do', {'symbol': 'eth_btc'}, api.onData)
#api.addReq('GET', '/depth.do', {'symbol': 'eth_btc', 'size': '5'}, api.onData)
#api.addReq('post', '/user_info.do', {}, api.onData)
req = {
'symbol': 'sc_btc',
'current_page': '1',
'page_length': '50'
}
api.addReq('POST', '/orders_info_no_deal.do', req, api.onData)
    # Block until the user presses Enter
input()
#----------------------------------------------------------------------
def wsTest():
""""""
ws = LbankWebsocketApi()
ws.start()
channels = [
'lh_sub_spot_eth_btc_depth_20',
'lh_sub_spot_eth_btc_trades',
'lh_sub_spot_eth_btc_ticker'
]
for channel in channels:
req = {
'event': 'addChannel',
'channel': channel
}
ws.sendReq(req)
    # Block until the user presses Enter
input()
if __name__ == '__main__':
restTest()
#wsTest() | mit | 5,003,571,435,686,934,000 | 21.522388 | 83 | 0.479443 | false |
luci/luci-py | appengine/swarming/remote_smoke_test.py | 2 | 7911 | #!/usr/bin/env vpython
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Integration test for the Swarming server."""
import json
import logging
import optparse
import os
import subprocess
import sys
import tempfile
import threading
import time
from six.moves import queue
APP_DIR = os.path.dirname(os.path.abspath(__file__))
CHECKOUT_DIR = os.path.dirname(os.path.dirname(APP_DIR))
CLIENT_DIR = os.path.join(CHECKOUT_DIR, 'client')
SWARMING_SCRIPT = os.path.join(CLIENT_DIR, 'swarming.py')
sys.path.insert(0, CLIENT_DIR)
sys.path.insert(0, os.path.join(CLIENT_DIR, 'third_party'))
from depot_tools import fix_encoding
from utils import file_path
sys.path.pop(0)
sys.path.pop(0)
def gen_isolated(isolate, script, includes=None):
"""Archives a script to `isolate` server."""
tmp = tempfile.mkdtemp(prefix='swarming_smoke')
data = {
'variables': {
'command': ['python', '-u', 'script.py'],
'files': ['script.py'],
},
}
try:
with open(os.path.join(tmp, 'script.py'), 'wb') as f:
f.write(script)
path = os.path.join(tmp, 'script.isolate')
with open(path, 'wb') as f:
# This file is actually python but it's #closeenough.
json.dump(data, f, sort_keys=True, separators=(',', ':'))
isolated = os.path.join(tmp, 'script.isolated')
cmd = [
os.path.join(CLIENT_DIR, 'isolate.py'), 'archive',
'-I', isolate, '-i', path, '-s', isolated,
]
out = subprocess.check_output(cmd)
if includes:
# Mangle the .isolated to include another one. A bit hacky but works well.
# In practice, we'd need to add a --include flag to isolate.py archive or
# something.
with open(isolated, 'rb') as f:
data = json.load(f)
data['includes'] = includes
with open(isolated, 'wb') as f:
json.dump(data, f, sort_keys=True, separators=(',', ':'))
cmd = [
os.path.join(CLIENT_DIR, 'isolateserver.py'), 'archive',
'-I', isolate, '--namespace', 'default-gzip', isolated,
]
out = subprocess.check_output(cmd)
return out.split(' ', 1)[0]
finally:
file_path.rmtree(tmp)
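# Illustrative usage sketch; the isolate server URL and script body here are
# assumptions, not values from this test:
#   isolated_hash = gen_isolated('https://isolate.example.com',
#                                'print("SUCCESS")')
#   # isolated_hash is the hash printed by isolateserver.py archive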
def capture(cmd, **kwargs):
"""Captures output and return exit code."""
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs)
out = proc.communicate()[0]
return out, proc.returncode
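# Illustrative usage sketch (the command shown is an assumption):
#   out, returncode = capture([sys.executable, '--version'])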
def test_normal(swarming, isolate, extra_flags):
"""Runs a normal task that succeeds."""
h = gen_isolated(isolate, 'print(\'SUCCESS\')')
subprocess.check_output(
[SWARMING_SCRIPT, 'run', '-S', swarming, '-I', isolate, h] + extra_flags)
return 'SUCCESS'
def test_expiration(swarming, isolate, extra_flags):
"""Schedule a task that cannot be scheduled and expire."""
h = gen_isolated(isolate, 'print(\'SUCCESS\')')
start = time.time()
out, exitcode = capture(
[
SWARMING_SCRIPT, 'run', '-S', swarming, '-I', isolate, h,
'--expiration', '30', '-d', 'invalid', 'always',
] + extra_flags)
duration = time.time() - start
if exitcode != 1:
return 'Unexpected exit code: %d' % exitcode
# TODO(maruel): Shouldn't take more than a minute or so.
if duration < 30 or duration > 120:
return 'Unexpected expiration timeout: %d\n%s' % (duration, out)
return 'SUCCESS'
def test_io_timeout(swarming, isolate, extra_flags):
"""Runs a task that triggers IO timeout."""
h = gen_isolated(
isolate,
'import time\n'
'print(\'SUCCESS\')\n'
'time.sleep(40)\n'
'print(\'FAILURE\')')
start = time.time()
out, exitcode = capture(
[
SWARMING_SCRIPT, 'run', '-S', swarming, '-I', isolate, h,
'--io-timeout', '30',
] + extra_flags)
duration = time.time() - start
if exitcode != 1:
return 'Unexpected exit code: %d\n%s' % (exitcode, out)
if duration < 30:
return 'Unexpected fast execution: %d' % duration
return 'SUCCESS'
def test_hard_timeout(swarming, isolate, extra_flags):
"""Runs a task that triggers hard timeout."""
h = gen_isolated(
isolate, 'import time\n'
'for i in range(6):'
' print(\'.\')\n'
' time.sleep(10)\n')
start = time.time()
out, exitcode = capture(
[
SWARMING_SCRIPT, 'run', '-S', swarming, '-I', isolate, h,
'--hard-timeout', '30',
] + extra_flags)
duration = time.time() - start
if exitcode != 1:
return 'Unexpected exit code: %d\n%s' % (exitcode, out)
if duration < 30:
return 'Unexpected fast execution: %d' % duration
return 'SUCCESS'
def test_reentrant(swarming, isolate, extra_flags):
"""Runs a task that triggers a child task.
To be able to do so, it archives all of ../../client/.
Because the parent task blocks on the child task, it requires at least 2 bots
alive.
"""
# First isolate the whole client directory.
cmd = [
os.path.join(CLIENT_DIR, 'isolateserver.py'), 'archive',
'-I', isolate, '--namespace', 'default-gzip', '--blacklist', 'tests',
CLIENT_DIR,
]
client_isolated = subprocess.check_output(cmd).split()[0]
logging.info('- %s', client_isolated)
script = '\n'.join((
'import os',
'import subprocess',
'import sys',
'print("Before\\n")',
'print("SWARMING_TASK_ID=%s\\n" % os.environ["SWARMING_TASK_ID"])',
'subprocess.check_call(',
' [sys.executable, "-u", "example/3_swarming_run_auto_upload.py",',
' "-S", "%s",' % swarming,
' "-I", "%s",' % isolate,
' "--verbose",',
' ])',
'print("After\\n")'))
h = gen_isolated(isolate, script, [client_isolated])
subprocess.check_output(
[SWARMING_SCRIPT, 'run', '-S', swarming, '-I', isolate, h] + extra_flags)
return 'SUCCESS'
def get_all_tests():
m = sys.modules[__name__]
return {k[5:]: getattr(m, k) for k in dir(m) if k.startswith('test_')}
def run_test(results, swarming, isolate, extra_flags, name, test_case):
start = time.time()
try:
result = test_case(swarming, isolate, extra_flags)
except Exception as e:
result = e
results.put((name, result, time.time() - start))
def main():
fix_encoding.fix_encoding()
# It's necessary for relative paths in .isolate.
os.chdir(APP_DIR)
parser = optparse.OptionParser()
parser.add_option('-S', '--swarming', help='Swarming server')
parser.add_option('-I', '--isolate-server', help='Isolate server')
parser.add_option('-d', '--dimensions', nargs=2, default=[], action='append')
parser.add_option('-v', '--verbose', action='store_true', help='Logs more')
options, args = parser.parse_args()
if args:
parser.error('Unsupported args: %s' % args)
if not options.swarming:
parser.error('--swarming required')
if not options.isolate_server:
parser.error('--isolate-server required')
if not os.path.isfile(SWARMING_SCRIPT):
parser.error('Invalid checkout, %s does not exist' % SWARMING_SCRIPT)
logging.basicConfig(level=logging.DEBUG if options.verbose else logging.ERROR)
extra_flags = ['--priority', '5', '--tags', 'smoke_test:1']
for k, v in options.dimensions or [('os', 'Linux')]:
extra_flags.extend(('-d', k, v))
# Run all the tests in parallel.
tests = get_all_tests()
results = queue.Queue(maxsize=len(tests))
for name, fn in sorted(tests.items()):
logging.info('%s', name)
t = threading.Thread(
target=run_test, name=name,
args=(results, options.swarming, options.isolate_server, extra_flags,
name, fn))
t.start()
print('%d tests started' % len(tests))
maxlen = max(len(name) for name in tests)
for i in range(len(tests)):
name, result, duration = results.get()
print('[%d/%d] %-*s: %4.1fs: %s' %
(i, len(tests), maxlen, name, duration, result))
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -9,071,344,116,533,746,000 | 30.644 | 80 | 0.624321 | false |
crossroadchurch/paul | openlp/plugins/songs/forms/editsongform.py | 1 | 60260 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`~openlp.plugins.songs.forms.editsongform` module contains the form
used to edit songs.
"""
import logging
import re
import os
import shutil
from PyQt4 import QtCore, QtGui
from openlp.core.common import Registry, RegistryProperties, AppLocation, UiStrings, check_directory_exists, translate
from openlp.core.lib import FileDialog, PluginStatus, MediaType, create_separated_list
from openlp.core.lib.ui import set_case_insensitive_completer, critical_error_message_box, find_and_set_in_combo_box
from openlp.plugins.songs.lib import VerseType, clean_song
from openlp.plugins.songs.lib.db import Book, Song, Author, AuthorType, Topic, MediaFile
from openlp.plugins.songs.lib.ui import SongStrings
from openlp.plugins.songs.lib.openlyricsxml import SongXML
from openlp.plugins.songs.forms.editsongdialog import Ui_EditSongDialog
from openlp.plugins.songs.forms.editverseform import EditVerseForm
from openlp.plugins.songs.forms.editversechordsform import EditVerseChordsForm
from openlp.plugins.songs.forms.mediafilesform import MediaFilesForm
from openlp.plugins.songs.lib.chords import Chords
log = logging.getLogger(__name__)
class EditSongForm(QtGui.QDialog, Ui_EditSongDialog, RegistryProperties):
"""
Class to manage the editing of a song
"""
log.info('%s EditSongForm loaded', __name__)
def __init__(self, media_item, parent, manager):
"""
Constructor
"""
super(EditSongForm, self).__init__(parent)
self.media_item = media_item
self.song = None
# can this be automated?
self.width = 400
self.setupUi(self)
# Connecting signals and slots
self.song_key_edit.currentIndexChanged.connect(self.on_key_or_transpose_change)
self.transpose_edit.valueChanged.connect(self.on_key_or_transpose_change)
self.author_add_button.clicked.connect(self.on_author_add_button_clicked)
self.author_edit_button.clicked.connect(self.on_author_edit_button_clicked)
self.author_remove_button.clicked.connect(self.on_author_remove_button_clicked)
self.authors_list_view.itemClicked.connect(self.on_authors_list_view_clicked)
self.topic_add_button.clicked.connect(self.on_topic_add_button_clicked)
self.topic_remove_button.clicked.connect(self.on_topic_remove_button_clicked)
self.topics_list_view.itemClicked.connect(self.on_topic_list_view_clicked)
self.copyright_insert_button.clicked.connect(self.on_copyright_insert_button_triggered)
self.verse_add_button.clicked.connect(self.on_verse_add_button_clicked)
self.verse_list_widget.doubleClicked.connect(self.on_verse_edit_all_chords_button_clicked)
self.verse_edit_chords_button.clicked.connect(self.on_verse_edit_chords_button_clicked)
self.verse_edit_all_chords_button.clicked.connect(self.on_verse_edit_all_chords_button_clicked)
self.verse_delete_button.clicked.connect(self.on_verse_delete_button_clicked)
self.verse_list_widget.itemClicked.connect(self.on_verse_list_view_clicked)
self.verse_order_edit.textChanged.connect(self.on_verse_order_text_changed)
self.theme_add_button.clicked.connect(self.theme_manager.on_add_theme)
self.maintenance_button.clicked.connect(self.on_maintenance_button_clicked)
self.from_file_button.clicked.connect(self.on_audio_add_from_file_button_clicked)
self.from_media_button.clicked.connect(self.on_audio_add_from_media_button_clicked)
self.audio_remove_button.clicked.connect(self.on_audio_remove_button_clicked)
self.audio_remove_all_button.clicked.connect(self.on_audio_remove_all_button_clicked)
Registry().register_function('theme_update_list', self.load_themes)
self.preview_button = QtGui.QPushButton()
self.preview_button.setObjectName('preview_button')
self.preview_button.setText(UiStrings().SaveAndPreview)
self.button_box.addButton(self.preview_button, QtGui.QDialogButtonBox.ActionRole)
self.button_box.clicked.connect(self.on_preview)
# Create other objects and forms
self.manager = manager
self.verse_form = EditVerseForm(self)
self.verse_chords_form = EditVerseChordsForm(self)
self.media_form = MediaFilesForm(self)
self.initialise()
self.authors_list_view.setSortingEnabled(False)
self.authors_list_view.setAlternatingRowColors(True)
self.topics_list_view.setSortingEnabled(False)
self.topics_list_view.setAlternatingRowColors(True)
self.audio_list_widget.setAlternatingRowColors(True)
self.find_verse_split = re.compile('---\[\]---\n', re.UNICODE)
self.whitespace = re.compile(r'\W+', re.UNICODE)
self.find_tags = re.compile(u'\{/?\w+\}', re.UNICODE)
def _load_objects(self, cls, combo, cache):
"""
Generically load a set of objects into a cache and a combobox.
"""
objects = self.manager.get_all_objects(cls, order_by_ref=cls.name)
combo.clear()
combo.addItem('')
for obj in objects:
row = combo.count()
combo.addItem(obj.name)
cache.append(obj.name)
combo.setItemData(row, obj.id)
set_case_insensitive_completer(cache, combo)
def _add_author_to_list(self, author, author_type):
"""
Add an author to the author list.
"""
author_item = QtGui.QListWidgetItem(author.get_display_name(author_type))
author_item.setData(QtCore.Qt.UserRole, (author.id, author_type))
self.authors_list_view.addItem(author_item)
def _extract_verse_order(self, verse_order):
"""
Split out the verse order
:param verse_order: The starting verse order
:return: revised order
"""
order = []
order_names = str(verse_order).split()
for item in order_names:
if len(item) == 1:
verse_index = VerseType.from_translated_tag(item, None)
if verse_index is not None:
order.append(VerseType.tags[verse_index] + '1')
else:
# it matches no verses anyway
order.append('')
else:
verse_index = VerseType.from_translated_tag(item[0], None)
if verse_index is None:
# it matches no verses anyway
order.append('')
else:
verse_tag = VerseType.tags[verse_index]
verse_num = item[1:].lower()
order.append(verse_tag + verse_num)
return order
def _validate_verse_list(self, verse_order, verse_count):
"""
Check the verse order list has valid verses
:param verse_order: Verse order
:param verse_count: number of verses
:return: Count of invalid verses
"""
verses = []
invalid_verses = []
verse_names = []
order_names = str(verse_order).split()
order = self._extract_verse_order(verse_order)
for index in range(verse_count):
verse = self.verse_list_widget.item(index, 0)
verse = verse.data(QtCore.Qt.UserRole)
if verse not in verse_names:
verses.append(verse)
verse_names.append('%s%s' % (VerseType.translated_tag(verse[0]), verse[1:]))
for count, item in enumerate(order):
if item not in verses:
invalid_verses.append(order_names[count])
if invalid_verses:
valid = create_separated_list(verse_names)
if len(invalid_verses) > 1:
                msg = translate('SongsPlugin.EditSongForm', 'There are no verses corresponding to "%(invalid)s". '
                                'Valid entries are %(valid)s.\nPlease enter the verses separated by spaces.') % \
                    {'invalid': ', '.join(invalid_verses), 'valid': valid}
            else:
                msg = translate('SongsPlugin.EditSongForm', 'There is no verse corresponding to "%(invalid)s". '
                                'Valid entries are %(valid)s.\nPlease enter the verses separated by spaces.') % \
                    {'invalid': invalid_verses[0], 'valid': valid}
critical_error_message_box(title=translate('SongsPlugin.EditSongForm', 'Invalid Verse Order'),
message=msg)
return len(invalid_verses) == 0
def _validate_song(self):
"""
Check the validity of the song.
"""
# This checks data in the form *not* self.song. self.song is still
# None at this point.
log.debug('Validate Song')
        # Let's be nice and assume the data is correct.
if not self.title_edit.text():
self.song_tab_widget.setCurrentIndex(0)
self.title_edit.setFocus()
critical_error_message_box(
message=translate('SongsPlugin.EditSongForm', 'You need to type in a song title.'))
return False
if self.verse_list_widget.rowCount() == 0:
self.song_tab_widget.setCurrentIndex(0)
self.verse_list_widget.setFocus()
critical_error_message_box(
message=translate('SongsPlugin.EditSongForm', 'You need to type in at least one verse.'))
return False
        if (''.join(self.chords_lyrics_list).find('@') != -1) and (self.song_key_edit.currentIndex() == -1):
            # Song has chords but no key
            critical_error_message_box(
                message=translate('SongsPlugin.EditSongForm', 'You need to choose a key for the song.'))
return False
if self.authors_list_view.count() == 0:
self.song_tab_widget.setCurrentIndex(1)
self.authors_list_view.setFocus()
critical_error_message_box(message=translate('SongsPlugin.EditSongForm',
'You need to have an author for this song.'))
return False
if self.verse_order_edit.text():
result = self._validate_verse_list(self.verse_order_edit.text(), self.verse_list_widget.rowCount())
if not result:
return False
text = self.song_book_combo_box.currentText()
if self.song_book_combo_box.findText(text, QtCore.Qt.MatchExactly) < 0:
if QtGui.QMessageBox.question(
self, translate('SongsPlugin.EditSongForm', 'Add Book'),
translate('SongsPlugin.EditSongForm', 'This song book does not exist, do you want to add it?'),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes) == QtGui.QMessageBox.Yes:
book = Book.populate(name=text, publisher='')
self.manager.save_object(book)
else:
return False
# Validate tags (lp#1199639)
misplaced_tags = []
verse_tags = []
for i in range(self.verse_list_widget.rowCount()):
item = self.verse_list_widget.item(i, 0)
tags = self.find_tags.findall(item.text())
field = item.data(QtCore.Qt.UserRole)
verse_tags.append(field)
if not self._validate_tags(tags):
misplaced_tags.append('%s %s' % (VerseType.translated_name(field[0]), field[1:]))
if misplaced_tags:
critical_error_message_box(
message=translate('SongsPlugin.EditSongForm',
'There are misplaced formatting tags in the following verses:\n\n%s\n\n'
'Please correct these tags before continuing.' % ', '.join(misplaced_tags)))
return False
for tag in verse_tags:
if verse_tags.count(tag) > 26:
# lp#1310523: OpenLyrics allows only a-z variants of one verse:
# http://openlyrics.info/dataformat.html#verse-name
critical_error_message_box(message=translate(
'SongsPlugin.EditSongForm', 'You have %(count)s verses named %(name)s %(number)s. '
'You can have at most 26 verses with the same name' %
{'count': verse_tags.count(tag),
'name': VerseType.translated_name(tag[0]),
'number': tag[1:]}))
return False
return True
def _validate_tags(self, tags, first_time=True):
"""
Validates a list of tags
Deletes the first affiliated tag pair which is located side by side in the list
and call itself recursively with the shortened tag list.
If there is any misplaced tag in the list, either the length of the tag list is not even,
or the function won't find any tag pairs side by side.
If there is no misplaced tag, the length of the list will be zero on any recursive run.
:param tags: A list of tags
:return: True if the function can't find any mismatched tags. Else False.
"""
if first_time:
fixed_tags = []
for i in range(len(tags)):
if tags[i] != '{br}':
fixed_tags.append(tags[i])
tags = fixed_tags
if len(tags) == 0:
return True
if len(tags) % 2 != 0:
return False
for i in range(len(tags)-1):
if tags[i+1] == "{/" + tags[i][1:]:
del tags[i:i+2]
return self._validate_tags(tags, False)
return False
def _process_lyrics(self):
"""
Process the lyric data entered by the user into the OpenLP XML format.
"""
# This method must only be run after the self.song = Song() assignment.
log.debug('_processLyrics')
sxml = None
try:
sxml = SongXML()
multiple = []
for i in range(self.verse_list_widget.rowCount()):
item = self.verse_list_widget.item(i, 0)
verse_id = item.data(QtCore.Qt.UserRole)
verse_tag = verse_id[0]
verse_num = verse_id[1:]
sxml.add_verse_to_lyrics(verse_tag, verse_num, item.text())
if verse_num > '1' and verse_tag not in multiple:
multiple.append(verse_tag)
self.song.lyrics = str(sxml.extract_xml(), 'utf-8')
for verse in multiple:
self.song.verse_order = re.sub('([' + verse.upper() + verse.lower() + '])(\W|$)',
r'\g<1>1\2', self.song.verse_order)
except:
log.exception('Problem processing song Lyrics \n%s', sxml.dump_xml())
raise
def _process_chords(self):
"""
Process the chords data entered by the user into the OpenLP XML format.
"""
# This method must only be run after the self.song = Song() assignment.
log.debug('_processChords')
sxml = None
try:
sxml = SongXML()
for row in self.chords_lyrics_list:
for match in row.split('---['):
for count, parts in enumerate(match.split(']---\n')):
if count == 0:
# Processing verse tag
if len(parts) == 0:
continue
                            # handle user-entered verse tags carefully
separator = parts.find(':')
if separator >= 0:
verse_name = parts[0:separator].strip()
verse_num = parts[separator + 1:].strip()
else:
verse_name = parts
verse_num = '1'
verse_index = VerseType.from_loose_input(verse_name)
verse_tag = VerseType.tags[verse_index]
# Later we need to handle v1a as well.
regex = re.compile(r'\D*(\d+)\D*')
match = regex.match(verse_num)
if match:
verse_num = match.group(1)
else:
verse_num = '1'
verse_def = '%s%s' % (verse_tag, verse_num)
else:
# Processing lyrics
if parts.endswith('\n'):
parts = parts.rstrip('\n')
previous_line = '¬¬DONE¬¬'
section_text = ''
for line in parts.split('\n'):
if previous_line == '¬¬DONE¬¬':
if line.rstrip().endswith('@'):
previous_line = line
elif line.startswith('['):
# Break line
section_text += line + '\n'
else:
# Lyrics line
section_text += line.replace("#", "") + '\n'
else:
# Previous line was chords...
if line.rstrip().endswith('@'):
# Two successive lines of chords.
section_text += Chords.parseLinesToXml(previous_line.replace('@', ''), '', self.song.song_key) + '\n'
previous_line = line
elif line.startswith('['):
# Break line following chords
section_text += Chords.parseLinesToXml(previous_line.replace('@', ''), '', self.song.song_key) + '\n'
section_text += line + '\n'
previous_line = '¬¬DONE¬¬'
elif line.replace(" ", "") == '':
# Spacer line following Chords
section_text += Chords.parseLinesToXml(previous_line.replace('@', ''), '', self.song.song_key) + '\n'
section_text += '\n'
previous_line = '¬¬DONE¬¬'
else:
# These are lyrics corresponding to previous chords
section_text += Chords.parseLinesToXml(previous_line.replace('@', ''), line, self.song.song_key) + '\n'
previous_line = '¬¬DONE¬¬'
if not previous_line == '¬¬DONE¬¬':
# Process final line of chords stored in previous_line; no corresponding lyrics
section_text += Chords.parseLinesToXml(previous_line.replace('@', ''), '', self.song.song_key)
if section_text.endswith('\n'):
section_text = section_text.rstrip('\n')
sxml.add_verse_to_lyrics(verse_tag, verse_num, section_text)
self.song.chords = str(sxml.extract_xml(), 'utf-8')
except:
log.exception('Problem processing song chords \n%s', sxml.dump_xml())
raise
def keyPressEvent(self, event):
"""
Re-implement the keyPressEvent to react on Return/Enter keys. When some combo boxes have focus we do not want
dialog's default action be triggered but instead our own.
:param event: A QtGui.QKeyEvent event.
"""
if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
if self.authors_combo_box.hasFocus() and self.authors_combo_box.currentText():
self.on_author_add_button_clicked()
return
if self.topics_combo_box.hasFocus() and self.topics_combo_box.currentText():
self.on_topic_add_button_clicked()
return
QtGui.QDialog.keyPressEvent(self, event)
def initialise(self):
"""
Set up the form for when it is displayed.
"""
self.verse_edit_chords_button.setEnabled(False)
self.verse_delete_button.setEnabled(False)
self.author_edit_button.setEnabled(False)
self.author_remove_button.setEnabled(False)
self.topic_remove_button.setEnabled(False)
def load_authors(self):
"""
Load the authors from the database into the combobox.
"""
authors = self.manager.get_all_objects(Author, order_by_ref=Author.display_name)
self.authors_combo_box.clear()
self.authors_combo_box.addItem('')
self.authors = []
for author in authors:
row = self.authors_combo_box.count()
self.authors_combo_box.addItem(author.display_name)
self.authors_combo_box.setItemData(row, author.id)
self.authors.append(author.display_name)
set_case_insensitive_completer(self.authors, self.authors_combo_box)
# Types
self.author_types_combo_box.clear()
# Don't iterate over the dictionary to give them this specific order
for author_type in AuthorType.SortedTypes:
self.author_types_combo_box.addItem(AuthorType.Types[author_type], author_type)
def load_topics(self):
"""
Load the topics into the combobox.
"""
self.topics = []
self._load_objects(Topic, self.topics_combo_box, self.topics)
def load_books(self):
"""
Load the song books into the combobox
"""
self.books = []
self._load_objects(Book, self.song_book_combo_box, self.books)
def load_themes(self, theme_list):
"""
Load the themes into a combobox.
"""
self.theme_combo_box.clear()
self.theme_combo_box.addItem('')
self.themes = theme_list
self.theme_combo_box.addItems(theme_list)
set_case_insensitive_completer(self.themes, self.theme_combo_box)
def load_media_files(self):
"""
Load the media files into a combobox.
"""
self.from_media_button.setVisible(False)
for plugin in self.plugin_manager.plugins:
if plugin.name == 'media' and plugin.status == PluginStatus.Active:
self.from_media_button.setVisible(True)
self.media_form.populate_files(plugin.media_item.get_list(MediaType.Audio))
break
def new_song(self):
"""
Blank the edit form out in preparation for a new song.
"""
log.debug('New Song')
self.song = None
self.initialise()
self.song_tab_widget.setCurrentIndex(0)
self.title_edit.clear()
self.alternative_edit.clear()
self.copyright_edit.clear()
self.verse_order_edit.clear()
self.song_key_edit.setCurrentIndex(-1)
self.transpose_edit.setValue(0)
self.comments_edit.clear()
self.ccli_number_edit.clear()
self.verse_list_widget.clear()
self.verse_list_widget.setRowCount(0)
self.authors_list_view.clear()
self.topics_list_view.clear()
self.audio_list_widget.clear()
self.title_edit.setFocus()
self.song_book_number_edit.clear()
self.load_authors()
self.load_topics()
self.load_books()
self.load_media_files()
self.theme_combo_box.setEditText('')
self.theme_combo_box.setCurrentIndex(0)
        # it's a new song, so previewing is not possible
self.preview_button.setVisible(False)
self.chords_lyrics_list = []
def load_song(self, song_id, preview=False):
"""
Loads a song.
:param song_id: The song id (int).
:param preview: Should be ``True`` if the song is also previewed (boolean).
"""
log.debug('Load Song')
self.initialise()
self.song_tab_widget.setCurrentIndex(0)
self.load_authors()
self.load_topics()
self.load_books()
self.load_media_files()
self.song = self.manager.get_object(Song, song_id)
self.title_edit.setText(self.song.title)
self.alternative_edit.setText(
self.song.alternate_title if self.song.alternate_title else '')
self.song_key_edit.setCurrentIndex(
self.song_key_edit.findText(self.song.song_key) if self.song.song_key else -1)
self.transpose_edit.setValue(
self.song.transpose_by if self.song.transpose_by else 0)
if self.song.song_book_id != 0:
book_name = self.manager.get_object(Book, self.song.song_book_id)
find_and_set_in_combo_box(self.song_book_combo_box, str(book_name.name))
else:
self.song_book_combo_box.setEditText('')
self.song_book_combo_box.setCurrentIndex(0)
if self.song.theme_name:
find_and_set_in_combo_box(self.theme_combo_box, str(self.song.theme_name))
else:
# Clear the theme combo box in case it was previously set (bug #1212801)
self.theme_combo_box.setEditText('')
self.theme_combo_box.setCurrentIndex(0)
self.copyright_edit.setText(self.song.copyright if self.song.copyright else '')
self.comments_edit.setPlainText(self.song.comments if self.song.comments else '')
self.ccli_number_edit.setText(self.song.ccli_number if self.song.ccli_number else '')
self.song_book_number_edit.setText(self.song.song_number if self.song.song_number else '')
# lazy xml migration for now
self.verse_list_widget.clear()
self.verse_list_widget.setRowCount(0)
verse_tags_translated = False
if self.song.lyrics.startswith('<?xml version='):
song_xml = SongXML()
verse_list = song_xml.get_verses(self.song.lyrics)
for count, verse in enumerate(verse_list):
self.verse_list_widget.setRowCount(self.verse_list_widget.rowCount() + 1)
# This silently migrates from localized verse type markup.
# If we trusted the database, this would be unnecessary.
verse_tag = verse[0]['type']
index = None
if len(verse_tag) > 1:
index = VerseType.from_translated_string(verse_tag)
if index is None:
index = VerseType.from_string(verse_tag, None)
else:
verse_tags_translated = True
if index is None:
index = VerseType.from_tag(verse_tag)
verse[0]['type'] = VerseType.tags[index]
if verse[0]['label'] == '':
verse[0]['label'] = '1'
verse_def = '%s%s' % (verse[0]['type'], verse[0]['label'])
item = QtGui.QTableWidgetItem(verse[1])
item.setData(QtCore.Qt.UserRole, verse_def)
self.verse_list_widget.setItem(count, 0, item)
else:
verses = self.song.lyrics.split('\n\n')
for count, verse in enumerate(verses):
self.verse_list_widget.setRowCount(self.verse_list_widget.rowCount() + 1)
item = QtGui.QTableWidgetItem(verse)
verse_def = '%s%s' % (VerseType.tags[VerseType.Verse], str(count + 1))
item.setData(QtCore.Qt.UserRole, verse_def)
self.verse_list_widget.setItem(count, 0, item)
if self.song.verse_order:
# we translate verse order
translated = []
for verse_def in self.song.verse_order.split():
verse_index = None
if verse_tags_translated:
verse_index = VerseType.from_translated_tag(verse_def[0], None)
if verse_index is None:
verse_index = VerseType.from_tag(verse_def[0])
verse_tag = VerseType.translated_tags[verse_index].upper()
translated.append('%s%s' % (verse_tag, verse_def[1:]))
self.verse_order_edit.setText(' '.join(translated))
else:
self.verse_order_edit.setText('')
self.tag_rows()
# clear the results
self.authors_list_view.clear()
for author_song in self.song.authors_songs:
self._add_author_to_list(author_song.author, author_song.author_type)
# clear the results
self.topics_list_view.clear()
for topic in self.song.topics:
topic_name = QtGui.QListWidgetItem(str(topic.name))
topic_name.setData(QtCore.Qt.UserRole, topic.id)
self.topics_list_view.addItem(topic_name)
self.audio_list_widget.clear()
for media in self.song.media_files:
media_file = QtGui.QListWidgetItem(os.path.split(media.file_name)[1])
media_file.setData(QtCore.Qt.UserRole, media.file_name)
self.audio_list_widget.addItem(media_file)
self.title_edit.setFocus()
# Hide or show the preview button.
self.preview_button.setVisible(preview)
# Check if all verse tags are used.
self.on_verse_order_text_changed(self.verse_order_edit.text())
# Process chords XML
if self.song.chords:
song_2_xml = SongXML()
verse_chords_xml = song_2_xml.get_verses(self.song.chords)
self.chords_lyrics_list = []
for count, verse in enumerate(verse_chords_xml):
# This silently migrates from localized verse type markup.
# If we trusted the database, this would be unnecessary.
verse_tag = verse[0]['type']
index = None
if len(verse_tag) > 1:
index = VerseType.from_translated_string(verse_tag)
if index is None:
index = VerseType.from_string(verse_tag, None)
else:
verse_tags_translated = True
if index is None:
index = VerseType.from_tag(verse_tag)
verse[0]['type'] = VerseType.tags[index]
if verse[0]['label'] == '':
verse[0]['label'] = '1'
verse_tag = VerseType.translated_name(verse[0]['type'])
self.chords_lyrics_item = '---[%s:%s]---\n' % (verse_tag, verse[0]['label'])
for line in verse[1].split('\n'):
if line == '':
self.chords_lyrics_item += '\n'
else:
parsed_line = Chords.parseXmlToLines(line)
if not parsed_line[0].strip() == '':
self.chords_lyrics_item += parsed_line[0]
self.chords_lyrics_item += '@\n'
if not parsed_line[1].replace('#', '').strip() == '':
self.chords_lyrics_item += parsed_line[1]
self.chords_lyrics_item += '\n'
if self.chords_lyrics_item.endswith('\n'):
self.chords_lyrics_item = self.chords_lyrics_item.rstrip('\n')
self.chords_lyrics_list.append(self.chords_lyrics_item)
else:
# Only have lyrics for this song, so load them into the list...
self.chords_lyrics_list = []
for row in range(self.verse_list_widget.rowCount()):
item = self.verse_list_widget.item(row, 0)
field = item.data(QtCore.Qt.UserRole)
verse_tag = VerseType.translated_name(field[0])
verse_num = field[1:]
self.chords_lyrics_item = '---[%s:%s]---\n' % (verse_tag, verse_num)
self.chords_lyrics_item += item.text()
self.chords_lyrics_list.append(self.chords_lyrics_item)
def tag_rows(self):
"""
Tag the Song List rows based on the verse list
"""
row_label = []
for row in range(self.verse_list_widget.rowCount()):
item = self.verse_list_widget.item(row, 0)
verse_def = item.data(QtCore.Qt.UserRole)
verse_tag = VerseType.translated_tag(verse_def[0])
row_def = '%s%s' % (verse_tag, verse_def[1:])
row_label.append(row_def)
self.verse_list_widget.setVerticalHeaderLabels(row_label)
self.verse_list_widget.resizeRowsToContents()
self.verse_list_widget.repaint()
def on_author_add_button_clicked(self):
"""
Add the author to the list of authors associated with this song when the button is clicked.
"""
item = int(self.authors_combo_box.currentIndex())
        # Also remove commas from author names. Songs whose authors have commas in
        # their names are re-added to the database when a service plan containing them is loaded.
        text = self.authors_combo_box.currentText().strip(' \r\n\t').replace(',', '')
author_type = self.author_types_combo_box.itemData(self.author_types_combo_box.currentIndex())
# This if statement is for OS X, which doesn't seem to work well with
# the QCompleter auto-completion class. See bug #812628.
if text in self.authors:
# Index 0 is a blank string, so add 1
item = self.authors.index(text) + 1
if item == 0 and text:
if QtGui.QMessageBox.question(
self,
translate('SongsPlugin.EditSongForm', 'Add Author'),
translate('SongsPlugin.EditSongForm', 'This author does not exist, do you want to add them?'),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes) == QtGui.QMessageBox.Yes:
if text.find(' ') == -1:
author = Author.populate(first_name='', last_name='', display_name=text)
else:
author = Author.populate(first_name=text.rsplit(' ', 1)[0], last_name=text.rsplit(' ', 1)[1],
display_name=text)
self.manager.save_object(author)
self._add_author_to_list(author, author_type)
self.load_authors()
self.authors_combo_box.setCurrentIndex(0)
else:
return
elif item > 0:
item_id = (self.authors_combo_box.itemData(item))
author = self.manager.get_object(Author, item_id)
if self.authors_list_view.findItems(author.get_display_name(author_type), QtCore.Qt.MatchExactly):
critical_error_message_box(
message=translate('SongsPlugin.EditSongForm', 'This author is already in the list.'))
else:
self._add_author_to_list(author, author_type)
self.authors_combo_box.setCurrentIndex(0)
else:
QtGui.QMessageBox.warning(
self, UiStrings().NISs,
translate('SongsPlugin.EditSongForm', 'You have not selected a valid author. Either select an author '
'from the list, or type in a new author and click the "Add Author to Song" button to add '
'the new author.'))
def on_authors_list_view_clicked(self):
"""
Run a set of actions when an author in the list is selected (mainly enable the delete button).
"""
count = self.authors_list_view.count()
if count > 0:
self.author_edit_button.setEnabled(True)
if count > 1:
# There must be at least one author
self.author_remove_button.setEnabled(True)
def on_author_edit_button_clicked(self):
"""
Show a dialog to change the type of an author when the edit button is clicked
"""
self.author_edit_button.setEnabled(False)
item = self.authors_list_view.currentItem()
author_id, author_type = item.data(QtCore.Qt.UserRole)
choice, ok = QtGui.QInputDialog.getItem(self, translate('SongsPlugin.EditSongForm', 'Edit Author Type'),
translate('SongsPlugin.EditSongForm', 'Choose type for this author'),
AuthorType.TranslatedTypes,
current=AuthorType.SortedTypes.index(author_type),
editable=False)
if not ok:
return
author = self.manager.get_object(Author, author_id)
author_type = AuthorType.from_translated_text(choice)
item.setData(QtCore.Qt.UserRole, (author_id, author_type))
item.setText(author.get_display_name(author_type))
def on_author_remove_button_clicked(self):
"""
Remove the author from the list when the delete button is clicked.
"""
if self.authors_list_view.count() <= 2:
self.author_remove_button.setEnabled(False)
item = self.authors_list_view.currentItem()
row = self.authors_list_view.row(item)
self.authors_list_view.takeItem(row)
def on_topic_add_button_clicked(self):
item = int(self.topics_combo_box.currentIndex())
text = self.topics_combo_box.currentText()
if item == 0 and text:
if QtGui.QMessageBox.question(
self, translate('SongsPlugin.EditSongForm', 'Add Topic'),
translate('SongsPlugin.EditSongForm', 'This topic does not exist, do you want to add it?'),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes) == QtGui.QMessageBox.Yes:
topic = Topic.populate(name=text)
self.manager.save_object(topic)
topic_item = QtGui.QListWidgetItem(str(topic.name))
topic_item.setData(QtCore.Qt.UserRole, topic.id)
self.topics_list_view.addItem(topic_item)
self.load_topics()
self.topics_combo_box.setCurrentIndex(0)
else:
return
elif item > 0:
item_id = (self.topics_combo_box.itemData(item))
topic = self.manager.get_object(Topic, item_id)
if self.topics_list_view.findItems(str(topic.name), QtCore.Qt.MatchExactly):
critical_error_message_box(
message=translate('SongsPlugin.EditSongForm', 'This topic is already in the list.'))
else:
topic_item = QtGui.QListWidgetItem(str(topic.name))
topic_item.setData(QtCore.Qt.UserRole, topic.id)
self.topics_list_view.addItem(topic_item)
self.topics_combo_box.setCurrentIndex(0)
else:
QtGui.QMessageBox.warning(
self, UiStrings().NISs,
translate('SongsPlugin.EditSongForm', 'You have not selected a valid topic. Either select a topic '
'from the list, or type in a new topic and click the "Add Topic to Song" button to add the '
'new topic.'))
def on_topic_list_view_clicked(self):
self.topic_remove_button.setEnabled(True)
def on_topic_remove_button_clicked(self):
self.topic_remove_button.setEnabled(False)
item = self.topics_list_view.currentItem()
row = self.topics_list_view.row(item)
self.topics_list_view.takeItem(row)
def on_verse_list_view_clicked(self):
self.verse_edit_chords_button.setEnabled(True)
self.verse_delete_button.setEnabled(True)
def on_verse_add_button_clicked(self):
self.verse_chords_form.set_verse('', True)
if self.verse_chords_form.exec_():
after_text, verse_tag, verse_num = self.verse_chords_form.get_verse()
verse_def = '%s%s' % (verse_tag, verse_num)
verse_list_def = '---[%s:%s]---\n' % (VerseType.translated_name(verse_tag), verse_num)
self.chords_lyrics_list.append(verse_list_def + after_text)
lyric_text = ''
for line in after_text.split('\n'):
if not line.rstrip().endswith('@'):
# Add on next lyric line, removing any chord padding (#)
lyric_text += line.replace("#", "") + '\n'
if lyric_text.endswith('\n'):
lyric_text = lyric_text.rstrip('\n')
item = QtGui.QTableWidgetItem(lyric_text)
item.setData(QtCore.Qt.UserRole, verse_def)
item.setText(lyric_text)
self.verse_list_widget.setRowCount(self.verse_list_widget.rowCount() + 1)
self.verse_list_widget.setItem(self.verse_list_widget.rowCount() - 1, 0, item)
self.tag_rows()
# Check if all verse tags are used.
self.on_verse_order_text_changed(self.verse_order_edit.text())
def on_verse_edit_chords_button_clicked(self):
item = self.verse_list_widget.currentItem()
temp_text = '\n'.join(self.chords_lyrics_list[self.verse_list_widget.currentRow()].split('\n')[1:])
if temp_text:
verse_id = item.data(QtCore.Qt.UserRole)
self.verse_chords_form.set_verse(temp_text, True, verse_id)
if self.verse_chords_form.exec_():
after_text, verse_tag, verse_num = self.verse_chords_form.get_verse()
verse_def = '%s%s' % (verse_tag, verse_num)
verse_list_def = '---[%s:%s]---\n' % (VerseType.translated_name(verse_tag), verse_num)
self.chords_lyrics_list[self.verse_list_widget.currentRow()] = verse_list_def + after_text
lyric_text = ''
for line in after_text.split('\n'):
if not line.rstrip().endswith('@'):
# Add on next lyric line, removing any chord padding (#)
lyric_text += line.replace("#", "") + '\n'
if lyric_text.endswith('\n'):
lyric_text = lyric_text.rstrip('\n')
item.setData(QtCore.Qt.UserRole, verse_def)
item.setText(lyric_text)
# number of lines has changed, repaint the list moving the data
if len(temp_text.split('\n')) != len(lyric_text.split('\n')):
temp_list = []
temp_ids = []
for row in range(self.verse_list_widget.rowCount()):
item = self.verse_list_widget.item(row, 0)
temp_list.append(item.text())
temp_ids.append(item.data(QtCore.Qt.UserRole))
self.verse_list_widget.clear()
for row, entry in enumerate(temp_list):
item = QtGui.QTableWidgetItem(entry, 0)
item.setData(QtCore.Qt.UserRole, temp_ids[row])
self.verse_list_widget.setItem(row, 0, item)
self.tag_rows()
# Check if all verse tags are used.
self.on_verse_order_text_changed(self.verse_order_edit.text())
def on_verse_edit_all_chords_button_clicked(self):
"""
Verse edit all chords button (save) pressed
:return:
"""
if not self.chords_lyrics_list == []:
verse_list = ''
for row in self.chords_lyrics_list:
verse_list += row + '\n'
self.verse_chords_form.set_verse(verse_list)
else:
verse_list = ''
if self.verse_list_widget.rowCount() > 0:
for row in range(self.verse_list_widget.rowCount()):
item = self.verse_list_widget.item(row, 0)
field = item.data(QtCore.Qt.UserRole)
verse_tag = VerseType.translated_name(field[0])
verse_num = field[1:]
verse_list += '---[%s:%s]---\n' % (verse_tag, verse_num)
verse_list += item.text()
verse_list += '\n'
self.verse_chords_form.set_verse(verse_list)
else:
self.verse_chords_form.set_verse('')
if not self.verse_chords_form.exec_():
return
verse_chords_list = self.verse_chords_form.get_all_verses()
verse_chords_list = str(verse_chords_list.replace('\r\n', '\n'))
        # Update the temporary storage of chords and lyrics information (self.song and the database
        # are updated in the save_song method). Also strip out chord lines and '#' characters and
        # update verse_list_widget.
self.chords_lyrics_list = []
self.verse_list_widget.clear()
self.verse_list_widget.setRowCount(0)
for row in self.find_verse_split.split(verse_chords_list):
for match in row.split('---['):
chords_lyrics_item = ''
for count, parts in enumerate(match.split(']---\n')):
if count == 0:
# Processing verse tag
if len(parts) == 0:
continue
                        # carefully handle user-entered verse tags
separator = parts.find(':')
if separator >= 0:
verse_name = parts[0:separator].strip()
verse_num = parts[separator + 1:].strip()
else:
verse_name = parts
verse_num = '1'
verse_index = VerseType.from_loose_input(verse_name)
verse_tag = VerseType.tags[verse_index]
# Later we need to handle v1a as well.
regex = re.compile(r'\D*(\d+)\D*')
match = regex.match(verse_num)
if match:
verse_num = match.group(1)
else:
verse_num = '1'
verse_def = '%s%s' % (verse_tag, verse_num)
verse_list_def = '---[%s:%s]---\n' % (VerseType.translated_name(verse_tag), verse_num)
else:
# Processing lyrics
if parts.endswith('\n'):
parts = parts.rstrip('\n')
lyric_parts = ''
for line in parts.split('\n'):
if not line.rstrip().endswith('@'):
# Add on next lyric line, removing any chord padding (#)
lyric_parts += line.replace("#", "") + '\n'
if lyric_parts.endswith('\n'):
lyric_parts = lyric_parts.rstrip('\n')
item = QtGui.QTableWidgetItem(lyric_parts)
item.setData(QtCore.Qt.UserRole, verse_def)
self.verse_list_widget.setRowCount(self.verse_list_widget.rowCount() + 1)
self.verse_list_widget.setItem(self.verse_list_widget.rowCount() - 1, 0, item)
self.chords_lyrics_list.append(verse_list_def + parts)
self.tag_rows()
self.verse_edit_chords_button.setEnabled(False)
self.verse_delete_button.setEnabled(False)
# Check if all verse tags are used.
self.on_verse_order_text_changed(self.verse_order_edit.text())
def on_verse_delete_button_clicked(self):
"""
Verse Delete button pressed
"""
index = self.verse_list_widget.currentRow()
self.verse_list_widget.removeRow(index)
del self.chords_lyrics_list[index]
if not self.verse_list_widget.selectedItems():
self.verse_edit_chords_button.setEnabled(False)
self.verse_delete_button.setEnabled(False)
def on_verse_order_text_changed(self, text):
"""
        Checks if the verse order is complete or missing. Shows an error message according to the state of the verse
order.
:param text: The text of the verse order edit (ignored).
"""
# Extract all verses which were used in the order.
verses_in_order = self._extract_verse_order(self.verse_order_edit.text())
# Find the verses which were not used in the order.
verses_not_used = []
for index in range(self.verse_list_widget.rowCount()):
verse = self.verse_list_widget.item(index, 0)
verse = verse.data(QtCore.Qt.UserRole)
if verse not in verses_in_order:
verses_not_used.append(verse)
# Set the label text.
label_text = ''
# No verse order was entered.
if not verses_in_order:
label_text = self.no_verse_order_entered_warning
# The verse order does not contain all verses.
elif verses_not_used:
label_text = self.not_all_verses_used_warning
self.warning_label.setText(label_text)
def on_copyright_insert_button_triggered(self):
"""
Copyright insert button pressed
"""
text = self.copyright_edit.text()
pos = self.copyright_edit.cursorPosition()
sign = SongStrings.CopyrightSymbol
text = text[:pos] + sign + text[pos:]
self.copyright_edit.setText(text)
self.copyright_edit.setFocus()
self.copyright_edit.setCursorPosition(pos + len(sign))
def on_maintenance_button_clicked(self):
"""
Maintenance button pressed
"""
temp_song_book = None
item = int(self.song_book_combo_box.currentIndex())
text = self.song_book_combo_box.currentText()
if item == 0 and text:
temp_song_book = text
self.media_item.song_maintenance_form.exec_(True)
self.load_authors()
self.load_books()
self.load_topics()
if temp_song_book:
self.song_book_combo_box.setEditText(temp_song_book)
def on_preview(self, button):
"""
Save and Preview button clicked.
        The song is valid, so ask the plugin to add it to the preview.
:param button: A button (QPushButton).
"""
log.debug('onPreview')
if button.objectName() == 'preview_button':
self.save_song(True)
Registry().execute('songs_preview')
def on_audio_add_from_file_button_clicked(self):
"""
Loads file(s) from the filesystem.
"""
filters = '%s (*)' % UiStrings().AllFiles
file_names = FileDialog.getOpenFileNames(self, translate('SongsPlugin.EditSongForm', 'Open File(s)'), '',
filters)
for filename in file_names:
item = QtGui.QListWidgetItem(os.path.split(str(filename))[1])
item.setData(QtCore.Qt.UserRole, filename)
self.audio_list_widget.addItem(item)
def on_audio_add_from_media_button_clicked(self):
"""
Loads file(s) from the media plugin.
"""
if self.media_form.exec_():
for filename in self.media_form.get_selected_files():
item = QtGui.QListWidgetItem(os.path.split(str(filename))[1])
item.setData(QtCore.Qt.UserRole, filename)
self.audio_list_widget.addItem(item)
def on_audio_remove_button_clicked(self):
"""
Removes a file from the list.
"""
row = self.audio_list_widget.currentRow()
if row == -1:
return
self.audio_list_widget.takeItem(row)
def on_audio_remove_all_button_clicked(self):
"""
Removes all files from the list.
"""
self.audio_list_widget.clear()
def on_up_button_clicked(self):
"""
Moves a file up when the user clicks the up button on the audio tab.
"""
row = self.audio_list_widget.currentRow()
if row <= 0:
return
item = self.audio_list_widget.takeItem(row)
self.audio_list_widget.insertItem(row - 1, item)
self.audio_list_widget.setCurrentRow(row - 1)
def on_down_button_clicked(self):
"""
        Moves a file down when the user clicks the down button on the audio tab.
"""
row = self.audio_list_widget.currentRow()
if row == -1 or row > self.audio_list_widget.count() - 1:
return
item = self.audio_list_widget.takeItem(row)
self.audio_list_widget.insertItem(row + 1, item)
self.audio_list_widget.setCurrentRow(row + 1)
def on_key_or_transpose_change(self):
"""
        Updates the transposed key display when the user updates the song key or transpose amount.
"""
if (self.song_key_edit.currentIndex() > -1) and (self.transpose_edit.value() != 0):
self.transposed_key_label.setText('Transposed to: ' + Chords.key_list[
(self.song_key_edit.currentIndex() + self.transpose_edit.value()) % 12])
else:
self.transposed_key_label.setText('')
def clear_caches(self):
"""
Free up auto-completion memory on dialog exit
"""
log.debug('SongEditForm.clearCaches')
self.authors = []
self.themes = []
self.books = []
self.topics = []
def reject(self):
"""
Exit Dialog and do not save
"""
log.debug('SongEditForm.reject')
self.clear_caches()
QtGui.QDialog.reject(self)
def accept(self):
"""
Exit Dialog and save song if valid
"""
log.debug('SongEditForm.accept')
self.clear_caches()
if self._validate_song():
self.save_song()
self.song = None
QtGui.QDialog.accept(self)
def save_song(self, preview=False):
"""
Get all the data from the widgets on the form, and then save it to the database. The form has been validated
and all reference items (Authors, Books and Topics) have been saved before this function is called.
:param preview: Should be ``True`` if the song is also previewed (boolean).
"""
# The Song() assignment. No database calls should be made while a
# Song() is in a partially complete state.
if not self.song:
self.song = Song()
self.song.title = self.title_edit.text()
self.song.alternate_title = self.alternative_edit.text()
self.song.song_key = self.song_key_edit.currentText()
self.song.transpose_by = self.transpose_edit.value()
self.song.copyright = self.copyright_edit.text()
# Values will be set when cleaning the song.
self.song.search_title = ''
self.song.search_lyrics = ''
self.song.verse_order = ''
self.song.comments = self.comments_edit.toPlainText()
order_text = self.verse_order_edit.text()
order = []
for item in order_text.split():
verse_tag = VerseType.tags[VerseType.from_translated_tag(item[0])]
verse_num = item[1:].lower()
order.append('%s%s' % (verse_tag, verse_num))
self.song.verse_order = ' '.join(order)
self.song.ccli_number = self.ccli_number_edit.text()
self.song.song_number = self.song_book_number_edit.text()
book_name = self.song_book_combo_box.currentText()
if book_name:
self.song.book = self.manager.get_object_filtered(Book, Book.name == book_name)
else:
self.song.book = None
theme_name = self.theme_combo_box.currentText()
if theme_name:
self.song.theme_name = theme_name
else:
self.song.theme_name = None
self._process_lyrics()
self._process_chords()
self.song.authors_songs = []
for row in range(self.authors_list_view.count()):
item = self.authors_list_view.item(row)
self.song.add_author(self.manager.get_object(Author, item.data(QtCore.Qt.UserRole)[0]),
item.data(QtCore.Qt.UserRole)[1])
self.song.topics = []
for row in range(self.topics_list_view.count()):
item = self.topics_list_view.item(row)
topic_id = (item.data(QtCore.Qt.UserRole))
topic = self.manager.get_object(Topic, topic_id)
if topic is not None:
self.song.topics.append(topic)
# Save the song here because we need a valid id for the audio files.
clean_song(self.manager, self.song)
self.manager.save_object(self.song)
audio_files = [a.file_name for a in self.song.media_files]
log.debug(audio_files)
save_path = os.path.join(AppLocation.get_section_data_path(self.media_item.plugin.name), 'audio',
str(self.song.id))
check_directory_exists(save_path)
self.song.media_files = []
files = []
for row in range(self.audio_list_widget.count()):
item = self.audio_list_widget.item(row)
filename = item.data(QtCore.Qt.UserRole)
if not filename.startswith(save_path):
old_file, filename = filename, os.path.join(save_path, os.path.split(filename)[1])
shutil.copyfile(old_file, filename)
files.append(filename)
media_file = MediaFile()
media_file.file_name = filename
media_file.type = 'audio'
media_file.weight = row
self.song.media_files.append(media_file)
for audio in audio_files:
if audio not in files:
try:
os.remove(audio)
                except OSError:
log.exception('Could not remove file: %s', audio)
if not files:
try:
os.rmdir(save_path)
except OSError:
log.exception('Could not remove directory: %s', save_path)
clean_song(self.manager, self.song)
self.manager.save_object(self.song)
self.media_item.auto_select_id = self.song.id
| gpl-2.0 | -344,718,849,953,748,350 | 46.429921 | 143 | 0.552394 | false |
zhuwbigdata/hadoop-admin-utils | ambari-utils/python2/getConfig2.py | 1 | 5465 | #!/usr/bin/python
# Get a handle to the API client
import ssl
import sys
import json
import pprint
import argparse
import requests
SERVICE_TYPE_MAP = {
'zookeeper': 'ZOOKEEPER',
'hdfs': 'HDFS',
'hbase': 'HBASE',
'yarn': 'YARN',
'oozie': 'OOZIE',
'hbase': 'HBASE',
'kafka': 'KAFKA',
}
SERVICE_ROLE_TYPE_MAP = {
'zookeeper_server': 'SERVER',
'namenode': 'NAMENODE',
'resourcemanager': 'RESOURCEMANAGER',
'oozie_server': 'OOZIE_SERVER',
'hbase_restserver': 'HBASERESTSERVER',
'kafka_broker': 'KAFKA_BROKER',
}
CONFIG_KEY_VALUE_MAP = {
'NAME_NODE': None,
'NAME_NODE_PORT': '8020',
'JOB_TRACKER': None,
'RESOURCEMANAGER_ADDRESS': '8032',
'OOZIE_URL': None,
'OOZIE_HTTP_PORT': '11000',
'OOZIE_HTTPS_PORT': '11443',
'OOZIE_USE_SSL': 'false',
'ZOOKEEPER_QUORUM': None,
'ZOOKEEPER_PORT': '2181',
'HBASE_REST_IP': None,
'HBASE_REST_PORT': '20550',
'KAFKA_BROKER': None,
'KAFKA_SECURITY_PROTOCOL': 'PLAINTEXT',
}
CONFIG_PROPERTY_MAP = {
'zk_client_port': 'clientPort',
'hdf_nn_ns': 'dfs_federation_namenode_nameservice',
'hdf_nn_port': 'namenode_port',
    'yarn_rm_address': 'yarn_resourcemanager_address',
'oozie_http_port': 'oozie_http_port',
'oozie_https_port': 'oozie_https_port',
'oozie_use_ssl': 'oozie_use_ssl',
'oozie_load_balancer': 'oozie_load_balancer',
'hbase_rs_port': 'hbase_restserver_port',
'hbase_rs_host': 'hbase_restserver_host',
'kafka_client_security_protocol': 'security.inter.broker.protocol',
}
AMBARI_DOMAIN='172.16.95.169'
AMBARI_PORT='8080'
AMBARI_USER_ID='raj_ops'
AMBARI_USER_PW='raj_ops'
def ambariREST( restAPI ) :
url = "http://"+AMBARI_DOMAIN+":"+AMBARI_PORT+restAPI
    r = requests.get(url, auth=(AMBARI_USER_ID, AMBARI_USER_PW))
return(json.loads(r.text));
def rmREST( restAPI ) :
url = "http://"+RM_DOMAIN+":"+RM_PORT+restAPI
    r = requests.get(url)
return(json.loads(r.text));
def getClusterVersionAndName() :
json_data = ambariREST("/api/v1/clusters")
cname = json_data["items"][0]["Clusters"]["cluster_name"]
cversion =json_data["items"][0]["Clusters"]["version"]
return cname, cversion, json_data;
def getAmbariHosts() :
restAPI = "/api/v1/hosts"
json_data = ambariREST(restAPI)
return(json_data);
def getConfigGroups() :
restAPI = "/api/v1/clusters/"+CLUSTER_NAME+"/config_groups"
json_data = ambariREST(restAPI)
return(json_data);
def getServiceConfigTypes() :
restAPI = "/api/v1/clusters/"+CLUSTER_NAME+"/configurations"
json_data = ambariREST(restAPI)
return(json_data);
def getServiceActualConfigurations() :
restAPI = "/api/v1/clusters/"+CLUSTER_NAME
json_data = ambariREST(restAPI)
return(json_data);
def getStackVersions() :
restAPI = "/api/v1/clusters/"+CLUSTER_NAME+"/stack_versions/"
json_data = ambariREST(restAPI)
return(json_data);
def getServices(SERVICE) :
restAPI = "/api/v1/clusters/"+CLUSTER_NAME+"/services/"+SERVICE
json_data = ambariREST(restAPI)
return(json_data);
def getResourceManagerInfo() :
restAPI = "/ws/v1/cluster/info"
json_data = rmREST(restAPI)
return(json_data);
def getResourceManagerMetrics() :
restAPI = "/ws/v1/cluster/metrics"
json_data = rmREST(restAPI)
return(json_data);
def getRMschedulerInfo() :
restAPI = "/ws/v1/cluster/scheduler"
json_data = rmREST(restAPI)
return(json_data);
def getAppsSummary() :
restAPI = "/ws/v1/cluster/apps"
json_data = rmREST(restAPI)
return(json_data);
def getAppsStatistics() :
restAPI = "/ws/v1/cluster/appstatictics"
json_data = rmREST(restAPI)
return(json_data);
def getNodesSummary() :
restAPI = "/ws/v1/cluster/nodes"
json_data = rmREST(restAPI)
return(json_data);
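# Illustrative usage sketch (not part of the original script): the cluster-scoped
# Ambari helpers above expect the global CLUSTER_NAME to be set first, e.g.
#   CLUSTER_NAME, CLUSTER_VERSION, _ = getClusterVersionAndName()
#   print getServices('HDFS')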
def main(cm_fqhn, cm_user_name, cm_user_password, cm_cluster_name, cm_tls_enabled, cm_tls_cafile):
print cm_fqhn, cm_user_name, cm_user_password, cm_cluster_name, cm_tls_enabled, cm_tls_cafile
if cm_tls_enabled == 'false':
print getClusterVersionAndName()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='get configuration from Cloudera Manager API')
parser.add_argument('--cm_fqhn', required=True,
help='Cloudera Manager FQHN')
parser.add_argument('--cm_user_name', required=True,
help='Cloudera Manager User Name')
parser.add_argument('--cm_user_password', required=True,
help='Cloudera Manager User Password')
parser.add_argument('--cm_cluster_name', required=True,
help='Cloudera Manager Cluster Name')
parser.add_argument('--cm_tls_enabled', required=True,
help='Cloudera Manager TLS enabled')
parser.add_argument('--cm_tls_cafile', required=False,
help='Cloudera Manager TLS CA file location')
args = parser.parse_args()
main(cm_fqhn = args.cm_fqhn,
cm_user_name = args.cm_user_name,
cm_user_password = args.cm_user_password,
cm_cluster_name = args.cm_cluster_name,
cm_tls_enabled = args.cm_tls_enabled,
cm_tls_cafile = args.cm_tls_cafile)
| apache-2.0 | -7,083,016,760,662,906,000 | 30.959064 | 116 | 0.613358 | false |
exowanderer/SpitzerDeepLearningNetwork | Python Scripts/spitzer_cal_NALU_train_keras.py | 1 | 14246 | from multiprocessing import set_start_method, cpu_count
set_start_method('forkserver')
import os
os.environ["OMP_NUM_THREADS"] = str(cpu_count()) # or to whatever you want
from argparse import ArgumentParser
from datetime import datetime
time_now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
ap = ArgumentParser()
ap.add_argument('-d', '--directory', type=str,
default='nalu_tf_save_dir/saves_{}'.format(time_now),
help='The tensorflow ckpt save file')
ap.add_argument('-nnl', '--n_nalu_layers', type=int, default=1,
help='Whether to use 1 (default), 2, or ... N NALU layers.')
ap.add_argument('-nnn', '--n_nalu_neurons', type=int, default=0,
help='How many features on the second NALU layer')
ap.add_argument('-ne', '--n_epochs', type=int, default=200,
help='Number of N_EPOCHS to train the network with.')
ap.add_argument('-nc', '--n_classes', type=int, default=1,
help='n_classes == 1 for Regression (default); > 1 for Classification.')
ap.add_argument('-bs', '--batch_size', type=int, default=32,
help='Batch size: number of samples per batch.')
ap.add_argument('-lr', '--learning_rate', type=float, default=1e-3,
help='Learning rate: how fast the optimizer moves up/down the gradient.')
ap.add_argument('-ts', '--test_size', type=float, default=0.75,
help='How much to split the train / test ratio')
ap.add_argument('-rs', '--random_state', type=int, default=42,
help='Integer value to initialize train/test splitting randomization')
ap.add_argument('-v', '--verbose', action="store_true",
help='Whether to set verbosity = True or False (default)')
ap.add_argument('-ds', '--data_set', type=str, default='',
help='The csv file containing the data to predict with')
try:
args = vars(ap.parse_args())
except:
args = {}
args['directory'] = ap.get_default('directory')
args['n_nalu_layers'] = ap.get_default('n_nalu_layers')
args['n_nalu_neurons'] = ap.get_default('n_nalu_neurons')
args['n_epochs'] = ap.get_default('n_epochs')
args['n_classes'] = ap.get_default('n_classes')
args['batch_size'] = ap.get_default('batch_size')
args['learning_rate'] = ap.get_default('learning_rate')
args['test_size'] = ap.get_default('test_size')
    args['random_state'] = ap.get_default('random_state')
args['verbose'] = ap.get_default('verbose')
args['data_set'] = ap.get_default('data_set')
verbose = args['verbose']
data_set_fname = args['data_set']
import pandas as pd
import numpy as np
import pdb
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler, minmax_scale
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor#, AdaBoostRegressor, GradientBoostingRegressor
from sklearn.decomposition import PCA, FastICA
from sklearn.externals import joblib
from sklearn.metrics import r2_score
import xgboost as xgb
from tqdm import tqdm
from glob import glob
from time import time
start0 = time()
print('BEGIN NEW HyperParameter Optimization.')
from sklearn.metrics import r2_score
''' NALU: Neural Arithmetic Logic Unit
NALU uses memory and logic gates to train a unique TF layer to modify the gradients of the weights.
This seems to be very similar to an LSTM layer, but for a non-RNN.
This code has been specifically implemented with tensorflow.
Code source: https://github.com/grananqvist/NALU-tf
Original paper: https://arxiv.org/abs/1808.00508 (Trask et al.)
'''
import numpy as np
import tensorflow as tf
def nalu(input_layer, num_outputs):
""" Neural Arithmetic Logic Unit tesnorflow layer
Arguments:
input_layer - A Tensor representing previous layer
    num_outputs - number of output units
Returns:
A tensor representing the output of NALU
"""
shape = (int(input_layer.shape[-1]), num_outputs)
# define variables
W_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
M_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
G = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
# operations according to paper
W = tf.tanh(W_hat) * tf.sigmoid(M_hat)
m = tf.exp(tf.matmul(tf.log(tf.abs(input_layer) + 1e-7), W))
g = tf.sigmoid(tf.matmul(input_layer, G))
a = tf.matmul(input_layer, W)
out = g * a + (1 - g) * m
return out
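# Minimal illustrative sketch (not part of the original script) showing how the
# nalu() layer above can be wired into a graph; shapes and feed values are arbitrary.
def _nalu_usage_sketch():
    x_demo = tf.placeholder(tf.float32, shape=[None, 2])
    y_demo = nalu(x_demo, 1)  # one NALU layer mapping 2 inputs to 1 output
    with tf.Session() as demo_sess:
        demo_sess.run(tf.global_variables_initializer())
        return demo_sess.run(y_demo, feed_dict={x_demo: [[3., 4.]]})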
def generate_dataset(size=10000, op='sum', n_features=2):
""" Generate dataset for NALU toy problem
Arguments:
size - number of samples to generate
op - the operation that the generated data should represent. sum | prod
Returns:
X - the dataset
Y - the dataset labels
"""
X = np.random.randint(9, size=(size, n_features))
if op == 'prod':
Y = np.prod(X, axis=1, keepdims=True)
else:
Y = np.sum(X, axis=1, keepdims=True)
return X, Y
def chisq(y_true, y_pred, y_error): return np.sum(((y_true-y_pred)/y_error)**2.)
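# NOTE (added sketch, not part of the original script): the training block below
# references `features`, `labels` and `spitzerCalRawData`, which are never defined
# in this file. A minimal loading sketch, assuming the CSV passed via --data_set
# holds the target in a 'flux' column, its uncertainty in 'fluxerr', and the
# features in the remaining columns (all column names are hypothetical):
def load_spitzer_cal_data(csv_fname):
    raw_data = pd.read_csv(csv_fname)
    feature_cols = [col for col in raw_data.columns if col not in ('flux', 'fluxerr')]
    return raw_data, raw_data[feature_cols].values, raw_data['flux'].values
if data_set_fname:
    spitzerCalRawData, features, labels = load_spitzer_cal_data(data_set_fname)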
if __name__ == "__main__":
N_FEATURES = features.shape[-1]
EXPORT_DIR = args['directory']
N_NALU_LAYERS = args['n_nalu_layers']
N_NALU_NEURONS = N_FEATURES
if args['n_nalu_neurons'] > 0: N_NALU_NEURONS = args['n_nalu_neurons']
N_CLASSES = args['n_classes'] # = 1 for regression
TEST_SIZE = args['test_size']
RANDOM_STATE = args['random_state']
N_EPOCHS = args['n_epochs']
LEARNING_RATE = args['learning_rate']
BATCH_SIZE = args['batch_size']
EXPORT_DIR = EXPORT_DIR + '_nnl{}_nnn{}_nc{}_bs{}_lr{}_ne{}_ts{}_rs{}/'.format(N_NALU_LAYERS, N_NALU_NEURONS, N_CLASSES, BATCH_SIZE, LEARNING_RATE, N_EPOCHS, TEST_SIZE, RANDOM_STATE)
print("Saving models to path: {}".format(EXPORT_DIR))
idx_train, idx_test = train_test_split(np.arange(labels.size), test_size=TEST_SIZE, random_state=RANDOM_STATE)
X_data, Y_data = features[idx_train], labels[idx_train][:,None]
    LAST_BIT = X_data.shape[0] - BATCH_SIZE * (X_data.shape[0] // BATCH_SIZE)
    # Force an integer number of batches by dropping the last "< BATCH_SIZE" samples
    X_data_use = X_data[:-LAST_BIT].copy() if LAST_BIT else X_data.copy()
    Y_data_use = Y_data[:-LAST_BIT].copy() if LAST_BIT else Y_data.copy()
N_FEATURES = X_data.shape[-1]
output_dict = {}
output_dict['loss'] = np.zeros(N_EPOCHS)
output_dict['accuracy'] = np.zeros(N_EPOCHS)
output_dict['R2_train'] = np.zeros(N_EPOCHS)
output_dict['R2_test'] = np.zeros(N_EPOCHS)
output_dict['chisq_train'] = np.zeros(N_EPOCHS)
output_dict['chisq_test'] = np.zeros(N_EPOCHS)
with tf.device("/cpu:0"):
# tf.reset_default_graph()
# define placeholders and network
X = tf.placeholder(tf.float32, shape=[None, N_FEATURES])
Y_true = tf.placeholder(tf.float32, shape=[None, 1])
# Setup NALU Layers
nalu_layers = {'nalu0':nalu(X,N_NALU_NEURONS)}
for kn in range(1, N_NALU_LAYERS):
prev_layer = nalu_layers['nalu{}'.format(kn-1)]
nalu_layers['nalu{}'.format(kn)] = nalu(prev_layer, N_NALU_NEURONS)
Y_pred = nalu(nalu_layers['nalu{}'.format(N_NALU_LAYERS-1)], N_CLASSES) # N_CLASSES = 1 for regression
# loss and train operations
loss = tf.nn.l2_loss(Y_pred - Y_true) # NALU uses mse
optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
train_op = optimizer.minimize(loss)
# Add an op to initialize the variables.
init_op = tf.global_variables_initializer()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()#max_to_keep=N_EPOCHS)
sess_config = tf.ConfigProto(
device_count={"CPU": cpu_count()},
inter_op_parallelism_threads=cpu_count(),
intra_op_parallelism_threads=cpu_count())
with tf.Session(config=sess_config) as sess:
''' Tensorboard Redouts'''
''' Training R-Squared Score'''
total_error = tf.reduce_sum(tf.square(tf.subtract(Y_true, tf.reduce_mean(Y_true))))
unexplained_error = tf.reduce_sum(tf.square(tf.subtract(Y_true, Y_pred)))
R_squared = tf.subtract(1.0, tf.div(unexplained_error, total_error))
# ''' Testing R-Squared Score'''
# Y_pred_test = Y_pred.eval(feed_dict={X: features[idx_test]})
# total_error_test = tf.reduce_sum(tf.square(tf.subtract(Y_data_use, tf.reduce_mean(Y_data_use))))
# unexplained_error_test = tf.reduce_sum(tf.square(tf.subtract(Y_data_use, Y_pred_test)))
# R_squared_test = tf.subtract(1, tf.div(unexplained_error, total_error))
''' Loss and RMSE '''
squared_error = tf.square(tf.subtract(Y_true, Y_pred))
loss = tf.reduce_sum(tf.sqrt(tf.cast(squared_error, tf.float32)))
rmse = tf.sqrt(tf.reduce_mean(tf.cast(squared_error, tf.float32)))
''' Declare Scalar Tensorboard Terms'''
tf.summary.scalar('loss', loss)
tf.summary.scalar('RMSE', rmse)
tf.summary.scalar('R_sqrd', R_squared)
''' Declare Histogram Tensorboard Terms'''
# Squared Error Histogram
tf.summary.histogram('SqErr Hist', squared_error)
# NALU Layers Histogram
for kn in range(N_NALU_LAYERS):
tf.summary.histogram('NALU{}'.format(kn), nalu_layers['nalu{}'.format(kn)])
''' Merge all the summaries and write them out to `export_dir` + `/logs_train_`time_now`` '''
merged = tf.summary.merge_all()
''' Output all summaries to `export_dir` + `/logs_train_`time_now`` '''
train_writer = tf.summary.FileWriter(EXPORT_DIR + '/logs_train_{}'.format(time_now),sess.graph)
# test_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/test')
''' END Tensorboard Readout Step'''
sess.run(init_op)
best_test_r2 = 0
for ep in tqdm(range(N_EPOCHS)):
i = 0
gts = 0
# for k in range(N_EPOCHS):
# batch_now = range(k*N_EPOCHS, (k+1)*N_EPOCHS)
while i < len(X_data_use):
xs, ys = X_data_use[i:i+BATCH_SIZE], Y_data_use[i:i+BATCH_SIZE]
_, ys_pred, l = sess.run([train_op, Y_pred, loss],
feed_dict={X: xs, Y_true: ys})
# calculate number of correct predictions from batch
gts += np.sum(np.isclose(ys, ys_pred, atol=1e-4, rtol=1e-4))
i += BATCH_SIZE
ytest_pred = Y_pred.eval(feed_dict={X: features[idx_test]})
test_r2 = r2_score(labels[idx_test][:,None], ytest_pred)
# print("Test R2 Score: {}".format(test_r2_score))
acc = gts/len(Y_data_use)
train_r2 = r2_score(ys, ys_pred)
print('epoch {}, loss: {:.5}, accuracy: {:.5}, Batch R2: {:.5}, Test R2: {:.5}'.format(ep, l, acc, train_r2, test_r2))
output_dict['loss'][ep] = l
output_dict['accuracy'][ep] = acc
output_dict['R2_train'][ep] = train_r2
output_dict['R2_test'][ep] = test_r2
output_dict['chisq_train'][ep] = chisq(ys.flatten(), ys_pred.flatten(), spitzerCalRawData['fluxerr'][i:i+BATCH_SIZE])
output_dict['chisq_test'][ep] = chisq(labels[idx_test], ytest_pred.flatten(), spitzerCalRawData['fluxerr'][idx_test])
save_path = saver.save(sess, EXPORT_DIR + "model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format(ep, l, acc, train_r2, test_r2))
# print("Model saved in path: %s" % save_path)
if test_r2 >= best_test_r2:
best_test_r2 = test_r2
''' Store the Best Scored Test-R2 '''
save_path = saver.save(sess, EXPORT_DIR + "best_test_r2/model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format(ep, l, acc, train_r2, test_r2))
ep = '_FINAL'
save_path = saver.save(sess, EXPORT_DIR+ "model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format(ep, l, acc, train_r2, test_r2))
print("Model saved in path: %s" % save_path)
try:
pd.DataFrame(output_dict, index=range(N_EPOCHS)).to_csv(EXPORT_DIR+ "model_loss_acc_BatchR2_TestR2_DataFrame.csv")
except Exception as e:
print('DataFrame to CSV broke because', str(e))
'''
with tf.name_scope("loss"):
def tf_nll(labels, output, uncs, coeff=1):
error = output - labels
return tf.reduce_sum(tf.divide(tf.squared_difference(output, labels) , tf.square(uncs)))# + tf.log(tf.square(uncs))
#return tf.reduce_sum(1 * (coeff * np.log(2*np.pi) + coeff * tf.log(uncs) + (0.5/uncs) * tf.pow(error, 2)))
negloglike = tf_nll(labels=y, output=output, uncs=unc)
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([negloglike] + reg_losses, name="chisq")
with tf.name_scope("eval"):
accuracy = tf.reduce_mean(tf.squared_difference(output, y, name="accuracy"))
SqErrRatio= tf.divide(accuracy, tf.reduce_mean(tf.squared_difference(y, tf.reduce_mean(y))))
r2_acc = 1.0 - SqErrRatio
chsiqMean = tf_nll(labels=y, output=tf.reduce_mean(y), uncs=unc)
chisqModel= tf_nll(labels=y, output=output, uncs=unc)
rho2_acc = 1.0 - chisqModel / chsiqMean"
]
},mse_summary = tf.summary.scalar('train_acc' , accuracy )
loss_summary = tf.summary.scalar('loss' , loss )
nll_summary = tf.summary.scalar('negloglike', negloglike)
r2s_summary = tf.summary.scalar('r2_acc' , r2_acc )
p2s_summary = tf.summary.scalar('rho2_acc' , rho2_acc )
val_summary = tf.summary.scalar('val_acc' , accuracy )
# hid1_hist = tf.summary.histogram('hidden1', hidden1)
# hid2_hist = tf.summary.histogram('hidden1', hidden1)
# hid3_hist = tf.summary.histogram('hidden1', hidden1)
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
''' | mit | -2,823,571,124,492,525,000 | 40.176301 | 186 | 0.610908 | false |
cpn18/track-chart | gps-pi/nmea.py | 1 | 1791 | #!/usr/bin/python3
"""
NMEA Utils
"""
def tpv_to_json(report):
if report is None:
return {"class": "TPV", "mode": 0}
tpv = {
'class': report['class'],
'mode': report['mode'],
}
for field in ['device', 'status', 'time', 'altHAE', 'altMSL', 'alt',
'climb', 'datum', 'depth', 'dgpsAge', 'dgpsSta',
'epc', 'epd', 'eph', 'eps', 'ept', 'epx', 'epy', 'epv',
'geoidSep', 'lat', 'leapseconds', 'lon', 'track', 'magtrack',
'magvar', 'speed', 'ecefx', 'ecefy', 'ecefz', 'ecefpAcc',
'ecefvx', 'ecefvy', 'ecefvz', 'exefvAcc', 'sep', 'relD',
'relE', 'relN', 'velD', 'velE', 'velN', 'wanglem', 'wangler',
'wanglet', 'wspeedr', 'wspeedt']:
if field in report:
tpv[field] = report[field]
return tpv
def sky_to_json(report):
if report is None:
return {"class": "SKY", "satellites": []}
sky = {
'class': report['class'],
'satellites': [],
}
for field in ['device', 'time', 'gdop', 'hdop', 'pdop', 'tdop', 'vdop',
'xdop', 'ydop']:
if field in report:
sky[field] = report[field]
for i in range(len(report['satellites'])):
sat = report['satellites'][i]
prn = {
"PRN": sat['PRN'],
"used": sat['used'],
}
for field in ['az', 'el', 'ss', 'gnssid', 'svid', 'sigid',
'freqid', 'health']:
if field in sat:
prn[field] = sat[field]
sky['satellites'].append(prn)
return sky
def calc_used(sky):
num_sat = len(sky['satellites'])
num_used = 0
for i in range(num_sat):
if sky['satellites'][i]['used'] is True:
num_used += 1
return (num_used, num_sat)
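if __name__ == "__main__":
    # Illustrative self-check with hand-built, gpsd-style reports
    # (the field values below are made up for demonstration):
    demo_tpv = {"class": "TPV", "mode": 3, "lat": 43.08, "lon": -70.76,
                "speed": 1.2, "time": "2021-01-01T00:00:00.000Z"}
    print(tpv_to_json(demo_tpv))
    demo_sky = {"class": "SKY", "satellites": [
        {"PRN": 1, "used": True, "az": 45, "el": 30, "ss": 40},
        {"PRN": 7, "used": False}]}
    print(calc_used(sky_to_json(demo_sky)))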
| gpl-3.0 | 2,407,027,658,398,743,600 | 31.563636 | 75 | 0.47962 | false |
paboldin/rally | rally/exceptions.py | 1 | 6958 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from rally.common.i18n import _
from rally.common import log as logging
LOG = logging.getLogger(__name__)
class RallyException(Exception):
"""Base Rally Exception
To correctly use this class, inherit from it and define
a "msg_fmt" property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("%(message)s")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if "%(message)s" in self.msg_fmt:
kwargs.update({"message": message})
super(RallyException, self).__init__(self.msg_fmt % kwargs)
def format_message(self):
return six.text_type(self)
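# A hypothetical sketch of the usage pattern described in the RallyException
# docstring above (the subclass below is illustrative only, not part of rally):
#     class QuotaExceededException(RallyException):
#         msg_fmt = _("Quota exceeded for %(resource)s: limit is %(limit)d")
#     raise QuotaExceededException(resource="instances", limit=10)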
class ImmutableException(RallyException):
msg_fmt = _("This object is immutable.")
class InvalidArgumentsException(RallyException):
msg_fmt = _("Invalid arguments: '%(message)s'")
class InvalidConfigException(RallyException):
msg_fmt = _("This config has invalid schema: `%(message)s`")
class InvalidRunnerResult(RallyException):
msg_fmt = _("Type of result of `%(name)s` runner should be"
" `base.ScenarioRunnerResult`. Got: `%(results_type)s`")
class InvalidTaskException(InvalidConfigException):
msg_fmt = _("Task config is invalid: `%(message)s`")
class NotFoundScenarios(InvalidTaskException):
msg_fmt = _("There are no benchmark scenarios with names: `%(names)s`.")
class InvalidBenchmarkConfig(InvalidTaskException):
msg_fmt = _("Input task is invalid!\n\n"
"Benchmark %(name)s[%(pos)s] has wrong configuration"
"\nBenchmark configuration:\n%(config)s\n"
"\nReason:\n %(reason)s")
class NotFoundException(RallyException):
msg_fmt = _("The resource can not be found.")
class PluginNotFound(NotFoundException):
msg_fmt = _("There is no plugin with name: %(name)s in "
"%(namespace)s namespace.")
class PluginWithSuchNameExists(RallyException):
msg_fmt = _("Plugin with such name: %(name)s already exists in "
"%(namespace)s namespace")
class NoSuchConfigField(NotFoundException):
msg_fmt = _("There is no field in the task config with name `%(name)s`.")
class NoSuchRole(NotFoundException):
msg_fmt = _("There is no role with name `%(role)s`.")
class TaskNotFound(NotFoundException):
msg_fmt = _("Task with uuid=%(uuid)s not found.")
class DeploymentNotFound(NotFoundException):
msg_fmt = _("Deployment %(deployment)s not found.")
class DeploymentNameExists(RallyException):
msg_fmt = _("Deployment name '%(deployment)s' already registered.")
class DeploymentIsBusy(RallyException):
msg_fmt = _("There are allocated resources for the deployment with "
"uuid=%(uuid)s.")
class RallyAssertionError(RallyException):
msg_fmt = _("Assertion error: %(message)s")
class ResourceNotFound(NotFoundException):
msg_fmt = _("Resource with id=%(id)s not found.")
class TimeoutException(RallyException):
msg_fmt = _("Rally tired waiting for %(resource_type)s %(resource_name)s:"
"%(resource_id)s to become %(desired_status)s current "
"status %(resource_status)s")
class GetResourceFailure(RallyException):
msg_fmt = _("Failed to get the resource %(resource)s: %(err)s")
class GetResourceNotFound(GetResourceFailure):
msg_fmt = _("Resource %(resource)s is not found.")
class GetResourceErrorStatus(GetResourceFailure):
msg_fmt = _("Resource %(resource)s has %(status)s status.\n"
"Fault: %(fault)s")
class ScriptError(RallyException):
msg_fmt = _("Script execution failed: %(message)s")
class TaskInvalidStatus(RallyException):
msg_fmt = _("Task `%(uuid)s` in `%(actual)s` status but `%(require)s` is "
"required.")
class ChecksumMismatch(RallyException):
msg_fmt = _("Checksum mismatch for image: %(url)s")
class InvalidAdminException(InvalidArgumentsException):
msg_fmt = _("user %(username)s doesn't have 'admin' role")
class InvalidEndpointsException(InvalidArgumentsException):
msg_fmt = _("wrong keystone credentials specified in your endpoint"
" properties. (HTTP 401)")
class HostUnreachableException(InvalidArgumentsException):
msg_fmt = _("unable to establish connection to the remote host: %(url)s")
class InvalidScenarioArgument(RallyException):
msg_fmt = _("Invalid scenario argument: '%(message)s'")
class BenchmarkSetupFailure(RallyException):
msg_fmt = _("Unable to setup benchmark: '%(message)s'")
class ContextSetupFailure(RallyException):
msg_fmt = _("Unable to setup context '%(ctx_name)s': '%(msg)s'")
class ValidationError(RallyException):
msg_fmt = _("Validation error: %(message)s")
class NoNodesFound(RallyException):
msg_fmt = _("There is no nodes matching filters: %(filters)r")
class UnknownRelease(RallyException):
msg_fmt = _("Unknown release '%(release)s'")
class CleanUpException(RallyException):
msg_fmt = _("Cleanup failed.")
class ImageCleanUpException(CleanUpException):
msg_fmt = _("Image Deletion Failed")
class IncompatiblePythonVersion(RallyException):
msg_fmt = _("Incompatible python version found '%(version)s', "
"required '%(required_version)s'")
class WorkerNotFound(NotFoundException):
msg_fmt = _("Worker %(worker)s could not be found")
class WorkerAlreadyRegistered(RallyException):
msg_fmt = _("Worker %(worker)s already registered")
class SaharaClusterFailure(RallyException):
msg_fmt = _("Sahara cluster %(name)s has failed to %(action)s. "
"Reason: '%(reason)s'")
class LiveMigrateException(RallyException):
msg_fmt = _("Live Migration failed: %(message)s")
class MigrateException(RallyException):
msg_fmt = _("Migration failed: %(message)s")
class InvalidHostException(RallyException):
msg_fmt = _("Live Migration failed: %(message)s")
class MultipleMatchesFound(RallyException):
msg_fmt = _("Found multiple %(needle)s: %(haystack)s")
class TempestConfigCreationFailure(RallyException):
msg_fmt = _("Unable to create Tempest config file: %(message)s")
class TempestResourceCreationFailure(RallyException):
msg_fmt = _("Unable to create resource needed for Tempest: %(message)s")
| apache-2.0 | -226,115,050,532,442,530 | 27.871369 | 78 | 0.685254 | false |
GammaC0de/pyload | src/pyload/plugins/addons/Captcha9Kw.py | 1 | 9746 | # -*- coding: utf-8 -*-
import base64
import re
import time
import urllib.parse
from pyload.core.network.http.exceptions import BadHeader
from ..base.addon import BaseAddon, threaded
class Captcha9Kw(BaseAddon):
__name__ = "Captcha9Kw"
__type__ = "addon"
__version__ = "0.38"
__status__ = "testing"
__pyload_version__ = "0.5"
__config__ = [
("enabled", "bool", "Activated", False),
("check_client", "bool", "Don't use if client is connected", True),
("confirm", "bool", "Confirm Captcha (cost +6 credits)", False),
("captchaperhour", "int", "Captcha per hour", "9999"),
("captchapermin", "int", "Captcha per minute", "9999"),
("prio", "int", "Priority (max 10)(cost +0 -> +10 credits)", "0"),
("queue", "int", "Max. Queue (max 999)", "50"),
(
"hoster_options",
"str",
"Hoster options (format pluginname;prio 1;selfsolve 1;confirm 1;timeout 900|...)",
"",
),
(
"selfsolve",
"bool",
"Selfsolve (manually solve your captcha in your 9kw client if active)",
False,
),
(
"solve_interactive",
"bool",
"Solve ReCaptcha Interactive (cost 30 credits)",
True,
),
("passkey", "password", "API key", ""),
("timeout", "int", "Timeout in seconds (min 60, max 3999)", "900"),
]
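    # An example value for the "hoster_options" entry above, following the
    # documented format (plugin names here are hypothetical):
    #   "UploadedTo;prio 5;timeout 600|ShareonlineBiz;confirm 1;selfsolve 1"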
__description__ = """Send captchas to 9kw.eu"""
__license__ = "GPLv3"
__authors__ = [
("RaNaN", "[email protected]"),
("Walter Purcaro", "[email protected]"),
("GammaC0de", "nitzo2001[AT]yahho[DOT]com"),
]
API_URL = "https://www.9kw.eu/index.cgi"
def get_credits(self):
res = self.load(
self.API_URL,
get={
"apikey": self.config.get("passkey"),
"pyload": "1",
"source": "pyload",
"action": "usercaptchaguthaben",
},
)
if res.isdigit():
self.log_info(self._("{} credits left").format(res))
credits = self.info["credits"] = int(res)
return credits
else:
self.log_error(res)
return 0
@threaded
def _process_captcha(self, task):
if task.is_interactive():
url_p = urllib.parse.urlparse(task.captcha_params["url"])
if url_p.scheme not in ("http", "https"):
self.log_error(self._("Invalid url"))
return
post_data = {
"pageurl": "{}://{}/".format(url_p.scheme, url_p.netloc),
"data-sitekey": task.captcha_params["sitekey"],
"securetoken": task.captcha_params["securetoken"] or "",
}
else:
try:
with open(task.captcha_params["file"], mode="rb") as fp:
data = fp.read()
except IOError as exc:
self.log_error(exc)
return
post_data = {"file-upload-01": base64.b64encode(data)}
pluginname = task.captcha_params["plugin"]
option = {
"min": 2,
"max": 50,
"phrase": 0,
"numeric": 0,
"case_sensitive": 0,
"math": 0,
"prio": min(max(self.config.get("prio"), 0), 10),
"confirm": self.config.get("confirm"),
"timeout": min(max(self.config.get("timeout"), 300), 3999),
"selfsolve": self.config.get("selfsolve"),
"cph": self.config.get("captchaperhour"),
"cpm": self.config.get("captchapermin"),
}
for opt in self.config.get("hoster_options", "").split("|"):
if not opt:
continue
            details = [x.strip() for x in opt.split(";")]
            if not details or details[0].lower() != pluginname.lower():
continue
for d in details:
hosteroption = d.split(" ")
if len(hosteroption) < 2 or not hosteroption[1].isdigit():
continue
o = hosteroption[0].lower()
if o in option:
option[o] = hosteroption[1]
break
post_data.update(
{
"apikey": self.config.get("passkey"),
"prio": option["prio"],
"confirm": option["confirm"],
"maxtimeout": option["timeout"],
"selfsolve": option["selfsolve"],
"captchaperhour": option["cph"],
"captchapermin": option["cpm"],
"case-sensitive": option["case_sensitive"],
"min_len": option["min"],
"max_len": option["max"],
"phrase": option["phrase"],
"numeric": option["numeric"],
"math": option["math"],
"oldsource": pluginname,
"pyload": 1,
"source": "pyload",
"base64": 0 if task.is_interactive() else 1,
"mouse": 1 if task.is_positional() else 0,
"interactive": 1 if task.is_interactive() else 0,
"action": "usercaptchaupload",
}
)
for _ in range(5):
try:
res = self.load(self.API_URL, post=post_data)
except BadHeader as exc:
res = exc.content
time.sleep(3)
else:
if res and res.isdigit():
break
else:
self.log_error(self._("Bad request: {}").format(res))
return
self.log_debug(
"NewCaptchaID ticket: {}".format(res), task.captcha_params.get("file", "")
)
task.data["ticket"] = res
for _ in range(int(self.config.get("timeout") // 5)):
result = self.load(
self.API_URL,
get={
"apikey": self.config.get("passkey"),
"id": res,
"pyload": "1",
"info": "1",
"source": "pyload",
"action": "usercaptchacorrectdata",
},
)
if not result or result == "NO DATA":
time.sleep(5)
else:
break
else:
self.log_debug(f"Could not send request: {res}")
result = None
self.log_info(self._("Captcha result for ticket {}: {}").format(res, result))
task.set_result(result)
def captcha_task(self, task):
if task.is_interactive():
if task.captcha_params[
"captcha_plugin"
] != "ReCaptcha" or not self.config.get("solve_interactive"):
return
else:
if not task.is_textual() and not task.is_positional():
return
if not self.config.get("passkey"):
return
if self.pyload.is_client_connected() and self.config.get("check_client"):
return
credits = self.get_credits()
if not credits:
self.log_error(self._("Your captcha 9kw.eu account has not enough credits"))
return
max_queue = min(self.config.get("queue"), 999)
timeout = min(max(self.config.get("timeout"), 300), 3999)
pluginname = task.captcha_params["plugin"]
for _ in range(5):
servercheck = self.load("http://www.9kw.eu/grafik/servercheck.txt")
if max_queue > int(re.search(r"queue=(\d+)", servercheck).group(1)):
break
time.sleep(10)
else:
self.log_error(self._("Too many captchas in queue"))
return
for opt in self.config.get("hoster_options", "").split("|"):
if not opt:
continue
            details = [x.strip() for x in opt.split(":")]
            if not details or details[0].lower() != pluginname.lower():
continue
for d in details:
hosteroption = d.split("=")
if (
len(hosteroption) > 1
and hosteroption[0].lower() == "timeout"
and hosteroption[1].isdigit()
):
timeout = int(hosteroption[1])
break
task.handler.append(self)
task.set_waiting(timeout)
self._process_captcha(task)
def _captcha_response(self, task, correct):
request_type = "correct" if correct else "refund"
if "ticket" not in task.data:
self.log_debug(
"No CaptchaID for {} request (task: {})".format(request_type, task)
)
return
passkey = self.config.get("passkey")
for _ in range(3):
res = self.load(
self.API_URL,
get={
"action": "usercaptchacorrectback",
"apikey": passkey,
"api_key": passkey,
"correct": "1" if correct else "2",
"pyload": "1",
"source": "pyload",
"id": task.data["ticket"],
},
)
self.log_debug(f"Request {request_type}: {res}")
if res == "OK":
break
time.sleep(5)
else:
self.log_debug(f"Could not send {request_type} request: {res}")
def captcha_correct(self, task):
self._captcha_response(task, True)
def captcha_invalid(self, task):
self._captcha_response(task, False)
| agpl-3.0 | -3,479,573,208,365,034,500 | 29.744479 | 94 | 0.467987 | false |
PaulEcoffet/stonewallsgate | dunwallsgate/soundmanager.py | 1 | 1920 | import os.path
import pygame.mixer
import data
LOOP = -1
def load_music(music_ref):
"""
    Load a piece of music into memory but do not play it
    music_ref - the reference of the music
"""
music_path = data.get_sound_path(
os.path.join("music", music_ref + ".ogg"))
pygame.mixer.music.load(music_path)
def play_music(music_ref=None, loops=0, start=0.0):
"""
    Play the music `music_ref`, repeating it `loops` times
    and starting at second `start`.
    If loops = -1, the music is played indefinitely
"""
if music_ref:
load_music(music_ref)
pygame.mixer.music.play(loops, start)
def loop_music(music_ref=None):
"""
    Play the music `music_ref` in an infinite loop.
"""
play_music(music_ref, LOOP)
def stop_music(fadeout_time=0):
"""
    Stop the music currently playing.
    If fadeout_time > 0, the music fades
    out over `fadeout_time` ms.
"""
if fadeout_time > 0:
pygame.mixer.music.fadeout(fadeout_time)
else:
pygame.mixer.music.stop()
def toggle_music(fadeout_time=0):
"""
    Start the music if it is not already playing,
    otherwise stop it.
"""
if pygame.mixer.music.get_busy():
stop_music(fadeout_time)
else:
play_music()
def set_music_volume(volume):
"""
    Set the music volume.
"""
pygame.mixer.music.set_volume(volume)
def get_music_volume():
"""
    Return the music volume.
"""
return pygame.mixer.music.get_volume()
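# Minimal usage sketch (illustrative only, not part of the original module; "menu" is an
# assumed music reference shipped with the game's sound data):
#   loop_music("menu")
#   set_music_volume(0.5)
#   stop_music(fadeout_time=1000)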
def play_sound(sound_ref, loops=0, maxtime=0, fade_ms=0):
"""
    Play the sound referenced by `sound_ref` and repeat it
    `loops` times.
"""
sound_path = data.get_sound_path(
os.path.join("sounds", sound_ref + ".ogg"))
sound = pygame.mixer.Sound(sound_path)
pygame.mixer.find_channel().play(sound, loops, maxtime, fade_ms)
| gpl-2.0 | -166,225,870,081,417,380 | 21.951807 | 68 | 0.626247 | false |
Joergen/zamboni | sites/landfill/settings_base.py | 1 | 5699 | """private_base will be populated from puppet and placed in this directory"""
import logging
import os
import dj_database_url
from lib.settings_base import CACHE_PREFIX, ES_INDEXES, KNOWN_PROXIES, LOGGING
from .. import splitstrip
import private_base as private
ENGAGE_ROBOTS = False
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = private.EMAIL_HOST
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
REDIRECT_SECRET_KEY = private.REDIRECT_SECRET_KEY
ADMINS = ()
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'mysql_pool'
DATABASES['default']['sa_pool_key'] = 'master'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave'] = dj_database_url.parse(private.DATABASES_SLAVE_URL)
DATABASES['slave']['ENGINE'] = 'mysql_pool'
DATABASES['slave']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave']['sa_pool_key'] = 'slave'
SERVICES_DATABASE = dj_database_url.parse(private.SERVICES_DATABASE_URL)
DATABASE_POOL_ARGS = {
'max_overflow': 10,
'pool_size': 5,
'recycle': 30
}
SLAVE_DATABASES = ['slave']
CACHES = {
'default': {
'BACKEND': 'caching.backends.memcached.MemcachedCache',
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'BACKEND': 'memcachepool.cache.UMemcacheCache',
'LOCATION': splitstrip(private.CACHES_DEFAULT_LOCATION),
'TIMEOUT': 500,
'KEY_PREFIX': CACHE_PREFIX,
},
}
SECRET_KEY = private.SECRET_KEY
LOG_LEVEL = logging.DEBUG
## Celery
BROKER_URL = private.BROKER_URL
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
NETAPP_STORAGE = private.NETAPP_STORAGE_ROOT + '/shared_storage'
MIRROR_STAGE_PATH = private.NETAPP_STORAGE_ROOT + '/public-staging'
GUARDED_ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/guarded-addons'
UPLOADS_PATH = NETAPP_STORAGE + '/uploads'
USERPICS_PATH = UPLOADS_PATH + '/userpics'
ADDON_ICONS_PATH = UPLOADS_PATH + '/addon_icons'
COLLECTIONS_ICON_PATH = UPLOADS_PATH + '/collection_icons'
IMAGEASSETS_PATH = UPLOADS_PATH + '/imageassets'
REVIEWER_ATTACHMENTS_PATH = UPLOADS_PATH + '/reviewer_attachment'
PREVIEWS_PATH = UPLOADS_PATH + '/previews'
SIGNED_APPS_PATH = NETAPP_STORAGE + '/signed_apps'
SIGNED_APPS_REVIEWER_PATH = NETAPP_STORAGE + '/signed_apps_reviewer'
PREVIEW_THUMBNAIL_PATH = PREVIEWS_PATH + '/thumbs/%s/%d.png'
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
HERA = []
LOGGING['loggers'].update({
'z.task': { 'level': logging.DEBUG },
'z.hera': { 'level': logging.INFO },
'z.redis': { 'level': logging.DEBUG },
'z.pool': { 'level': logging.ERROR },
})
REDIS_BACKEND = private.REDIS_BACKENDS_CACHE
REDIS_BACKENDS = {
'cache': private.REDIS_BACKENDS_CACHE,
'cache_slave': private.REDIS_BACKENDS_CACHE_SLAVE,
'master': private.REDIS_BACKENDS_MASTER,
'slave': private.REDIS_BACKENDS_SLAVE,
}
CACHE_MACHINE_USE_REDIS = True
RECAPTCHA_PUBLIC_KEY = private.RECAPTCHA_PUBLIC_KEY
RECAPTCHA_PRIVATE_KEY = private.RECAPTCHA_PRIVATE_KEY
RECAPTCHA_URL = ('https://www.google.com/recaptcha/api/challenge?k=%s' % RECAPTCHA_PUBLIC_KEY)
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
PACKAGER_PATH = os.path.join(TMP_PATH, 'packager')
ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/files'
PERF_THRESHOLD = 20
SPIDERMONKEY = '/usr/bin/tracemonkey'
# Remove DetectMobileMiddleware from middleware in production.
detect = 'mobility.middleware.DetectMobileMiddleware'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = private.RESPONSYS_ID
CRONJOB_LOCK_PREFIX = 'addons-landfill'
BUILDER_SECRET_KEY = private.BUILDER_SECRET_KEY
BUILDER_VERSIONS_URL = "https://builder-addons-dev.allizom.org/repackage/sdk-versions/"
ES_HOSTS = splitstrip(private.ES_HOSTS)
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_landfill' % v) for k, v in ES_INDEXES.items())
BUILDER_UPGRADE_URL = "https://builder-addons-dev.allizom.org/repackage/rebuild/"
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
GRAPHITE_HOST = private.GRAPHITE_HOST
GRAPHITE_PORT = private.GRAPHITE_PORT
GRAPHITE_PREFIX = private.GRAPHITE_PREFIX
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
EXPOSE_VALIDATOR_TRACEBACKS = True
KNOWN_PROXIES += ['10.2.83.105',
'10.2.83.106',
'10.2.83.107',
'10.8.83.200',
'10.8.83.201',
'10.8.83.202',
'10.8.83.203',
'10.8.83.204',
'10.8.83.210',
'10.8.83.211',
'10.8.83.212',
'10.8.83.213',
'10.8.83.214',
'10.8.83.215',
'10.8.83.251',
'10.8.83.252',
'10.8.83.253',
]
NEW_FEATURES = True
PERF_TEST_URL = 'http://talos-addon-master1.amotest.scl1.mozilla.com/trigger/trigger.cgi'
REDIRECT_URL = 'https://outgoing.allizom.org/v1/'
CLEANCSS_BIN = 'cleancss'
UGLIFY_BIN = 'uglifyjs'
CELERYD_TASK_SOFT_TIME_LIMIT = 240
LESS_PREPROCESS = True
XSENDFILE_HEADER = 'X-Accel-Redirect'
METLOG_CONF = {
'plugins': {'cef': ('metlog_cef.cef_plugin:config_plugin', {})},
'sender': {
'class': 'metlog.senders.UdpSender',
'host': splitstrip(private.METLOG_CONF_SENDER_HOST),
'port': private.METLOG_CONF_SENDER_PORT,
},
}
USE_METLOG_FOR_CEF = True
USE_METLOG_FOR_TASTYPIE = True
ALLOW_SELF_REVIEWS = True
AES_KEYS = private.AES_KEYS
| bsd-3-clause | -8,507,801,712,565,959,000 | 28.225641 | 94 | 0.670819 | false |
ddinsight/dd-streamworks | stream_worker/devmodule/production/vidnet/__init__.py | 1 | 40613 | # -*- coding: utf-8 -*-
#
# Copyright 2015 AirPlug Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Not all aatLog items are included in Open DD.
You should ignore some items & tables in this module.
Items not relevant to Open DD are specified.
"""
__author__ = 'jaylee'
import time
from decimal import Decimal
import worker
from worker import log
# traffic mode
class TrafficMode:
SYSTEM = 0
AAT_AGENT = 1
# logType
class aatLogType:
END = 0
START = 1
PERIOD = 2
USER_PAUSE = 3
USER_RESUME = 4
NET_CHANGE = 5
NET_PAUSE = 6
NET_RESUME = 7
NET_CHANGE_DURING_PAUSE = 8
BITRATE_CHANGE = 9
SEEK = 10
UNKNOWN = -1
class VidnetType:
VIDNET_TYPE_PLAY = 0
VIDNET_TYPE_NET_PAUSE = 1
VIDNET_TYPE_NET_RESUME = 2
VIDNET_TYPE_USER_PAUSE = 3
VIDNET_TYPE_USER_RESUME = 4
VIDNET_TYPE_UNKNOWN = 5
class ExceptionType:
NO_AATLOG = 1 # AAT
INVALID_LOG_PATTERN = 2 # live
INVALID_LOGTYPE_IN_VIDET = 3
TRAFFIC_OVERFLOW = 4 # vidnet
class NetworkType:
WIFI = 0
CELLULAR = 1
UNKNOWN = 2
#Following function is not related to Open DD. You should ignore it
def updateMcc(deviceID, plmnId):
try:
rcur = None
strSQL = ""
if plmnId.isdigit() == True and int(plmnId) > 0:
rcur = worker.dbmanager.allocDictCursor('myapmain')
strSQL = "UPDATE mdev SET plmnid = '%s' WHERE mosid = '%s' and (plmnid <= ' ' or plmnid is NULL)" % (plmnId, deviceID)
ret = rcur.execute(strSQL)
except Exception, e:
log.error("updateMcc: %s" % e)
log.error("updateMcc: [deviceID:%s, plmnid:%s, strSQL:%s]" % (deviceID, plmnId, strSQL))
finally:
if rcur <> None:
worker.dbmanager.freeCursor(rcur)
def updateVidLog(waveCursor, vLog, row):
try:
strSQL = ""
strSQL = """UPDATE vidsession_log
SET playTime = if(playTime >= %d, playTime - %d, playTime),
pauseTime = if(pauseTime >= %d, pauseTime - %d, pauseTime),
elapsedTime = if(elapsedTime >= %d, elapsedTime - %d, elapsedTime),
cellRxBytes = if(cellRxBytes >= %d, cellRxBytes - %d, cellRxBytes),
wfRxBytes = if(wfRxBytes >= %d, wfRxBytes - %d, wfRxBytes),
cellDuration = if(cellDuration >= %d, cellDuration - %d, cellDuration),
wfDuration = if(wfDuration >= %d, wfDuration -%d, wfDuration),
lstuptmp = unix_timestamp()
WHERE playSessionID = '%s' and tTM = %s """ % (vLog['playTime'], vLog['playTime'],
vLog['pauseTime'], vLog['pauseTime'],
vLog['elapsedTime'], vLog['elapsedTime'],
vLog['cellRxBytes'], vLog['cellRxBytes'],
vLog['wfRxBytes'], vLog['wfRxBytes'],
vLog['cellDuration'], vLog['cellDuration'],
vLog['wfDuration'], vLog['wfDuration'],
vLog['playSessionID'], row['nextTTM'])
waveCursor.execute(strSQL)
except Exception, e:
log.error("updateVidLog %s" % e)
log.error(vLog)
log.error(row)
if strSQL > "":
log.error("[SQL] %s" % strSQL)
raise e
def getVidLogStatic(vidLogDict, aatLog, appSessionId, netType):
vidLogDict['playSessionID'] = aatLog['playSessionId']
vidLogDict['tTM'] = Decimal(aatLog['tTM'])
vidLogDict['oid'] = aatLog.get('log_time', '')
vidLogDict['appSessionID'] = appSessionId
vidLogDict['logType'] = aatLog.get('agentLogType', -1)
vidLogDict['logStartTime'] = aatLog.get('agentLogStartTime', 0)
vidLogDict['logEndTime'] = aatLog.get('agentLogEndTime', 0)
vidLogDict['cellid'] = "%s_%s_%s" % (aatLog.get('confOperator', ''), aatLog.get('netCID', ''), aatLog.get('netLAC', ''))
vidLogDict['ntype'] = netType
vidLogDict['abrMode'] = aatLog.get('abrMode', '')
if aatLog.has_key('requestBR'):
vidLogDict['curBitrate'] = aatLog.get('liveCurrentTSBitrate', 0)
vidLogDict['reqBitrate'] = aatLog['requestBR']
else:
vidLogDict['curBitrate'] = aatLog.get('reqBitrate', 0)
vidLogDict['reqBitrate'] = aatLog.get('liveCurrentTSBitrate', 0)
vidLogDict['bbCount'] = aatLog.get('bbCount', 0)
vidLogDict['netCellState'] = aatLog.get('netCellState', '')
vidLogDict['bufferState'] = aatLog.get('playBufferState', '0')
vidLogDict['cellSysRxBytes'] = aatLog.get('trafficSystemMoRxBytes', 0)
vidLogDict['wfSysRxBytes'] = aatLog.get('trafficSystemWFRxBytes', 0)
vidLogDict['playEndState'] = aatLog.get('playEndState', '')
strNetActive = aatLog.get('netActiveNetwork', '')
if strNetActive.upper().find('WIFI') > 0:
strtokens = strNetActive.split('|')
if len(strtokens) > 5:
vidLogDict['ssid'] = strtokens[3]
vidLogDict['bssid'] = strtokens[4]
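# vidupdate() merges one more aatLog record into an already existing vidsession row: `row` carries
# the current vidsession columns plus aggregates of the vidsession_log entries logged before and
# after this record's tTM, so order-dependent fields (start/end values and per-interval deltas)
# can be recomputed. The first record seen for a playSessionId is handled by vidcreate() instead.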
def vidupdate(waveCursor, aatLog, row):
try:
vidDict = {}
vidLogDict = {}
#get some values to use
cellid = "%s_%s_%s" % (aatLog.get('confOperator', ''), aatLog.get('netCID', ''), aatLog.get('netLAC', ''))
psmode = int(aatLog['playServiceMode'])
logType = aatLog.get('agentLogType', -1)
if aatLog.get('netActiveNetwork', '').find('WIFI') >= 0:
netType = '0'
elif aatLog.get('netActiveNetwork', '').find('mobile') >= 0:
netType = '1'
else:
netType = '2'
batteryStart = 0
batteryEnd = 0
batteryValid = 0
if aatLog.has_key('batteryInfo'):
btList = aatLog['batteryInfo'].split('|')
if len(btList) == 2:
if len(btList[0].split('/')) >= 5 and len(btList[1].split('/')) >= 5:
nTotLevel = float(btList[0].split('/')[3])
nBatLevel = float(btList[0].split('/')[4])
batteryStart = (nBatLevel/nTotLevel)*100
nTotLevel = float(btList[1].split('/')[3])
nBatLevel = float(btList[1].split('/')[4])
batteryEnd = (nBatLevel/nTotLevel)*100
if btList[1].split('/')[1] == 'DISCHARGING': #All batteryInfo reporting log must be 'DISCHARGING' except first.
batteryValid = 1
else:
batteryValid = 0
elif len(btList) == 1:
if len(btList[0].split('/')) >= 5:
nTotLevel = float(btList[0].split('/')[3])
nBatLevel = float(btList[0].split('/')[4])
batteryStart = (nBatLevel/nTotLevel)*100
batteryEnd = batteryStart
batteryValid = 0
#get appSessionID
appSessionId = ''
strSQL = """SELECT sessionID FROM appsession WHERE androidID = '%s' and pkgnm = '%s' and sID = %d
and (startTime - 5) <= %d and startTime > 0 and (endTime > %d or statAppss > '0') ORDER BY sessionID DESC LIMIT 1""" % (aatLog['deviceID'],
aatLog['pkgName'], aatLog['sID'], aatLog['agentLogStartTime'], aatLog['agentLogStartTime'])
ret = waveCursor.execute(strSQL)
if ret > 0:
aarow = waveCursor.fetchone()
if aarow['sessionID'] > '':
appSessionId = aarow['sessionID']
#vidsession_log values
getVidLogStatic(vidLogDict, aatLog, appSessionId, netType)
#initialize if as-is record has no valid value.
if row['androidID'] == '': vidDict['androidID'] = aatLog.get('deviceID', '')
if row['vID'] == '': vidDict['vID'] = aatLog.get('vID', '')
if row['sID'] == 0: vidDict['sID'] = aatLog.get('sID', 0)
if row['verCode'] == 0: vidDict['verCode'] = aatLog.get('verCode', 0)
if row['osVer'] == '': vidDict['osVer'] = aatLog.get('osVer', '')
if row['brand'] == '': vidDict['brand'] = aatLog.get('brand', '')
if row['model'] == '': vidDict['model'] = aatLog.get('model', '')
if row['cellIdSt'] == '' and len(cellid) > 6: vidDict['cellIdSt'] = cellid
if row['cellIdEnd'] == '' and len(cellid) > 6: vidDict['cellIdEnd'] = cellid
if row['bMao'] < 0: vidDict['bMao'] = int(aatLog.get('agentAatOnOff', -1))
if row['bAnsAllow'] < 0: vidDict['bAnsAllow'] = int(aatLog.get('agentAllowAns', -1))
if row['bCellAllow'] < 0: vidDict['bCellAllow'] = int(aatLog.get('agentAllowMobile', -1))
if row['ansMode'] == '': vidDict['ansMode'] = aatLog.get('agentAnsMode', -1)
if row['agentUserSetup'] == '': vidDict['agentUserSetup'] = aatLog.get('agentUserSetup', '')
#if row['startLogType'] == '': vidDict['ansMode'] = aatLog.get('agentAnsMode', -1)
if row['hostName'] == '': vidDict['hostName'] = aatLog.get('playHost', '')
if row['originName'] == '': vidDict['originName'] = aatLog.get('playOrigin', '')
if row['contentID'] == '': vidDict['contentID'] = aatLog.get('playContentId', '')
if row['playServiceMode'] <= 0: vidDict['playServiceMode'] = aatLog.get('playServiceMode', 0)
if row['contentSize'] == 0:
if psmode == 1:
vidDict['contentSize'] = aatLog.get('vodContentSize', 0)
elif psmode == 4:
vidDict['contentSize'] = aatLog.get('audContentSize', 0)
elif psmode == 5:
vidDict['contentSize'] = aatLog.get('adnContentSize', 0)
if row['contentDuration'] == 0:
if psmode == 1:
vidDict['contentDuration'] = aatLog.get('vodContentDuration', 0)
elif psmode == 4:
vidDict['contentDuration'] = aatLog.get('audContentDuration', 0)
if row['contentBitrate'] == 0 and psmode in [2,3]:
vidDict['contentBitrate'] = aatLog.get('liveCurrentTSBitrate', 0)
#if row['channelName'] == '': vidDict['channelName'] = aatLog.get('playTitle', '').encode('utf-8')
if row['channelName'] == '': vidDict['channelName'] = aatLog.get('playTitle', '')
if row['pkgnm'] == '': vidDict['pkgnm'] = aatLog.get('pkgName', '')
if row['apppkgnm'] == '' or row['appvercd'] == '':
if(aatLog.has_key('playAppPackageName')):
appPkgs = aatLog['playAppPackageName'].split('/')
if len(appPkgs) >= 2:
vidDict['apppkgnm'] = appPkgs[0]
vidDict['appvercd'] = appPkgs[1]
if row['connectedNetCnt'] == 0:
if aatLog.has_key('netConnectedNetworkCount'):
vidDict['connectedNetCnt']=aatLog['netConnectedNetworkCount']
elif aatLog.has_key('netConnectivityCount'):
vidDict['connectedNetCnt']=aatLog['netConnectivityCount']
if row['abrBitrateList'] == '': vidDict['abrBitrateList'] = aatLog.get('playBitrateList', '')
if row['abrUserSelBR'] == '': vidDict['abrUserSelBR'] = aatLog.get('userSelectBitrate', '')
if psmode == 5:
if row['vidnetType'] == 0: vidDict['vidnetType'] = aatLog.get('adnStartCode', 0)
if row['adnMode'] == '' or (row['adnMode'] <> 'BB' and aatLog.get('adnMode', '') == 'BB') :
vidDict['adnMode'] = aatLog.get('adnMode', '')
if row['adnRangeStart'] == 0: vidDict['adnRangeStart'] = aatLog.get('adnContentRangeStart', 0)
if row['adnDownSize'] < aatLog.get('adnDownloadSize', 0):
vidDict['adnDownSize'] = aatLog.get('adnDownloadSize', 0)
if row['contentDuration'] < aatLog.get('adnDownloadTime', 0):
vidDict['contentDuration'] = aatLog.get('adnDownloadTime', 0)
if row['adnContentID'] == 0: vidDict['adnContentID'] = aatLog.get('adnContentID', 0)
vidDict['cellSysRxBytes'] = row['cellSysRxBytes'] + aatLog.get('trafficSystemMoRxBytes', 0)
vidDict['wfSysRxBytes'] = row['wfSysRxBytes'] + aatLog.get('trafficSystemWFRxBytes', 0)
# process attributes depending on log-order
if aatLog['tTM'] > row['maxTTM']: #The log is the last of this playSession
if len(cellid) > 6:
vidDict['cellIdEnd'] = cellid
vidDict['endLogType'] = logType
vidDict['vidnetEndTime'] = aatLog.get('agentLogEndTime', 0)
vidDict['vidnetDuration'] = vidDict['vidnetEndTime'] - row['vidnetStartTime']
if aatLog.get('playPlayingTime', 0) > row['playTime']:
vidDict['playTime'] = aatLog['playPlayingTime']
if aatLog.get('playSeekCount', 0) > row['seekCnt']:
vidDict['seekCnt'] = aatLog['playSeekCount']
if aatLog.get('playSeekForwardCount', 0) > row['ffCnt']:
vidDict['ffCnt'] = aatLog['playSeekForwardCount']
if aatLog.get('playSeekRewindCount', 0) > row['rwCnt']:
vidDict['rwCnt'] = aatLog['playSeekRewindCount']
if aatLog.has_key('netConnectedNetworkCount'):
if row['connectedNetCnt'] < aatLog['netConnectedNetworkCount']:
vidDict['connectedNetCnt']=aatLog['netConnectedNetworkCount']
elif aatLog.has_key('netConnectivityCount'):
if row['connectedNetCnt'] < aatLog['netConnectivityCount']:
vidDict['connectedNetCnt']=aatLog['netConnectivityCount']
if psmode in [1, 4, 5]:
vidDict['pauseCnt'] = aatLog.get('playBufferingCount', 0)
vidDict['resumeCnt'] = aatLog.get('playResumeCount', 0)
if aatLog.get('playAccBufferingTime', 0) > row['pauseTime']:
vidDict['pauseTime'] = aatLog.get('playAccBufferingTime', 0)
if aatLog.get('playMaxBufferingTime', 0) > row['maxPauseTime']:
vidDict['maxPauseTime'] = aatLog.get('playMaxBufferingTime', 0)
if aatLog.get('trafficAgentMoBytes', 0) > row['cellRxBytes']:
vidDict['cellRxBytes'] = aatLog['trafficAgentMoBytes']
if aatLog.get('trafficAgentWFBytes', 0) > row['wfRxBytes']:
vidDict['wfRxBytes'] = aatLog['trafficAgentWFBytes']
vidDict['cellAvgTP'] = round(aatLog.get('trafficAgentMoAveBW',0), 4)
vidDict['wfAvgTP'] = round(aatLog.get('trafficAgentWFAveBW',0), 4)
if vidDict['cellAvgTP'] > 0:
vidDict['cellDuration'] = int((aatLog.get('trafficAgentMoBytes', 0)*8) / (aatLog['trafficAgentMoAveBW']*1000000))
if vidDict['wfAvgTP'] > 0:
vidDict['wfDuration'] = int((aatLog.get('trafficAgentWFBytes', 0)*8) / (aatLog['trafficAgentWFAveBW']*1000000))
vidDict['batteryEnd'] = batteryEnd
#get appSessionID for vidsession
strSQL = """SELECT sessionID FROM appsession WHERE androidID = '%s' and pkgnm = '%s' and sID = %d
and startTime < %d and startTime > 0 and ((endTime+5) > %d or statAppss > '0') ORDER BY sessionID DESC LIMIT 1""" % (aatLog['deviceID'],
aatLog['pkgName'], aatLog['sID'], aatLog['agentLogEndTime'], aatLog['agentLogEndTime'])
ret = waveCursor.execute(strSQL)
if ret > 0:
aarow = waveCursor.fetchone()
if aarow['sessionID'] > '':
vidDict['appSessionIDEnd'] = aarow['sessionID']
#vidsession_log values
if aatLog.get('playPlayingTime', 0) > row['playTime']:
vidLogDict['playTime'] = aatLog.get('playPlayingTime', 0) - row['playTime']
else:
vidLogDict['playTime'] = 0
if aatLog.get('playAccBufferingTime', 0) > row['pauseTime']:
vidLogDict['pauseTime'] = aatLog.get('playAccBufferingTime', 0) - row['pauseTime']
else:
vidLogDict['pauseTime'] = 0
if aatLog.get('playPreparingTime', 0) > row['elapsedTime']:
vidLogDict['elapsedTime'] = aatLog.get('playPreparingTime', 0) - row['elapsedTime']
else:
vidLogDict['elapsedTime'] = 0
if aatLog.get('trafficAgentMoBytes', 0) > row['cellRxBytes']:
vidLogDict['cellRxBytes'] = aatLog.get('trafficAgentMoBytes', 0) - row['cellRxBytes']
else:
vidLogDict['cellRxBytes'] = 0
if aatLog.get('trafficAgentWFBytes', 0) > row['wfRxBytes']:
vidLogDict['wfRxBytes'] = aatLog.get('trafficAgentWFBytes', 0) - row['wfRxBytes']
else:
vidLogDict['wfRxBytes'] = 0
if vidDict['cellAvgTP'] > 0 and vidDict['cellDuration'] > row['cellDuration']:
vidLogDict['cellDuration'] = vidDict['cellDuration'] - row['cellDuration']
else:
vidLogDict['cellDuration'] = 0
if vidDict['wfAvgTP'] > 0 and vidDict['wfDuration'] > row['wfDuration']:
vidLogDict['wfDuration'] = vidDict['wfDuration'] - row['wfDuration']
else:
vidLogDict['wfDuration'] = 0
elif row['bpsid'] == '': # The log is the first of this playSession
if len(cellid) > 6:
vidDict['cellIdSt'] = cellid
vidDict['startLogType'] = logType
vidDict['vidnetStartTime'] = aatLog.get('agentLogStartTime', 0)
vidDict['vidnetDuration'] = row['vidnetEndTime'] - vidDict['vidnetStartTime']
vidDict['batteryStart'] = batteryStart
if appSessionId > '':
vidDict['appSessionIDSt'] = appSessionId
#vidsession_log values
vidLogDict['playTime'] = aatLog.get('playPlayingTime', 0)
vidLogDict['pauseTime'] = aatLog.get('playAccBufferingTime', 0)
vidLogDict['elapsedTime'] = aatLog.get('playPreparingTime', 0)
vidLogDict['cellRxBytes'] = aatLog.get('trafficAgentMoBytes', 0)
vidLogDict['wfRxBytes'] = aatLog.get('trafficAgentWFBytes', 0)
if round(aatLog.get('trafficAgentMoAveBW',0), 4) > 0:
vidLogDict['cellDuration'] = int((aatLog.get('trafficAgentMoBytes', 0)*8) / (aatLog['trafficAgentMoAveBW']*1000000))
else:
vidLogDict['cellDuration'] = 0
if round(aatLog.get('trafficAgentWFAveBW',0), 4) > 0:
vidLogDict['wfDuration'] = int((aatLog.get('trafficAgentWFBytes', 0)*8) / (aatLog['trafficAgentWFAveBW']*1000000))
else:
vidLogDict['wfDuration'] = 0
updateVidLog(waveCursor, vidLogDict, row)
else: # The log is middle of this playSession
if aatLog.get('playPlayingTime', 0) > row['mPlayTime']:
vidLogDict['playTime'] = aatLog.get('playPlayingTime', 0) - row['mPlayTime']
else:
vidLogDict['playTime'] = 0
if aatLog.get('playAccBufferingTime', 0) > row['mPauseTime']:
vidLogDict['pauseTime'] = aatLog.get('playAccBufferingTime', 0) - row['mPauseTime']
else:
vidLogDict['pauseTime'] = 0
if aatLog.get('playPreparingTime', 0) > row['mElapsedTime']:
vidLogDict['elapsedTime'] = aatLog.get('playPreparingTime', 0) - row['mElapsedTime']
else:
vidLogDict['elapsedTime'] = 0
if aatLog.get('trafficAgentMoBytes', 0) > row['mCellBytes']:
vidLogDict['cellRxBytes'] = aatLog.get('trafficAgentMoBytes', 0) - row['mCellBytes']
else:
vidLogDict['cellRxBytes'] = 0
if aatLog.get('trafficAgentWFBytes', 0) > row['mWFBytes']:
vidLogDict['wfRxBytes'] = aatLog.get('trafficAgentWFBytes', 0) - row['mWFBytes']
else:
vidLogDict['wfRxBytes'] = 0
vidLogDict['cellDuration'] = 0
vidLogDict['wfDuration'] = 0
if round(aatLog.get('trafficAgentMoAveBW',0), 4) > 0:
tempdur = int((aatLog.get('trafficAgentMoBytes', 0)*8) / (aatLog['trafficAgentMoAveBW']*1000000))
if tempdur > row['mCellDur']:
vidLogDict['cellDuration'] = tempdur - int(row['mCellDur'])
if round(aatLog.get('trafficAgentWFAveBW',0), 4) > 0:
tempdur = int((aatLog.get('trafficAgentWFBytes', 0)*8) / (aatLog['trafficAgentWFAveBW']*1000000))
if tempdur > row['mWFDur']:
vidLogDict['wfDuration'] = tempdur - int(row['mWFDur'])
updateVidLog(waveCursor, vidLogDict, row)
# process independent attributes not depending on log-order
if psmode in [2, 3]:
if logType == 6:
vidDict['pauseCnt'] = row['pauseCnt'] + 1
elif logType == 7:
vidDict['resumeCnt'] = row['resumeCnt'] + 1
if logType in [5, 8]:
if netType == '0': #WIFI
vidDict['netW2CTransferCnt'] = row['netW2CTransferCnt'] + 1
elif netType == '1': #mobile
vidDict['netC2WTransferCnt'] = row['netC2WTransferCnt'] + 1
if row['batteryValid'] == '1' and batteryValid == 0:
vidDict['batteryValid'] = '0'
if aatLog.get('netCellState', -1) > 0 and row['netAllowCell'] == '1': #The log not allow cell, as-is fully allowed.
vidDict['netAllowCell'] = '2'
elif aatLog.get('netCellState', -1) == 0 and row['netAllowCell'] == '0': #The log allow cell, as-is not allowed at all.
vidDict['netAllowCell'] = '2'
vidDict['bbCount'] = row['bbCount'] + aatLog.get('bbCount', 0)
if row['elapsedTime'] == 0 and aatLog.get('playPreparingTime', 0) > 0:
vidDict['elapsedTime'] = aatLog['playPreparingTime']
elif row['mLogType'] == 10 and row['mBufferState'] == '2' and aatLog.get('playPreparingTime', 0) > 0:
vidDict['elapsedTime'] = row['elapsedTime'] + aatLog['playPreparingTime']
#insert tables
vidDict['playSessionID'] = row['playSessionID']
updateVidnet(waveCursor, vidDict)
insertVidnetLog(waveCursor, vidLogDict)
except Exception, e:
log.error("vidupdate %s" % e)
log.error(aatLog)
log.error(vidDict)
raise e
def vidcreate(waveCursor, aatLog):
try:
vidDict = {}
vidLogDict = {}
#get some values to use
cellid = "%s_%s_%s" % (aatLog.get('confOperator', ''), aatLog.get('netCID', ''), aatLog.get('netLAC', ''))
psmode = int(aatLog['playServiceMode'])
logType = aatLog.get('agentLogType', -1)
if aatLog.get('netActiveNetwork', '').find('WIFI') >= 0:
netType = '0'
elif aatLog.get('netActiveNetwork', '').find('mobile') >= 0:
netType = '1'
else:
netType = '2'
vidDict['playSessionID'] = aatLog['playSessionId']
vidDict['androidID'] = aatLog.get('deviceID', '')
vidDict['vID'] = aatLog.get('vID', '')
vidDict['sID'] = aatLog.get('sID', 0)
vidDict['verCode'] = aatLog.get('verCode', 0)
vidDict['osVer'] = aatLog.get('osVer', '')
vidDict['brand'] = aatLog.get('brand', '')
vidDict['model'] = aatLog.get('model', '')
vidDict['cellIdSt'] = cellid
vidDict['cellIdEnd'] = cellid
vidDict['bMao'] = int(aatLog.get('agentAatOnOff', -1))
vidDict['bAnsAllow'] = int(aatLog.get('agentAllowAns', -1))
vidDict['bCellAllow'] = int(aatLog.get('agentAllowMobile', -1))
vidDict['ansMode'] = aatLog.get('agentAnsMode', -1)
vidDict['agentUserSetup'] = aatLog.get('agentUserSetup', '')
#vidDict['ansMode'] = aatLog.get('agentAnsMode', -1)
vidDict['hostName'] = aatLog.get('playHost', '')
vidDict['originName'] = aatLog.get('playOrigin', '')
vidDict['contentID'] = aatLog.get('playContentId', '')
vidDict['playServiceMode'] = psmode
if psmode == 1:
vidDict['contentSize'] = aatLog.get('vodContentSize', 0)
elif psmode == 4:
vidDict['contentSize'] = aatLog.get('audContentSize', 0)
elif psmode == 5:
vidDict['contentSize'] = aatLog.get('adnContentSize', 0)
else:
vidDict['contentSize'] = 0
if psmode == 1:
vidDict['contentDuration'] = aatLog.get('vodContentDuration', 0)
elif psmode == 4:
vidDict['contentDuration'] = aatLog.get('audContentDuration', 0)
elif psmode == 5:
vidDict['contentDuration'] = aatLog.get('adnDownloadTime', 0)
else:
vidDict['contentDuration'] = 0
if psmode in [2,3]:
vidDict['contentBitrate'] = aatLog.get('liveCurrentTSBitrate', 0)
else:
vidDict['contentBitrate'] = 0
#vidDict['channelName'] = aatLog.get('playTitle', '').encode('utf-8')
vidDict['channelName'] = aatLog.get('playTitle', '')
vidDict['pkgnm'] = aatLog.get('pkgName', '')
vidDict['apppkgnm'] = ""
vidDict['appvercd'] = ""
if(aatLog.has_key('playAppPackageName')):
appPkgs = aatLog['playAppPackageName'].split('/')
if len(appPkgs) >= 2:
vidDict['apppkgnm'] = appPkgs[0]
vidDict['appvercd'] = appPkgs[1]
if aatLog.has_key('netConnectedNetworkCount'):
vidDict['connectedNetCnt']=aatLog['netConnectedNetworkCount']
elif aatLog.has_key('netConnectivityCount'):
vidDict['connectedNetCnt']=aatLog['netConnectivityCount']
else:
vidDict['connectedNetCnt']=0
vidDict['abrBitrateList'] = aatLog.get('playBitrateList', '')
vidDict['abrUserSelBR'] = aatLog.get('userSelectBitrate', '')
if psmode == 5:
vidDict['vidnetType'] = aatLog.get('adnStartCode', 0)
vidDict['adnMode'] = aatLog.get('adnMode', '')
vidDict['adnRangeStart'] = aatLog.get('adnContentRangeStart', 0)
vidDict['adnDownSize'] = aatLog.get('adnDownloadSize', 0)
vidDict['adnContentID'] = aatLog.get('adnContentID', 0)
vidDict['startLogType'] = logType
vidDict['endLogType'] = logType
vidDict['vidnetStartTime'] = aatLog.get('agentLogStartTime', 0)
vidDict['vidnetEndTime'] = aatLog.get('agentLogEndTime', 0)
vidDict['vidnetDuration'] = vidDict['vidnetEndTime'] - vidDict['vidnetStartTime']
# process independent attributes not depending on log-order
vidDict['pauseCnt'] = 0
vidDict['resumeCnt'] = 0
vidDict['netW2CTransferCnt'] = 0
vidDict['netC2WTransferCnt'] = 0
if psmode in [2, 3]:
if logType == 6:
vidDict['pauseCnt'] = 1
elif logType == 7:
vidDict['resumeCnt'] = 1
elif psmode in [1, 4, 5]:
vidDict['pauseCnt'] = aatLog.get('playBufferingCount', 0)
vidDict['resumeCnt'] = aatLog.get('playResumeCount', 0)
if logType in [5, 8]:
if netType == '0': #WIFI
vidDict['netW2CTransferCnt'] = 1
elif netType == '1': #mobile
vidDict['netC2WTransferCnt'] = 1
vidDict['playTime'] = aatLog.get('playPlayingTime', 0)
vidDict['seekCnt'] = aatLog.get('playSeekCount', 0)
vidDict['ffCnt'] = aatLog.get('playSeekForwardCount', 0)
vidDict['rwCnt'] = aatLog.get('playSeekRewindCount', 0)
vidDict['pauseTime'] = aatLog.get('playAccBufferingTime', 0)
vidDict['maxPauseTime'] = aatLog.get('playMaxBufferingTime', 0)
vidDict['cellRxBytes'] = aatLog.get('trafficAgentMoBytes', 0)
vidDict['wfRxBytes'] = aatLog.get('trafficAgentWFBytes', 0)
vidDict['cellAvgTP'] = round(aatLog.get('trafficAgentMoAveBW',0), 4)
vidDict['wfAvgTP'] = round(aatLog.get('trafficAgentWFAveBW',0), 4)
vidDict['cellDuration'] = 0
vidDict['wfDuration'] = 0
vidDict['cellSysRxBytes'] = aatLog.get('trafficSystemMoRxBytes', 0)
vidDict['wfSysRxBytes'] = aatLog.get('trafficSystemWFRxBytes', 0)
if vidDict['cellAvgTP'] > 0:
vidDict['cellDuration'] = int((aatLog.get('trafficAgentMoBytes', 0)*8) / (aatLog['trafficAgentMoAveBW']*1000000))
if vidDict['wfAvgTP'] > 0:
vidDict['wfDuration'] = int((aatLog.get('trafficAgentWFBytes', 0)*8) / (aatLog['trafficAgentWFAveBW']*1000000))
batteryStart = 0
batteryEnd = 0
batteryValid = '0'
if aatLog.has_key('batteryInfo'):
btList = aatLog['batteryInfo'].split('|')
if len(btList) == 2:
if len(btList[0].split('/')) >= 5 and len(btList[1].split('/')) >= 5:
nTotLevel = float(btList[0].split('/')[3])
nBatLevel = float(btList[0].split('/')[4])
batteryStart = (nBatLevel/nTotLevel)*100
nTotLevel = float(btList[1].split('/')[3])
nBatLevel = float(btList[1].split('/')[4])
batteryEnd = (nBatLevel/nTotLevel)*100
if btList[1].split('/')[1] == 'DISCHARGING': #All batteryInfo reporting log must be 'DISCHARGING' except first.
batteryValid = 1
else:
batteryValid = 0
elif len(btList) == 1:
if len(btList[0].split('/')) >= 5:
nTotLevel = float(btList[0].split('/')[3])
nBatLevel = float(btList[0].split('/')[4])
batteryStart = (nBatLevel/nTotLevel)*100
batteryEnd = batteryStart
batteryValid = 0
vidDict['batteryStart'] = batteryStart
vidDict['batteryEnd'] = batteryEnd
vidDict['batteryValid'] = str(batteryValid)
if aatLog.get('netCellState', -1) > 0:
vidDict['netAllowCell'] = '0'
elif aatLog.get('netCellState', -1) == 0:
vidDict['netAllowCell'] = '1'
vidDict['bbCount'] = aatLog.get('bbCount', 0)
vidDict['elapsedTime'] = aatLog.get('playPreparingTime', 0)
#get appSessionID
vidDict['appSessionIDSt'] = ''
vidDict['appSessionIDEnd'] = ''
strSQL = """SELECT MAX(1) as ord, MAX(sessionID) as sessionID FROM appsession WHERE androidID = '%s' and pkgnm = '%s' and sID = %d
and (startTime - 5) <= %d and startTime > 0 and (endTime > %d or statAppss > '0')
UNION ALL
SELECT MAX(2), MAX(sessionID) FROM appsession WHERE androidID = '%s' and pkgnm = '%s' and sID = %d
and startTime < %d and startTime > 0 and ((endTime + 5) > %d or statAppss > '0')
""" % (aatLog['deviceID'], aatLog['pkgName'], aatLog['sID'], aatLog['agentLogStartTime'], aatLog['agentLogStartTime'],
aatLog['deviceID'], aatLog['pkgName'], aatLog['sID'], aatLog['agentLogEndTime'], aatLog['agentLogEndTime'])
ret = waveCursor.execute(strSQL)
if ret > 0:
aarows = waveCursor.fetchall()
for r in aarows:
if r['sessionID'] > '' and r['sessionID'] <> None:
if r['ord'] == 1:
vidDict['appSessionIDSt'] = r['sessionID']
elif r['ord'] == 2:
vidDict['appSessionIDEnd'] = r['sessionID']
#vidsession_log values
getVidLogStatic(vidLogDict, aatLog, vidDict['appSessionIDSt'], netType)
vidLogDict['playTime'] = aatLog.get('playPlayingTime', 0)
vidLogDict['pauseTime'] = aatLog.get('playAccBufferingTime', 0)
vidLogDict['elapsedTime'] = aatLog.get('playPreparingTime', 0)
vidLogDict['cellRxBytes'] = aatLog.get('trafficAgentMoBytes', 0)
vidLogDict['wfRxBytes'] = aatLog.get('trafficAgentWFBytes', 0)
vidLogDict['cellSysRxBytes'] = aatLog.get('trafficSystemMoRxBytes', 0)
vidLogDict['wfSysRxBytes'] = aatLog.get('trafficSystemWFRxBytes', 0)
vidLogDict['cellDuration'] = 0
vidLogDict['wfDuration'] = 0
if round(aatLog.get('trafficAgentMoAveBW',0), 4) > 0:
vidLogDict['cellDuration'] = int((aatLog.get('trafficAgentMoBytes', 0)*8) / (aatLog['trafficAgentMoAveBW']*1000000))
if round(aatLog.get('trafficAgentWFAveBW',0), 4) > 0:
vidLogDict['wfDuration'] = int((aatLog.get('trafficAgentWFBytes', 0)*8) / (aatLog['trafficAgentWFAveBW']*1000000))
#insert tables
insertVidnet(waveCursor, vidDict)
insertVidnetLog(waveCursor, vidLogDict)
except Exception, e:
log.error("vidcreate %s" % e)
log.error(aatLog)
raise e
#Following function is not related to Open DD. You should ignore it
def getBBinfo(rBBList, aatLog):
try:
if aatLog.has_key('bbCount') == False or aatLog.has_key('bbList') == False:
return
BBcount = aatLog['bbCount']
if BBcount == 0:
return
elif BBcount > 40:
BBcount = 40
if isinstance(aatLog['bbList'], list):
bblst = aatLog['bbList'][0:BBcount]
else:
bblst = aatLog['bbList'].strip('[ ]').split(',')
bblst = bblst[0:BBcount]
bbdict = {}
for bbItem in bblst:
if bbItem.find('|') < 0: continue
bbElm = bbItem.strip(" u'\"").split('|')
bbdict['psid'] = aatLog['playSessionId']
bbdict['bb'] = list(bbElm)
rBBList.append(bbdict.copy())
except Exception, e:
log.error("getBBinfo error:%s" % e)
log.error(aatLog)
raise e
#Following function is not related to Open DD. You should ignore it
def insertBBSQL(waveCursor, bbList):
try:
strLst = []
for bbElm in bbList:
if len(bbElm['bb']) == 8 and bbElm['bb'][7] == 'e':
strValue = "('%s', %s, %s, '%s', '%s', %s, %s, %s, unix_timestamp())" % (bbElm['psid'], bbElm['bb'][0], bbElm['bb'][2], \
'{0:02d}'.format(int(bbElm['bb'][1])), '{0:02d}'.format(int(bbElm['bb'][3])), bbElm['bb'][4], bbElm['bb'][5], bbElm['bb'][6])
elif len(bbElm['bb']) == 6:
strValue = "('%s', %s, %s, '%s', '%s', %s, %s, NULL, unix_timestamp())" % (bbElm['psid'], bbElm['bb'][0], bbElm['bb'][2], \
'{0:02d}'.format(int(bbElm['bb'][1])), '{0:02d}'.format(int(bbElm['bb'][3])), bbElm['bb'][4], bbElm['bb'][5])
else:
log.warn("BBList format error:")
log.warn(bbElm)
continue
strLst.append(strValue)
if len(strLst) > 0:
sql = """insert into vidsession_bb (playSessionID, stTime, endTime, stCode, endCode, trWF, trCell, stBBTime, lstuptmp)
values %s
on duplicate key update endTime = values(endTime), stCode = values(stCode), trWF = values(trWF), trCell = values(trCell), stBBTime = values(stBBTime), lstuptmp = unix_timestamp()
""" % ', '.join(strLst)
ret = waveCursor.execute(sql)
if ret == 0:
log.warn("insertBBSQL no record affected [%s]" % sql)
except Exception, e:
log.error("insertBBSQL error:%s" % e)
log.error(bbList)
raise e
#Following function is not related to Open DD. You should ignore it
def getNetConn(rstNetList, aatLog):
try:
if aatLog.has_key('netConnectivityList') == False or len(aatLog['netConnectivityList']) == 0:
return
try:
# log.info(type(aatLog['netConnectivityList']))
# log.info(aatLog['netConnectivityList'])
if type(aatLog['netConnectivityList']) == list:
netlst = aatLog['netConnectivityList']
else:
netlst = json.loads(aatLog['netConnectivityList'])
except Exception, e:
netlst = aatLog['netConnectivityList'].strip('[ ]').split(',')
for netItem in netlst:
if netItem == None:
break
if netItem.find('|') < 0:
if len(netItem) > 0:
continue
else:
break
netElm = netItem.strip(" u'\"").replace("||", "|").split('|')
netDict = {}
if netElm[2].find('WIFI') >= 0:
if len(netElm) == 7:
netDict['playSessionID'] = aatLog['playSessionId']
netDict['stTime'] = netElm[0]
netDict['ntype'] = 'w'
netDict['bssid'] = netElm[4]
netDict['ssid'] = netElm[3].replace("'", "''")
netDict['traffic'] = netElm[6]
rstNetList.append(netDict.copy())
else:
if len(netElm) == 5:
netDict['playSessionID'] = aatLog['playSessionId']
netDict['stTime'] = netElm[0]
netDict['ntype'] = 'm'
netDict['traffic'] = netElm[4]
rstNetList.append(netDict.copy())
except Exception, e:
log.error("getNetconnInfo error:[%s]%s" % (type(e), e))
log.error(aatLog)
raise e
#Following function is not related to Open DD. You should ignore it
def insertNetInfo(waveCursor, netList):
try:
strLst = []
for netElm in netList:
if netElm['ntype'] == 'w':
strValue = "('%s', %s, '%s', '%s', '%s', %s, unix_timestamp())" % \
(netElm['playSessionID'], netElm['stTime'], netElm['ntype'], \
netElm['bssid'], netElm['ssid'], netElm['traffic'])
else:
strValue = "('%s', %s, '%s', NULL, NULL, %s, unix_timestamp())" % \
(netElm['playSessionID'], netElm['stTime'], netElm['ntype'], netElm['traffic'])
strLst.append(strValue)
if len(strLst) > 0:
sql = """insert into vidsession_net (playSessionID, stTime, ntype, bssid, ssid, traffic, lstuptmp)
values %s
on duplicate key update ntype = values(ntype), bssid = values(bssid), ssid = values(ssid), traffic = values(traffic), lstuptmp = unix_timestamp()
""" % ', '.join(strLst)
ret = waveCursor.execute(sql)
if ret == 0:
log.warn("insertNetInfo no record affected [%s]" % sql)
except Exception, e:
log.error("insertNetInfo error:%s" % e)
log.error(strLst)
raise e
def insertVidnet(waveCursor, vidDict):
if vidDict == None:
        log.warn('vidDict is null')
return False
cols = vidDict.keys()
vals = vidDict.values()
try:
slist = []
for v in vals:
if type(v) == str or type(v) == unicode:
slist.append("'" + v.replace("'", "''") + "'")
else:
slist.append(str(v))
sql = """insert into vidsession (%s, lstuptmp) values (%s, unix_timestamp())""" % (",".join(cols), unicode(",", "utf-8").join(slist))
waveCursor.execute(sql)
except Exception, e:
log.error("INSERT VIDNET ERROR:%s" % e)
log.error(vidDict)
raise e
def insertVidnetLog(waveCursor, vidLogDict):
if vidLogDict == None:
log.warn('vidLogDict is null')
return False
cols = vidLogDict.keys()
vals = vidLogDict.values()
try:
sql = """insert into vidsession_log (%s, lstuptmp) values (%s, unix_timestamp())""" % (",".join(cols), ",".join(["'" + str(val).replace("'", "''") + "'" for val in vals]))
waveCursor.execute(sql)
except Exception, e:
log.error("INSERT vidsession_log ERROR:%s" % e)
log.error(vidLogDict)
raise e
def updateVidnet(waveCursor, vidDict):
if vidDict == None:
log.warn('updateVidnet : vidDict is null')
return False
playSessionID = vidDict.pop('playSessionID')
cols = vidDict.keys()
vals = vidDict.values()
try:
slist = []
for key in vidDict:
if type(vidDict[key]) == str or type(vidDict[key]) == unicode:
s = "%s = '%s'" % (key, vidDict[key].replace("'", "''"))
else:
s = "%s = %s" % (key, str(vidDict[key]))
slist.append(s)
slist.append("lstuptmp = unix_timestamp()")
#sql = "UPDATE vidsession SET %s WHERE playSessionID = '%s'" % (unicode(',', 'utf-8').join(map(lambda key:"%s='%s'" % (key, unicode(vidDict[key], 'utf-8').replace("'", "''")), vidDict)), playSessionID)
sql = "UPDATE vidsession SET %s WHERE playSessionID = '%s'" % (unicode(',', 'utf-8').join(slist), playSessionID)
waveCursor.execute(sql)
except Exception, e:
log.error("update vidsession ERROR:%s, playSessionID:%s" % (e, playSessionID))
log.error(vidDict)
raise e
###############################################################################################################
######################## PROCESS By ONE AATLOG routine ############################################
###############################################################################################################
#
#Open DD processing only include following items in aatLog
#The others should be ignored.
#
# 'log_time', 'abrMode', 'agentAatOnOff',
# 'agentLogEndTime', 'agentLogStartTime', 'agentLogType', 'bbCount',
# 'bbList', 'brand', 'confOperator', 'deviceID', 'liveCurrentTSBitrate', 'model', 'netActiveNetwork', 'netCellState',
# 'netCID', 'netLAC', 'numTotalHits', 'osVer', 'pkgName', 'playAccBufferingTime',
# 'playAppPackageName', 'playContentId', 'playHost',
# 'playOrigin', 'playPlayingTime', 'playPreparingTime',
# 'playServiceMode', 'playSessionId', 'playTitle', 'requestBR', 'sID',
# 'trafficAgentMoAveBW', 'trafficAgentMoBytes', 'trafficAgentWFAveBW', 'trafficAgentWFBytes', 'trafficSystemMoRxBytes',
# 'trafficSystemWFRxBytes', 'playEndState', 'tTM', 'verCode', 'vID'
#
###############################################################################################################
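# Illustrative sketch (not part of the original module): one way to keep only the Open DD
# relevant items listed above before further processing; OPEN_DD_KEYS is an assumed name.
#   OPEN_DD_KEYS = frozenset(['log_time', 'abrMode', 'agentAatOnOff', 'agentLogType', ...])
#   aatLog = dict((k, v) for k, v in aatLog.items() if k in OPEN_DD_KEYS)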
class ProcessAATLog(object):
# for debug temp
OW_TASK_SUBSCRIBE_EVENTS = ['evtPlayerLog']
# OW_TASK_SUBSCRIBE_EVENTS = []
# for debug temp
OW_TASK_PUBLISH_EVENTS = []
OW_USE_HASHING = False
OW_HASH_KEY = None
OW_NUM_WORKER = 16
def publishEvent(self, event, params):
# THIS METHOD WILL BE OVERRIDE
# DO NOT EDIT THIS METHOD
pass
def handler(self, aatLog):
try:
waveCursor = None
#update apmain.mdev's plmnid for hoppin case
if aatLog.get('confOperator', '') > '':
updateMcc(aatLog.get('deviceID', ''), aatLog['confOperator'])
#in case of Off Log, process only End(0) log
if int(aatLog.get('agentAatOnOff', -1)) == 0:
if aatLog.get('agentLogType', -1) <> 0:
return
curAnID = aatLog.get('deviceID').strip(" u'")
curPkgname = aatLog.get('pkgName').strip(" u'")
curPsID = aatLog.get('playSessionId', '').strip(" u'")
curTTM = Decimal(aatLog.get('tTM', 0.0))
curEndTm = int(aatLog.get('agentLogEndTime', 0))
waveCursor = worker.dbmanager.allocDictCursor('myapwave')
waveCursor.execute("START TRANSACTION")
strSQL= None
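            # Fetch the existing vidsession row (if any) for this playSessionId, together with
            # aggregates of the vidsession_log entries recorded before and after this log's tTM;
            # the result decides between vidupdate() and vidcreate() below.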
strSQL = """SELECT a.*, IFNULL(b.psid, '') AS bpsid, b.*, e.* FROM
(SELECT m.*, MAX(n.tTM) AS maxTTM, MAX(IF(n.tTM = %.3f, 1, 0) ) AS bExist
FROM vidsession m LEFT OUTER JOIN vidsession_log n ON m.playSessionID = n.playSessionID
WHERE m.playSessionID = '%s') a LEFT OUTER JOIN
(SELECT playSessionID AS psid, MAX(tTM) AS lstTTM,
SUBSTR(MAX(CONCAT(RPAD(tTM, 14, '0'), logType)), 15) AS mLogType,
SUBSTR(MAX(CONCAT(RPAD(tTM, 14, '0'), logEndTime)), 15) AS mLogEndTime,
SUBSTR(MAX(CONCAT(RPAD(tTM, 14, '0'), IFNULL(bufferState, '0'))), 15) AS mBufferState,
SUM(playTime) AS mPlayTime, SUM(pauseTime) AS mPauseTime,
SUM(elapsedTime) AS mElapsedTime, SUM(cellRxBytes) AS mCellBytes, SUM(wfRxBytes) AS mWFBytes,
SUM(cellDuration) AS mCellDur, SUM(wfDuration) AS mWFDur
FROM vidsession_log
WHERE playSessionID = '%s' AND tTM < %.3f) b
ON a.playSessionID = b.psid
LEFT OUTER JOIN
(SELECT playSessionID AS psid, MIN(tTM) AS nextTTM
FROM vidsession_log
WHERE playSessionID = '%s' AND tTM > %.3f ) e
ON a.playSessionID = e.psid
""" % (curTTM, curPsID, curPsID, curTTM, curPsID, curTTM)
ret = waveCursor.execute(strSQL)
if ret > 0:
row = waveCursor.fetchone()
if row['playSessionID'] <> None:
if row['bExist'] == 1:
return
else:
vidupdate(waveCursor, aatLog, row)
else:
vidcreate(waveCursor, aatLog)
else: # Insert new playsession
vidcreate(waveCursor, aatLog)
# get BB, BW
#Following code is not related to Open DD. You should ignore it.
#### BEGIN - IGNORE
logSubList = []
getBBinfo(logSubList , aatLog)
insertBBSQL(waveCursor, logSubList)
logSubList = []
getNetConn(logSubList, aatLog)
insertNetInfo(waveCursor, logSubList)
#### END - IGNORE
waveCursor.execute("COMMIT")
except Exception, e:
log.error("processAATLOG : %s" % e)
log.error(aatLog)
if strSQL <> None:
log.error(strSQL)
if waveCursor <> None:
waveCursor.execute("ROLLBACK")
if str(e).find('Deadlock')> 0:
log.error("processAAATLog raise e")
raise e
finally:
if waveCursor <> None:
worker.dbmanager.freeCursor(waveCursor)
| apache-2.0 | -2,276,425,715,459,409,000 | 38.855741 | 211 | 0.637161 | false |
whitesmith/hawkpost | humans/migrations/0007_notification.py | 1 | 1205 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-27 15:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
('humans', '0006_user_server_signed'),
]
operations = [
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=150)),
('body', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('sent_at', models.DateTimeField(null=True)),
('send_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='auth.Group')),
],
options={
'verbose_name': 'Notification',
'verbose_name_plural': 'Notifications',
},
),
]
| mit | 1,414,934,465,553,693,700 | 35.515152 | 132 | 0.570124 | false |
shivupa/pyci | methods/misc/asci_old2.py | 1 | 6642 | import scipy as sp
import scipy.linalg as spla
import scipy.sparse.linalg as splinalg
import numpy as np
from functools import reduce
import pyscf
import itertools
import h5py
from pyscf import gto, scf, ao2mo, fci
import pyscf.tools as pt
import copy
import matplotlib.pyplot as plt
from utils import *
#############
# INPUT
#############
#TODO: implement function that finds particles/holes based on set operations (will be easier with aocc,bocc lists of indices instead of docc,aocc(single),bocc(single))
np.set_printoptions(precision=4,suppress=True)
mol = gto.M(
atom = [['O', (0.000000000000, -0.143225816552, 0.000000000000)],
['H', (1.638036840407, 1.136548822547, -0.000000000000)],
['H', (-1.638036840407, 1.136548822547, -0.000000000000)]],
basis = 'STO-3G',
verbose = 1,
unit='b',
symmetry=True
)
Na,Nb = mol.nelec #nelec is a tuple with (N_alpha, N_beta)
nao=mol.nao_nr()
s = mol.intor('cint1e_ovlp_sph')
t = mol.intor('cint1e_kin_sph')
v = mol.intor('cint1e_nuc_sph')
h=t+v
printroots=4
#############
# FUNCTIONS
#############
""" TODO: remove this?def create_PYSCF_fcidump():
myhf = scf.RHF(mol)
E = myhf.kernel()
c = myhf.mo_coeff
h1e = reduce(np.dot, (c.T, myhf.get_hcore(), c))
eri = ao2mo.kernel(mol, c)
pt.fcidump.from_integrals('fcidump.txt', h1e, eri, c.shape[1],mol.nelectron, ms=0)
cisolver = fci.FCI(mol, myhf.mo_coeff)
print('E(HF) = %.12f, E(FCI) = %.12f' % (E,(cisolver.kernel()[0] + mol.energy_nuc())))
"""
def amplitude(det,excitation):
return 0.1
#############
# INITIALIZE
#############
myhf = scf.RHF(mol)
E = myhf.kernel()
c = myhf.mo_coeff
#if you change the sign of these two orbitals, the hamiltonian matrix elements agree with those from GAMESS
#c.T[2]*=-1
#c.T[5]*=-1
cisolver = fci.FCI(mol, c)
#print('PYSCF E(FCI) = %.12f' % (cisolver.kernel()[0] + mol.energy_nuc()))
efci = cisolver.kernel(nroots=printroots)[0] + mol.energy_nuc()
h1e = reduce(np.dot, (c.T, myhf.get_hcore(), c))
eri = ao2mo.kernel(mol, c)
cdets = 25
tdets = 50
threshold = 1e-13 #threshold for hii and hij
#use eri[idx2(i,j),idx2(k,l)] to get (ij|kl) chemists' notation 2e- ints
#make full 4-index eris in MO basis (only for testing idx2)
#eri_mo = ao2mo.restore(1, eri, nao)
#eri in AO basis
#eri_ao = mol.intor('cint2e_sph')
#eri_ao = eri_ao.reshape([nao,nao,nao,nao])
#print h1e
#print eri
#print np.shape(h1e),np.shape(eri)
#print mol.nelectron, np.shape(h1e)[0]*2
num_orbs=2*nao
num_occ = mol.nelectron
num_virt = num_orbs - num_occ
#bitstring = "1"*num_occ
#bitstring += "0"*num_virt
#print(bitstring)
#starting_amplitude =1.0
#original_detdict = {bitstring:starting_amplitude}
H_core = np.array((cdets,cdets))
H_target = np.array((tdets,tdets))
#generate all determinants
fulldetlist_sets=gen_dets_sets(nao,Na,Nb)
ndets=len(fulldetlist_sets)
#start with HF determinant
original_detdict = {fulldetlist_sets[0]:1.0}
#lists for csr sparse storage of hamiltonian
#if this is just for storage (and not diagonalization) then we can use a dict instead (or store as upper half of sparse matrix)
hrow=[]
hcol=[]
hval=[]
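# Build the sparse CI Hamiltonian in determinant space: diagonal elements come from each
# determinant itself, while off-diagonal elements are non-zero only between determinants that
# differ by a single or double excitation (Slater-Condon rules); all other pairs are skipped
# so the matrix stays sparse.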
for i in range(ndets):
idet=fulldetlist_sets[i]
hii = calc_hii_sets(idet,h1e,eri)
if abs(hii)>threshold: #we probably don't need this
hrow.append(i)
hcol.append(i)
hval.append(hii)
for j in range(i+1,ndets):
jdet=fulldetlist_sets[j]
nexc_ij = n_excit_sets(idet,jdet)
if nexc_ij in (1,2):
if nexc_ij==1:
hij = calc_hij_single_sets(idet,jdet,h1e,eri)
else:
hij = calc_hij_double_sets(idet,jdet,h1e,eri)
if abs(hij)>threshold:
hrow.append(i)
hrow.append(j)
hcol.append(j)
hcol.append(i)
hval.append(hij)
hval.append(hij)
fullham=sp.sparse.csr_matrix((hval,(hrow,hcol)),shape=(ndets,ndets))
#hamiltonian_heatmap(fullham);
#print(len(fulldetlist_sets))
eig_vals,eig_vecs = sp.sparse.linalg.eigsh(fullham,k=2*printroots)
eig_vals_sorted = np.sort(eig_vals)[:printroots] + mol.energy_nuc()
eig_vals_gamess = [-75.0129802245,
-74.7364625517,
-74.6886742417,
-74.6531877287]
print("first {:} pyci eigvals vs PYSCF eigvals".format(printroots))
for i,j in zip(eig_vals_sorted, efci):
print(i,j)
#############
# MAIN LOOP
#############
# a^dagger_i a_j |psi>
temp_detdict = {}
temp_double_detdict = {}
new_detdict = copy.deepcopy(original_detdict)
#print(temp_detdict)
for det in original_detdict:
occ_index = []
virt_index = []
count = 0
for i in det:
if i == "1":
occ_index.append(count)
else:
virt_index.append(count)
count +=1
#print(occ_index)
#print(virt_index)
for i in occ_index:
for j in virt_index:
temp_det = list(det)
temp_det[i] = "0"
temp_det[j] = "1"
temp_det = ''.join(temp_det)
temp_detdict[temp_det] = 0.1
#print temp_det, temp_amplitude
for k in occ_index:
for l in virt_index:
if k>i and l>j:
temp_double_det = list(det)
temp_double_det[i] = "0"
temp_double_det[j] = "1"
temp_double_det[k] = "0"
temp_double_det[l] = "1"
temp_double_det = ''.join(temp_double_det)
temp_double_detdict[temp_double_det] = 0.3
for i in temp_detdict:
try:
new_detdict[i] += temp_detdict[i]
except:
new_detdict.update({i:temp_detdict[i]})
for i in temp_double_detdict:
try:
new_detdict[i] += temp_double_detdict[i]
except:
new_detdict.update({i:temp_double_detdict[i]})
#new_detdict.update(temp_double_detdict)
#detdict = {}
#new_detdict.update(original_detdict)
#print("shiv",len(temp_detdict))
#print("shiv",len(temp_double_detdict))
#for i in new_detdict:
#print(i, new_detdict[i])
#print(sorted(new_detdict.items(), key=lambda x: x[1]))
#print(len(new_detdict))
#one of these agrees with gamess and one does not
#print("d_a_b_single(('1111100','1110110'),('1111100','1111100'))")
#d_a_b_single(('1111100','1110110'),('1111100','1111100'))
#print("d_a_b_single(('1111100','1011110'),('1111100','1110110'))")
#print(d_a_b_single(('1111100','1011110'),('1111100','1110110')))
#print("d_a_b_single(('1111100','1110011'),('1111100','1111001'))")
#print(d_a_b_single(('1111100','1110011'),('1111100','1111001')))
| gpl-3.0 | 6,862,469,402,105,148,000 | 30.932692 | 166 | 0.606896 | false |
dvl/cdzforever.net | cdzforever/apps/manga/migrations/0002_auto__add_field_pagina_image.py | 1 | 2062 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Pagina.image'
db.add_column(u'manga_pagina', 'image',
self.gf('django.db.models.fields.files.ImageField')(default=None, max_length=100),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Pagina.image'
db.delete_column(u'manga_pagina', 'image')
models = {
u'manga.capitulo': {
'Meta': {'ordering': "('num', 'titulo')", 'object_name': 'Capitulo'},
'id': ('django_pg.models.fields.uuid.UUIDField', [], {u'auto_add': "u'uuid:uuid4'", 'primary_key': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {}),
'serie': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['manga.Serie']"}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '90'})
},
u'manga.pagina': {
'Meta': {'ordering': "('num',)", 'object_name': 'Pagina'},
'capitulo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['manga.Capitulo']"}),
'id': ('django_pg.models.fields.uuid.UUIDField', [], {u'auto_add': "u'uuid:uuid4'", 'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'num': ('django.db.models.fields.IntegerField', [], {})
},
u'manga.serie': {
'Meta': {'ordering': "('nome',)", 'object_name': 'Serie'},
'capitulos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django_pg.models.fields.uuid.UUIDField', [], {u'auto_add': "u'uuid:uuid4'", 'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '90'})
}
}
complete_apps = ['manga'] | mit | 1,814,648,615,557,720,600 | 44.844444 | 120 | 0.549467 | false |
vyzyv/university | python/Zestaw8/8_3.py | 1 | 1027 | from random import uniform
def calc_pi(n=100, r=1):
"""Function calculating approximation of pi number.
Based on Monte Carlo algorithm
Arguments are:
n - number of random numbers generated by uniform distribution
r - radius of a circle"""
circle_counter = 0
for i in range(n):
x, y = uniform(0,2*r), uniform(0,2*r)
#check whether point is inside the circle
if ((x-r)**2 + (y-r)**2) < r**2:
circle_counter += 1
return 4*circle_counter / n
if __name__ == '__main__':
print('Pi approximation with n=%i and r=%f' %(10, 1), calc_pi(10, 1))
print('Pi approximation with n=%i and r=%f' %(100, 1), calc_pi(100, 1))
print('Pi approximation with n=%i and r=%f' %(1000, 1), calc_pi(1000, 1))
print('Pi approximation with n=%i and r=%f' %(10000, 1), calc_pi(10000, 1))
print('Pi approximation with n=%i and r=%f' %(100000, 1), calc_pi(100000, 1))
print('Pi approximation with n=%i and r=%f' %(1000000, 1), calc_pi(1000000, 1))
| apache-2.0 | 4,753,075,106,202,006,000 | 34.413793 | 83 | 0.594937 | false |
jeremymcrae/denovoFilter | tests/test_site_deviations.py | 1 | 4718 | '''
Copyright (c) 2016 Genome Research Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import unittest
import math
from pandas import DataFrame, Series
from denovoFilter.site_deviations import site_strand_bias, test_sites, test_genes
class TestSiteDeviations(unittest.TestCase):
def setUp(self):
self.counts = DataFrame({'person_stable_id': ['a', 'b'],
'chrom': ['1', '1'],
'pos': [1, 2],
'ref': ['A', 'G'],
'alt': ['C', 'T'],
'symbol': ['TEST1', 'TEST2'],
'child_ref_F': [40, 15], 'child_ref_R': [15, 15],
'child_alt_F': [20, 15], 'child_alt_R': [25, 15],
'mother_ref_F': [40, 20], 'mother_ref_R': [15, 20],
'mother_alt_F': [0, 0], 'mother_alt_R': [1, 0],
'father_ref_F': [60, 30], 'father_ref_R': [30, 30],
'father_alt_F': [0, 0], 'father_alt_R': [1, 1],
'min_parent_alt': [1, 0]
})
def check_series(self, a, b):
'''
'''
for x, y in zip(a, b):
if math.isnan(x):
self.assertTrue(math.isnan(x))
self.assertTrue(math.isnan(y))
else:
self.assertAlmostEqual(x, y, 14)
def test_site_strand_bias(self):
''' check that site_strand_bias works correctly
'''
site = {'ref_F': 5, 'ref_R': 30, 'alt_F': 10, 'alt_R': 10}
self.assertAlmostEqual(site_strand_bias(site), 0.010024722592, places=11)
site = {'ref_F': 30, 'ref_R': 30, 'alt_F': 10, 'alt_R': 10}
self.assertEqual(site_strand_bias(site), 1.0)
# zero counts give a p-value of 1.0
site = {'ref_F': 0, 'ref_R': 0, 'alt_F': 0, 'alt_R': 0}
self.assertEqual(site_strand_bias(site), 1.0)
# check that values which would ordinarily give out of bounds errors
# instead are converted to a p-value of 1.0. Some versions of scipy have
# fixed this bug, and give a correct value, which we need to check too.
# A later scipy version (0.19.0) changed how the hypergeometric
# distribution is estimated, which shifts the Fisher exact p-value from
# the 7th significant figure onwards.
site = {'ref_F': 1, 'ref_R': 2, 'alt_F': 9, 'alt_R': 84419233}
self.assertIn(site_strand_bias(site), (1.0, 3.5536923140874242e-07,
3.5536916732288063e-07))
def test_test_sites(self):
''' check p-values from tests of strand and parental alt bias.
'''
expected = [[0.00061560815415820467, 1.0], [0.035457115371929658, 0.18307032892094907]]
for x, y in zip(test_sites(self.counts), expected):
self.check_series(x, y)
# check when we mask some variants due to failing earlier variants
expected = [[float('nan'), 1.0], [float('nan'), 0.18307032892094907]]
for x, y in zip(test_sites(self.counts, pass_status=[False, True]), expected):
self.check_series(x, y)
def test_test_genes(self):
''' check p-values from test of parental alt bias within genes.
'''
# one of the sites fails the strand bias filter, so this gets dropped
# for checking gene-based paternal alt biases
sb, pa = test_sites(self.counts)
expected = [float('nan'), 0.18307032892094907]
self.check_series(test_genes(self.counts, sb), expected)
# check when we mask some variants due to failing earlier variants
expected = [float('nan'), float('nan')]
self.check_series(test_genes(self.counts, sb, pass_status=[True, False]), expected)
| mit | 4,832,060,781,132,746,000 | 41.890909 | 95 | 0.605341 | false |
dougwig/a10-neutron-lbaas | a10_neutron_lbaas/db/models/scaling_group.py | 1 | 10831 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sqlalchemy as sa
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import inspect
from sqlalchemy.orm import backref, relationship
from a10_neutron_lbaas.db import model_base as models
LOG = logging.getLogger(__name__)
class A10ScalingGroup(models.A10Base):
"""A10 Scaling Group - container of switch and workers"""
__tablename__ = u'a10_scaling_groups'
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
tenant_id = sa.Column(sa.String(255), nullable=True)
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
scaling_policy_id = sa.Column(sa.String(36),
sa.ForeignKey('a10_scaling_policies.id'),
nullable=True)
scaling_policy = relationship('A10ScalingPolicy', backref='scaling_groups')
switches = relationship('A10ScalingGroupSwitch')
workers = relationship('A10ScalingGroupWorker')
members = relationship('A10ScalingGroupMember', backref='scaling_group')
__mapper_args__ = {
'polymorphic_identity': __tablename__
}
class A10ScalingGroupBinding(models.A10Base):
__tablename__ = u'a10_scaling_group_bindings'
id = sa.Column(sa.String(36),
primary_key=True,
nullable=False,
default=models._uuid_str)
scaling_group_id = sa.Column(sa.String(36),
sa.ForeignKey('a10_scaling_groups.id'),
nullable=False)
scaling_group = relationship(A10ScalingGroup, backref='bindings')
lbaas_loadbalancer_id = sa.Column(sa.String(36),
unique=True,
nullable=False)
class A10ScalingGroupMember(models.A10Base):
"""A10 Scaling Group Member - switch/worker depending on 'role'"""
__tablename__ = "a10_scaling_group_members"
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
type = sa.Column(sa.String(50), nullable=False)
scaling_group_id = sa.Column(sa.String(36),
sa.ForeignKey('a10_scaling_groups.id'),
nullable=False)
tenant_id = sa.Column(sa.String(255), nullable=True)
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
host = sa.Column(sa.String(255), nullable=False)
api_version = sa.Column(sa.String(12), nullable=False)
username = sa.Column(sa.String(255), nullable=False)
password = sa.Column(sa.String(255), nullable=False)
protocol = sa.Column(sa.String(255), nullable=False)
port = sa.Column(sa.Integer, nullable=False)
nova_instance_id = sa.Column(sa.String(36), nullable=False)
__mapper_args__ = {
'polymorphic_identity': __tablename__,
'polymorphic_on': type
}
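    # workers and switches below are joined-table subclasses of this model,
    # distinguished by the polymorphic 'type' column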
def add_virtual_server(self, neutron_id, **kwargs):
vs = A10ScalingGroupMemberVirtualServer.create(
neutron_id=neutron_id,
**kwargs)
self.virtual_servers.append(vs)
return vs
def get_virtual_server(self, neutron_id):
return inspect(self).session.\
query(A10ScalingGroupMemberVirtualServer).\
filter_by(member_id=self.id, neutron_id=neutron_id).\
first()
def delete_virtual_server(self, neutron_id):
vs = self.get_virtual_server(neutron_id)
if vs:
inspect(self).session.delete(vs)
class A10ScalingGroupWorker(A10ScalingGroupMember):
__tablename__ = "a10_scaling_group_workers"
id = sa.Column(sa.String(36),
sa.ForeignKey(u'a10_scaling_group_members.id'),
primary_key=True,
default=models._uuid_str,
nullable=False)
__mapper_args__ = {
'polymorphic_identity': __tablename__,
}
class A10ScalingGroupSwitch(A10ScalingGroupMember):
__tablename__ = "a10_scaling_group_switches"
id = sa.Column(sa.String(36),
sa.ForeignKey(u'a10_scaling_group_members.id'),
primary_key=True,
default=models._uuid_str,
nullable=False)
__mapper_args__ = {
'polymorphic_identity': __tablename__,
}
class A10ScalingGroupMemberVirtualServer(models.A10Base):
__tablename__ = "a10_scaling_group_member_virtual_servers"
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
member_id = sa.Column(sa.String(36),
sa.ForeignKey(u'a10_scaling_group_members.id'),
nullable=False)
member = relationship('A10ScalingGroupMember',
backref=backref('virtual_servers', cascade='all, delete-orphan'))
neutron_id = sa.Column(sa.String(36),
nullable=False)
ip_address = sa.Column(sa.String(50), nullable=False)
interface_ip_address = sa.Column(sa.String(50), nullable=True)
sflow_uuid = sa.Column(sa.String(36), nullable=False)
def add_port(self, port, **kwargs):
vs = A10ScalingGroupMemberVirtualServerPort.create(
port=port,
**kwargs)
self.ports.append(vs)
return vs
def get_port(self, port):
return inspect(self).session.\
query(A10ScalingGroupMemberVirtualServerPort).\
filter_by(virtual_server_id=self.id, port=port).\
first()
def delete_port(self, port):
port = self.get_port(port)
if port:
inspect(self).session.delete(port)
class A10ScalingGroupMemberVirtualServerPort(models.A10Base):
__tablename__ = "a10_scaling_group_member_virtual_server_ports"
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
virtual_server_id = sa.Column(sa.String(36),
sa.ForeignKey(u'a10_scaling_group_member_virtual_servers.id'),
nullable=False)
virtual_server = relationship('A10ScalingGroupMemberVirtualServer',
backref=backref('ports', cascade='all, delete-orphan'))
port = sa.Column(sa.Integer,
nullable=False)
protocol = sa.Column(sa.String(255), nullable=False)
sflow_uuid = sa.Column(sa.String(36), nullable=False)
class A10ScalingPolicy(models.A10Base):
__tablename__ = "a10_scaling_policies"
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
tenant_id = sa.Column(sa.String(255), nullable=True)
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
cooldown = sa.Column(sa.Integer, nullable=False)
min_instances = sa.Column(sa.Integer, nullable=False)
max_instances = sa.Column(sa.Integer, nullable=True)
reactions = relationship('A10ScalingPolicyReaction',
order_by="A10ScalingPolicyReaction.position",
collection_class=ordering_list('position'),
backref='policy')
def scaling_group_ids(self):
return [sg.id for sg in self.scaling_groups]
class A10ScalingPolicyReaction(models.A10Base):
__tablename__ = "a10_scaling_policy_reactions"
# A surrogate key is required by ordering_list
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
scaling_policy_id = sa.Column(sa.String(36),
sa.ForeignKey('a10_scaling_policies.id'),
nullable=False)
position = sa.Column(sa.Integer,
nullable=False)
alarm_id = sa.Column(sa.String(36),
sa.ForeignKey('a10_scaling_alarms.id'),
nullable=False)
action_id = sa.Column(sa.String(36),
sa.ForeignKey('a10_scaling_actions.id'),
nullable=False)
alarm = relationship('A10ScalingAlarm', backref='reactions')
action = relationship('A10ScalingAction', backref='reactions')
class A10ScalingAlarm(models.A10Base):
__tablename__ = "a10_scaling_alarms"
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
tenant_id = sa.Column(sa.String(255), nullable=True)
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
aggregation = sa.Column(sa.String(50), nullable=False)
measurement = sa.Column(sa.String(50), nullable=False)
operator = sa.Column(sa.String(50), nullable=False)
threshold = sa.Column(sa.Float(), nullable=False)
unit = sa.Column(sa.String(50), nullable=False)
period = sa.Column(sa.Integer, nullable=False)
period_unit = sa.Column(sa.String(50), nullable=False)
def scaling_group_ids(self):
return set(x
for reaction in self.reactions
for x in reaction.policy.scaling_group_ids())
class A10ScalingAction(models.A10Base):
__tablename__ = "a10_scaling_actions"
id = sa.Column(sa.String(36),
primary_key=True,
default=models._uuid_str,
nullable=False)
tenant_id = sa.Column(sa.String(255), nullable=True)
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
action = sa.Column(sa.String(50), nullable=False)
amount = sa.Column(sa.Integer)
def scaling_group_ids(self):
return set(x
for reaction in self.reactions
for x in reaction.policy.scaling_group_ids())
| apache-2.0 | -8,164,595,207,262,769,000 | 36.348276 | 96 | 0.599391 | false |
ppolewicz/ant-colony | antcolony/simulator.py | 1 | 1673 | from stats import QueenStats
from ant_move import AntStartMove
from edge import DummyEdgeEnd
class Simulator(object):
def __init__(self, reality, simulation_class, reality_processors):
self.reality = reality
self.simulation_class = simulation_class
self.reality_processors = reality_processors
def simulate(self, queen, amount_of_ants, stats_saver):
ant_classes = queen.spawn_ants(amount_of_ants)
ants = [ant_class(self.reality.environment_parameters) for ant_class in ant_classes]
anthills = self.reality.world.get_anthills()
antmoves = list(self.get_start_antmoves(ants, anthills))
for reality_processor in self.reality_processors:
reality_processor.set_ant_count(len(ants))
antmoves.extend(self.reality_processors)
stats = QueenStats(self.reality, len(ants), stats_saver)
simulation = self.simulation_class(self.reality, antmoves, stats)
return simulation
def get_results(self, simulation):
ticks = simulation.ticks
stats = simulation.stats
elapsed_time = self.reality.world.elapsed_time
return elapsed_time, ticks, stats
def reset(self):
self.reality.world.reset()
for reality_processor in self.reality_processors:
reality_processor.reset()
def get_start_antmoves(self, ants, anthills):
""" iterator """
counter = 0
number_of_anthills = len(anthills)
anthills = list(anthills)
for ant in ants:
anthill = anthills[counter % number_of_anthills]
yield AntStartMove(ant, DummyEdgeEnd(anthill))
counter += 1
| bsd-3-clause | 257,776,731,439,886,800 | 41.897436 | 92 | 0.663479 | false |
sixu05202004/newsmeme | newsmeme/newsmeme/views/account.py | 1 | 6900 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from flask import Module, flash, request, g, current_app, \
abort, redirect, url_for, session, jsonify
from flask.ext.mail import Message
from flask.ext.babel import gettext as _
from flask.ext.principal import identity_changed, Identity, AnonymousIdentity
from newsmeme.forms import ChangePasswordForm, EditAccountForm, \
DeleteAccountForm, LoginForm, SignupForm, RecoverPasswordForm
from newsmeme.models import User
from newsmeme.helpers import render_template
from newsmeme.extensions import db, mail
from newsmeme.permissions import auth
account = Module(__name__)
@account.route("/login/", methods=("GET", "POST"))
def login():
form = LoginForm(login=request.args.get("login", None),
next=request.args.get("next", None))
# TBD: ensure "next" field is passed properly
if form.validate_on_submit():
user, authenticated = \
User.query.authenticate(form.login.data,
form.password.data)
if user and authenticated:
session.permanent = form.remember.data
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.id))
# check if openid has been passed in
openid = session.pop('openid', None)
if openid:
user.openid = openid
db.session.commit()
flash(_("Your OpenID has been attached to your account. "
"You can now sign in with your OpenID."), "success")
else:
flash(
_("Welcome back, %(name)s", name=user.username), "success")
next_url = form.next.data
if not next_url or next_url == request.path:
next_url = url_for('user.posts', username=user.username)
return redirect(next_url)
else:
flash(_("Sorry, invalid login"), "error")
return render_template("account/login.html", form=form)
@account.route("/signup/", methods=("GET", "POST"))
def signup():
form = SignupForm(next=request.args.get("next"))
if form.validate_on_submit():
user = User()
form.populate_obj(user)
db.session.add(user)
db.session.commit()
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.id))
flash(_("Welcome, %(name)s", name=user.username), "success")
next_url = form.next.data
if not next_url or next_url == request.path:
next_url = url_for('user.posts', username=user.username)
return redirect(next_url)
return render_template("account/signup.html", form=form)
@account.route("/logout/")
def logout():
flash(_("You are now logged out"), "success")
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
return redirect(url_for('frontend.index'))
@account.route("/forgotpass/", methods=("GET", "POST"))
def forgot_password():
form = RecoverPasswordForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
flash(_("Please see your email for instructions on "
"how to access your account"), "success")
user.activation_key = str(uuid.uuid4())
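            # the activation key doubles as a one-time reset token;
            # change_password() below looks the user up by this value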
db.session.commit()
body = render_template("emails/recover_password.html",
user=user)
message = Message(subject=_("Recover your password"),
body=body,
sender=current_app.config.get(
'DEFAULT_MAIL_SENDER'),
recipients=[user.email])
mail.send(message)
return redirect(url_for("frontend.index"))
else:
flash(_("Sorry, no user found for that email address"), "error")
return render_template("account/recover_password.html", form=form)
@account.route("/changepass/", methods=("GET", "POST"))
def change_password():
user = None
if g.user:
user = g.user
elif 'activation_key' in request.values:
user = User.query.filter_by(
activation_key=request.values['activation_key']).first()
if user is None:
abort(403)
form = ChangePasswordForm(activation_key=user.activation_key)
if form.validate_on_submit():
user.password = form.password.data
user.activation_key = None
db.session.commit()
flash(_("Your password has been changed, "
"please log in again"), "success")
        # after a successful password change, force the user to log out
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
return redirect(url_for("account.login"))
return render_template("account/change_password.html", form=form)
@account.route("/edit/", methods=("GET", "POST"))
@auth.require(401)
def edit():
form = EditAccountForm(g.user)
if form.validate_on_submit():
form.populate_obj(g.user)
db.session.commit()
flash(_("Your account has been updated"), "success")
return redirect(url_for("frontend.index"))
return render_template("account/edit_account.html", form=form)
@account.route("/delete/", methods=("GET", "POST"))
@auth.require(401)
def delete():
# confirm password & recaptcha
form = DeleteAccountForm()
if form.validate_on_submit():
db.session.delete(g.user)
db.session.commit()
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
flash(_("Your account has been deleted"), "success")
return redirect(url_for("frontend.index"))
return render_template("account/delete_account.html", form=form)
@account.route("/follow/<int:user_id>/", methods=("POST",))
@auth.require(401)
def follow(user_id):
user = User.query.get_or_404(user_id)
g.user.follow(user)
db.session.commit()
body = render_template("emails/followed.html",
user=user)
mail.send_message(subject=_("%s is now following you" % g.user.username),
body=body,
sender=current_app.config.get('DEFAULT_MAIL_SENDER'),
recipients=[user.email])
return jsonify(success=True,
reload=True)
@account.route("/unfollow/<int:user_id>/", methods=("POST",))
@auth.require(401)
def unfollow(user_id):
user = User.query.get_or_404(user_id)
g.user.unfollow(user)
db.session.commit()
return jsonify(success=True,
reload=True)
| bsd-3-clause | 1,973,843,528,547,501,600 | 26.733871 | 79 | 0.589852 | false |
vwc/agita | src/vwcollective.simplecontact/vwcollective/simplecontact/browser/contactfolderview.py | 1 | 2856 | from zope.interface import implements, Interface
from Acquisition import aq_inner
from Products.Five import BrowserView
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from vwcollective.simplecontact.interfaces import IContactFolder
from vwcollective.simplecontact.interfaces import ISimpleContact
from vwcollective.simplecontact.interfaces import IPreviewTagProvider
from vwcollective.simplecontact import simplecontactMessageFactory as _
class ContactFolderView(BrowserView):
"""
ContactFolder browser view
"""
template = ViewPageTemplateFile('contactfolderview.pt')
def __call__(self):
return self.template()
@property
def portal_catalog(self):
return getToolByName(self.context, 'portal_catalog')
@property
def portal(self):
return getToolByName(self.context, 'portal_url').getPortalObject()
def has_subfolders(self):
"""Test if we have subfolders"""
return len(self.contained_contactfolders()) > 0
def contained_contactfolders(self):
"""Query the catalog for contained ContactFolders in order to decide
wether to show a catagory preview or the simplecontacts directly"""
context = aq_inner(self.context)
return [dict(title=cf.Title,
description=cf.Description,
url=cf.getURL(),
preview_tag=IPreviewTagProvider(cf.getObject()).tag,
image=cf.getObject().image,
)
for cf in self.portal_catalog(object_provides=IContactFolder.__identifier__,
path=dict(query='/'.join(context.getPhysicalPath()),
depth=1),
review_state='published',)
]
def contained_contacts(self):
"""List objects of type SimpleContact"""
context = aq_inner(self.context)
return [dict(title=c.Title,
url=c.getURL(),
profession=c.getObject().profession,
position=c.getObject().position,
email=c.getObject().email,
phone=c.getObject().phone,
image=c.getObject().image,
file=c.getObject().vita,
)
for c in self.portal_catalog(object_provides=ISimpleContact.__identifier__,
path=dict(query='/'.join(context.getPhysicalPath()),
depth=1),
sort_on='getObjPositionInParent',
review_state='published',)
]
| mit | -2,913,841,775,823,524,000 | 41 | 98 | 0.569678 | false |
RoyShulman/openstack-project | cinder_functions.py | 1 | 2028 | from cinderclient.v2 import client as cinderClient
import easygui
class Cinder:
def __init__(self, keystone_session):
self.cinder_client = cinderClient.Client(session=keystone_session)
def create_volume(self, instance_name):
"""
        Create an empty block volume for an instance. The volume will be named INSTANCE_NAME + "Volume"
        :param instance_name: Name of the instance the volume will be added to
        """
        try:
            name = instance_name + "Volume"
            self.cinder_client.volumes.create(size=1000, name=name)
except Exception, e:
easygui.msgbox("Something went wrong, please try again")
finally:
return
def list_volumes(self):
"""
List all available volumes
:return: all available volumes
"""
try:
            return self.cinder_client.volumes.list()
except Exception, e:
print e
easygui.msgbox("Something went wrong, please try again")
return
def get_volume_id(self, volume_name):
"""
Return the volume ID of a given volume name
:param volume_name: Name of the volume
:return: string of the unique of ID
"""
try:
for volume in self.list_volumes():
if volume.name == volume_name:
return volume.id
except Exception, e:
print e
easygui.msgbox("Something went wrong please try again")
return
def attach_volume(self, instance_id, instance_name):
"""
Attach a volume to an instance
:param instance_id: Unique ID of the instance
:param instance_name: Name of the instance
"""
volume_id = self.get_volume_id(instance_name + "Volume")
try:
self.cinder_client.volumes.attach(volume_id, instance_id)
except Exception, e:
print e
easygui.msgbox("Something went wrong please try again")
return
| mit | -8,199,254,290,680,970,000 | 32.245902 | 98 | 0.582347 | false |
mtik00/yamicache | setup.py | 1 | 1167 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = []
setup_requirements = ['pytest-runner']
test_requirements = ['pytest']
setup(
name='yamicache',
version='0.6.0',
description="Yet another in-memory caching package",
long_description=readme + '\n\n' + history,
author="Timothy McFadden",
author_email='[email protected]',
url='https://github.com/mtik00/yamicache',
packages=find_packages(include=['yamicache']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=True,
keywords='yamicache',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| mit | 1,442,082,234,181,209,600 | 26.785714 | 56 | 0.652099 | false |
fosfataza/protwis | common/diagrams_arrestin.py | 1 | 24369 | from common.diagrams import Diagram
from common.definitions import ARRESTIN_SEGMENTS
from residue.models import Residue
from residue.models import ResidueGenericNumber
from residue.models import ResidueNumberingScheme
from django.utils.safestring import mark_safe
from math import cos, sin, pi, floor, sqrt
from datetime import datetime
from collections import OrderedDict
class DrawArrestinPlot(Diagram):
def __init__(self, residue_list, protein_class, protein_name, nobuttons = None):
self.nobuttons = 'arrestin'
self.type = 'snakeplot'
self.receptorId = protein_name
self.family = protein_class
self.output = ''
# residueType = 'sp'
# FIXME DO PUREIMAGE
# $pureImage = isset($_GET['pureimage']) && $_GET['pureimage'] == 'TRUE' ? TRUE : FALSE;
# get sequence, baldwin, and bw information of this receptor
self.sequence = residue_list
self.segments = {}
self.segments_full = OrderedDict()
i = 0
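        # each residue is stored per segment as [sequence number, amino acid, generic number label, display label]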
for r in self.sequence:
if r.protein_segment:
segment = str(r.protein_segment.slug)
            elif r.segment_slug: # from family alignment
segment = str(r.segment_slug)
if segment not in self.segments:
self.segments[segment] = []
self.segments_full[segment] = r.protein_segment
label = ''
displaylabel = ''
if r.generic_number:
label = r.generic_number.label
elif hasattr(r, 'family_generic_number'):
label = r.family_generic_number
if r.display_generic_number: displaylabel = r.display_generic_number.label
displaylabel = r.amino_acid + str(r.sequence_number) + " \n " + displaylabel
if hasattr(r, 'frequency'):
displaylabel = displaylabel + "\n" + r.frequency
self.segments[segment].append([r.sequence_number,r.amino_acid,label,displaylabel])
i += 1
# for helix_num in range(1,2): #FIX for missing generic numbers
# rs = self.segments['H5']
# for i in range(0,len(rs)):
# if not rs[i][2]:
# if i+1<len(rs): #if there is a next one
# if rs[i+1][2]: #if it has generic number
# number = str(int(rs[i+1][2].split('x')[1])-1)
# rs[i][2] = str(helix_num) + "x" + number
# print(rs[i][2])
self.helixWidth = 75 # Width of helix
self.resNumPerRow = 4 # Residue number per row in helix
self.angleDeg = 22.0 # Angle size of each helix turn
self.residue_radius = 12 # Radius of the residue circle
# svg image padding offset
self.offsetX = -40 # -200
self.offsetY = 0 # -50
# margin between two helixes
self.margin = 0
# highest and lowest bound of this svg
self.high = 0
self.low = 0
# keep track of max Y positions of intra/extra loops
self.maxY = {'bottom': 0, 'top': 0}
self.maxX = {'left': 0, 'right': 0}
# helices length
# helicesLength = Svg::getSnakePlotHelicesLength($baldwin, $helixWidth, $angleDeg) #FIXME
# top and bottom residue coords in each helix
self.TBCoords = {}
self.output = ""
self.traceoutput = ""
self.helixoutput = ""
self.count = 1
self.count_sheet = 0
for s in ARRESTIN_SEGMENTS['Full']:
if self.segments_full[s].category == 'helix':
self.helixoutput += self.drawSnakePlotHelix(s)
self.count += 1
if self.segments_full[s].category == 'sheet':
self.helixoutput += self.drawSnakePlotSheet(s)
self.count += 1
self.count_sheet += 1
self.count = 0
for s in ARRESTIN_SEGMENTS['Full']:
if self.segments_full[s].category == 'loop' and s != 's19c':
#pass
try:
self.drawSnakePlotLoop(s)
except:
print(s)
else:
self.count += 1
self.drawSnakePlotTerminals()
def __str__(self):
self.output = "<g id=snake transform='translate(0, " + str(-self.low+ self.offsetY) + ")'>" + self.traceoutput+self.output+self.helixoutput+self.drawToolTip() + "</g>"; #for resizing height
return mark_safe(self.create(self.output,self.maxX['right']+40,self.high-self.low+self.offsetY*2,"snakeplot", self.nobuttons))
def drawSnakePlotHelix(self, segment):
rs = self.segments[segment]
helix_num = self.count
self.TBCoords[helix_num] = {}
        if helix_num % 2 != 0: rs.reverse() # reverse direction for odd-numbered helices, which run from inside to outside
output_residues = []
res_num = len(self.segments[segment])
output_residue_in = ''
output_residue_out = ''
output_trace = ''
startX = self.helixWidth + 40 + self.offsetX + (self.margin + self.helixWidth) * (helix_num - 1) - (self.count_sheet*20)
startY = self.offsetY
row_length = 3
row_pos = 0
row = 0
prevGeneric = '0.0.0'
bulgeX = 0
bulgeY = 0
bulge = 0
skip = 0
indentX = -self.residue_radius+3
indentY = 3
for i in range(0,res_num):
prevGeneric_number = prevGeneric.split('.')[2]
currGeneric_number = rs[i][2].split('.')[2]
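            # a generic number that extends the neighbouring residue's number with a trailing '1'
            # marks a bulge; a jump in the consecutive numbering marks a skipped (missing) residue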
if ((helix_num%2==0 and prevGeneric_number+'1'==currGeneric_number) or (helix_num%2!=0 and str(int(prevGeneric_number)-1)+'1'==currGeneric_number)) and i!=0:
bulge = 1
if row_pos==0: # if first in row, use space for bulge
bulgeY = 5
bulgeX = 7
else:
bulgeY = 5
bulgeX = 5
row_length+=1
elif i!=0 and ((helix_num%2!=0 and int(prevGeneric_number)-1!= int(currGeneric_number)) or (helix_num%2==0 and int(prevGeneric_number)+1!= int(currGeneric_number))):
skip = 1
if row_pos!=0 and row_pos+1<row_length:
nextX =round(startX-(row_pos+1)*self.residue_radius*1.5+indentX+bulgeX)
nextY = round(startY+row*self.residue_radius*2.4+(row_pos+1)*self.residue_radius*0.5+indentY+bulgeY)
output_trace += "<line x1="+str(prevX)+" y1="+str(prevY)+" x2="+str(nextX)+" y2="+str(nextY)+" stroke='grey' fill='none' stroke-width='1' stroke-dasharray='1,1' />"
row_pos +=1
elif row_pos+1==row_length:
row+=1
row_pos=0
row_length = 3 if row_length == 4 else 4
else:
row_pos +=1
# move left as you go down a row
x = round(startX-row_pos*self.residue_radius*1.6+indentX+bulgeX)
# Move down with right amount
y = round(startY+row*self.residue_radius*2.4+row_pos*self.residue_radius*0.5+indentY+bulgeY)
output_residue = self.DrawResidue(x,y,rs[i][1], rs[i][0], rs[i][3], self.residue_radius)
if x<self.maxX['left']: self.maxX['left'] = x
if x>self.maxX['right']: self.maxX['right'] = x
row_pos += 1
if bulge==1:
if row_pos==1: # if first in row, use space for bulge
bulgeY = -3
bulgeX = 10
else:
bulgeY = -3
bulgeX = 7
rs[i][2] = prevGeneric # make it the prev one, to catch missing ones correctly
bulge = 0
if row_length==3:
output_residue_in += output_residue
else:
output_residue_out += output_residue
output_residues.append(output_residue)
if i==0: self.TBCoords[helix_num]['top'] = [x,y]
if i==res_num-1: self.TBCoords[helix_num]['bottom'] = [x,y]
if (row_pos==1 and row!=0) or (skip==1 and row_pos==2): # if need for trace
if row_length==3: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-21)+" "+str(y-8)+" T"+str(x)+" "+str(y)
if row_length>=4: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-24)+" "+str(y-7)+" T"+str(x)+" "+str(y)
output_trace += "<path d='" + points + "' stroke='grey' fill='none' stroke-width='2' />"
# alternate between 4 and 3 res per row
if row_length>3 and row_pos>=row_length:
row_length=3
row_pos = 0
row += 1
bulgeX = 0
bulgeY = 0
indentX = -self.residue_radius+3
indentY = 3
elif row_length==3 and row_pos>=3:
row_length=4
row_pos = 0
row += 1
bulgeX = 0
bulgeY = 0
indentX = 0
indentY = 0
skip = 0
prevX = x
prevY = y
prevGeneric = rs[i][2]
temp = ''
if helix_num%2!=0: output_residues.reverse()
for res in output_residues:
temp += res
return output_trace+temp
def drawSnakePlotSheet(self, segment):
rs = self.segments[segment]
helix_num = self.count
self.TBCoords[helix_num] = {}
        if helix_num%2!=0: rs.reverse() # reverse direction for odd-numbered helices, which run from inside to outside
output_residues = []
res_num = len(self.segments[segment])
output_residue_in = ''
output_residue_out = ''
output_trace = ''
startX = 10+self.offsetX+(self.margin+self.helixWidth)*(helix_num-1)-(self.count_sheet*10)
startY = self.offsetY
row_length = 3
row_pos = 0
row = 0
prevGeneric = '0.0.0'
bulgeX = 0
bulgeY = 0
bulge = 0
skip = 0
indentX = -self.residue_radius+3
indentY = 3
for i in range(0,res_num):
prevGeneric_number = prevGeneric.split('.')[2]
currGeneric_number = rs[i][2].split('.')[2]
if (helix_num%2==0 and prevGeneric_number+'1'==currGeneric_number) or (helix_num%2!=0 and str(int(prevGeneric_number)-1)+'1'==currGeneric_number):
bulge = 1
if row_pos==0: # if first in row, use space for bulge
bulgeY = 5
bulgeX = 7
else:
bulgeY = 5
bulgeX = 5
row_length+=1
elif i!=0 and ((helix_num%2!=0 and int(prevGeneric_number)-1!= int(currGeneric_number)) or (helix_num%2==0 and int(prevGeneric_number)+1!= int(currGeneric_number))):
skip = 1
if row_pos!=0 and row_pos+1<row_length:
nextX =round(startX-(row_pos+1)*self.residue_radius*1.5+indentX+bulgeX)
nextY = round(startY+row*self.residue_radius*2.4+(row_pos+1)*self.residue_radius*0.5+indentY+bulgeY)
#output_trace += "<line x1="+str(prevX)+" y1="+str(prevY)+" x2="+str(nextX)+" y2="+str(nextY)+" stroke='grey' fill='none' stroke-width='1' stroke-dasharray='1,1' />"
row_pos +=1
elif row_pos+1==row_length:
row+=1
row_pos=0
row_length = 3 if row_length == 4 else 4
else:
row_pos +=1
# move left as you go down a row
x = round(startX) #+indentX+bulgeX
# Move down with right amount
y = round(startY+i*self.residue_radius*1.5)
output_residue = self.DrawResidueSquare(x,y,rs[i][1], rs[i][0], rs[i][3], self.residue_radius)
if x<self.maxX['left']: self.maxX['left'] = x
if x>self.maxX['right']: self.maxX['right'] = x
row_pos += 1
if bulge==1:
if row_pos==1: # if first in row, use space for bulge
bulgeY = -3
bulgeX = 10
else:
bulgeY = -3
bulgeX = 7
rs[i][2] = prevGeneric # make it the prev one, to catch missing ones correctly
bulge = 0
if row_length==3:
output_residue_in += output_residue
else:
output_residue_out += output_residue
output_residues.append(output_residue)
if i==0: self.TBCoords[helix_num]['top'] = [x,y]
if i==res_num-1: self.TBCoords[helix_num]['bottom'] = [x,y]
if (row_pos==1 and row!=0) or (skip==1 and row_pos==2): # if need for trace
if row_length==3: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-21)+" "+str(y-8)+" T"+str(x)+" "+str(y)
if row_length>=4: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-24)+" "+str(y-7)+" T"+str(x)+" "+str(y)
# output_trace += "<path d='" + points + "' stroke='grey' fill='none' stroke-width='2' />"
# alternate between 4 and 3 res per row
if row_length>3 and row_pos>=row_length:
row_length=3
row_pos = 0
row += 1
bulgeX = 0
bulgeY = 0
indentX = -self.residue_radius+3
indentY = 3
elif row_length==3 and row_pos>=3:
row_length=4
row_pos = 0
row += 1
bulgeX = 0
bulgeY = 0
indentX = 0
indentY = 0
skip = 0
prevX = x
prevY = y
prevGeneric = rs[i][2]
temp = ''
if helix_num%2!=0: output_residues.reverse()
for res in output_residues:
temp += res
return output_trace+temp
def drawSnakePlotLoop(self, segment):
y_offset = 20
font_size = 12
font_family = 'courier'
bezier_pull = 90
name = segment
x_at_max_y = 0
rs = self.segments[segment] # get residues
if self.count % 2 == 0:
position = 'bottom'
orientation = 1
else:
position = 'top'
orientation = -1
        # skip this loop if the preceding helix has no recorded top/bottom coordinates
if self.count not in self.TBCoords:
return 0
# Get positions of two linking residues from each helix
x1 = self.TBCoords[self.count][position][0]
y1 = self.TBCoords[self.count][position][1]
x2 = self.TBCoords[self.count + 1][position][0]
y2 = self.TBCoords[self.count + 1][position][1]
boxX = (x1+x2)/2 # midway between
if position=='top':
boxY = min(y1,y2)-y_offset # over helix
y_indent = -1*bezier_pull
if position=='bottom':
boxY = max(y1, y2) + y_offset # over helix
y_indent = bezier_pull
points = str(x1)+","+str(y1)+" "+str(boxX)+","+str(boxY)+" "+str(x2)+","+str(y2)
points2 = "M "+str(x1)+" "+str(y1)+" Q"+str(boxX)+" "+str(boxY+y_indent)+" "+str(x2)+" "+str(y2)
# Getting midpoint of Bezier curve http://www.svgbasics.com/curves.html
Dx = ((x1+boxX)/2)
Ex = ((x2+boxX)/2)
Fx = (Dx+Ex)/2
Dy = ((y1+boxY+y_indent)/2)
Ey = ((y2+boxY+y_indent)/2)
Fy = (Dy+Ey)/2
y_indent = y_indent*len(rs)/5 # get an approx need for y_indent for size of loop
super_loop_long_length = 40
between_residues = 18
length_of_residues_in_loop = len(rs)*between_residues-self.residue_radius
length = self.lengthbezier([x1,y1],[boxX,boxY+y_indent],[x2,y2],0.001)
if len(rs)<super_loop_long_length:
tries = 0 # adjust size
while abs(length-length_of_residues_in_loop-70)>5:
# print(abs(length-length_of_residues_in_loop+100),length,length_of_residues_in_loop,tries)
if length-length_of_residues_in_loop-70>5:
y_indent *=0.9
else:
y_indent *=1.1
length = self.lengthbezier([x1,y1],[boxX,boxY+y_indent],[x2,y2],0.001)
tries += 1
if tries>100:
break
pos = (length-length_of_residues_in_loop)/2 # get start pos
prev_where = [x1, y1]
# make rounded arc
points2 = "M "+str(x1)+" "+str(y1)+" Q"+str(boxX)+" "+str(boxY+y_indent)+" "+str(x2)+" "+str(y2)
labelbox = self.wherebezier([x1,y1],[boxX,boxY+y_indent],[x2,y2],0.001,length/2)
labelbox[1][1] += orientation*40
self.output += "<path class='"+name+"' d='" + points2 + "' stroke='black' fill='none' stroke-width='2' />"
max_y = y1
for i in range(0,len(rs)):
r = rs[i]
where = self.wherebezier([x1,y1],[boxX,boxY+y_indent],[x2,y2],0.001,pos)
self.output += self.DrawResidue(where[1][0],where[1][1],r[1], r[0], r[3], self.residue_radius-1,name)
pos += between_residues
if where[1][1]>self.high: self.high = where[1][1]
if where[1][1]<self.low: self.low = where[1][1]
prev_where = where[1][0],where[1][1]
if orientation==-1:
if where[1][1]<self.maxY[position]: self.maxY[position] = where[1][1]
else:
if where[1][1]>self.maxY[position]: self.maxY[position] = where[1][1]
if orientation==-1:
if where[1][1]<max_y:
max_y = where[1][1]
x_at_max_y = where[1][0]
else:
if where[1][1]>max_y:
max_y = where[1][1]
x_at_max_y = where[1][0]
x_at_max_y = where[1][0]
if orientation == 1:
max_y = max_y+25
else:
max_y = max_y-20
self.output += "<rect onclick='toggleLoop(\"."+name+"\",\"long\");' class='"+name+"' x="+str(x_at_max_y-24)+" y="+str(max_y-13)+" rx=5 ry=5 width='55' height='20' stroke='black' fill='white' stroke-width='1' style2='fill:red;stroke:black;stroke-width:5;opacity:0.5'/>"
self.output += str("<text onclick='toggleLoop(\"."+name+"\",\"long\");' class='"+name+"' x="+str(x_at_max_y)+" y="+str(max_y)+" text-anchor='middle' font-size="+str(font_size)+" font-family='"+font_family+"'>"+name+"</text>")
def drawSnakePlotTerminals(self):
y_offset = 50
font_size = 12
font_family = 'helvetica'
bezier_pull = 80
between_residues = 18
for name in ['ns1', 's19c']:
drawn_residues = []
if name not in self.segments: continue # continue if no terminus
rs = self.segments[name] # get residues
if name == 'ns1':
orientation = 1
# y_max = self.maxY['extra']-between_residues*4
position = 'bottom'
linked_helix = 1
y_max = self.TBCoords[linked_helix][position][1] + 200
x_max = self.maxX['right'] - 300
rs.reverse()
else:
orientation = 1
# y_max = self.maxY['intra']+between_residues*4
position = 'bottom'
linked_helix = 20
y_max = self.TBCoords[linked_helix][position][1] + 200
x_max = self.maxX['left'] - 300
x1 = self.TBCoords[linked_helix][position][0]
y1 = self.TBCoords[linked_helix][position][1]
# Get positions of two linking residues from each helix
x2 = x1 - 30
y2 = y1 + 80 * orientation
# Make line and box for short version
points = "M "+str(x1)+" "+str(y1)+" Q"+str(x1+30)+" "+str(y2)+" "+str(x2)+" "+str(y2)
self.output += "<path class='"+name+" short' d='" + points + "' stroke='black' fill='none' stroke-width='2' />"
self.output += "<rect class='"+name+" short segment' onclick='toggleLoop(\"."+name+"\",\"short\");' x="+str(x2-25)+" y="+str(y2-13)+" rx=5 ry=5 width='50' height='20' stroke='black' fill='white' stroke-width='1' style2='fill:red;stroke:black;stroke-width:5;opacity:0.5'/>"
self.output += str("<text class='"+name+" short segment' onclick='toggleLoop(\"."+name+"\",\"short\");' x="+str(x2)+" y="+str(y2)+" text-anchor='middle' font-size="+str(font_size)+" font-family='"+font_family+"'>"+name+"</text>")
x2 = x1-90*orientation
y2 = y_max
bezierX = x1+60*orientation
bezierY = (y_max+y1)/2+60*orientation
points = "M "+str(x1)+" "+str(y1)+" Q"+str(bezierX)+" "+str(bezierY)+" "+str(x2)+" "+str(y2)
pos = 40
length = self.lengthbezier([x1,y1],[bezierX,bezierY],[x2,y2],0.001)
bend = 0
distance_between_rows = 30
pos_bend = 0
bend_direction = -1 * orientation
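            # residues that run past the end of the Bezier trace are wrapped into extra rows
            # ('bends') that alternate direction so long termini stay within the plot area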
for i in range(0,len(rs)):
r = rs[i]
if pos<length:
where = self.wherebezier([x1,y1],[bezierX,bezierY],[x2,y2],0.001,pos)
else:
if pos_bend==0 and bend!=0: #if first residue in line put in middle
where[1][0] = where[1][0]-between_residues*bend_direction
#where[1][0] = where[1][0]
where[1][1] = where[1][1]+orientation*distance_between_rows/2
elif pos_bend==between_residues and bend!=0: #if 2nd residue in line put in middle
#where[1][0] = where[1][0]-between_residues*bend_direction
where[1][0] = where[1][0]+between_residues*bend_direction
where[1][1] = where[1][1]+orientation*distance_between_rows/2
else:
where[1][0] = where[1][0]+between_residues*bend_direction
where[1][1] = where[1][1]
last_bend_x = where[1][0]
last_bend_y = where[1][1]
pos_bend += between_residues
if pos_bend>=abs(x2-x_max)-40: #no more bend left
pos_bend = 0
bend += 1
if bend_direction==1:
bend_direction = -1
elif bend_direction==-1:
bend_direction = 1
if i==0: self.output += "<line class='"+name+" long' x1="+str(x1)+" y1="+str(y1)+" x2="+str(where[1][0])+" y2="+str(where[1][1])+" stroke='black' fill='none' stroke-width='2' stroke-dasharray2='1,1' />"
if bend==0: labely = where[1][1]
drawn_residues.append(self.DrawResidue(where[1][0],where[1][1],r[1], r[0], rs[i][3], self.residue_radius-1,name+" long"))
pos += between_residues
if where[1][1]<self.low: self.low = where[1][1]
if where[1][1]>self.high: self.high = where[1][1]
if name=='s19c': drawn_residues = drawn_residues[::-1]
self.output += ''.join(drawn_residues)
self.output += "<rect onclick='toggleLoop(\"."+name+"\",\"long\");' class='"+name+" long segment' x="+str(self.TBCoords[linked_helix][position][0]-40*orientation-25)+" y="+str((labely+self.TBCoords[linked_helix][position][1])/2-13)+" rx=5 ry=5 width='50' height='20' stroke='black' fill='white' stroke-width='1' style2='fill:red;stroke:black;stroke-width:5;opacity:0.5'/>"
self.output += str("<text onclick='toggleLoop(\"."+name+"\",\"long\");' class='"+name+" long segment' x="+str(self.TBCoords[linked_helix][position][0]-40*orientation)+" y="+str((labely+self.TBCoords[linked_helix][position][1])/2)+" text-anchor='middle' font-size="+str(font_size)+" font-family='"+font_family+"'>"+name+"</text>")
| apache-2.0 | -5,423,738,260,072,005,000 | 40.443878 | 384 | 0.507448 | false |
consbio/python-databasin | tests/test_client.py | 1 | 15329 | from __future__ import absolute_import
import copy
import json
import zipfile
import pytest
import requests_mock
import six
from requests.models import Request
from databasin.client import Client
from databasin.exceptions import DatasetImportError
from .utils import make_api_key_callback
try:
from unittest import mock # Py3
except ImportError:
import mock # Py2
try:
import __builtin__ as builtins
except ImportError:
import builtins
LOGIN_URL = 'https://databasin.org/auth/api/login/'
@pytest.fixture()
def dataset_import_data():
return {
'id': 'a1b2c3',
'owner_id': 'user',
'private': False,
'title': 'Some Import',
'description': 'This dataset is a dataset.',
'create_date': '2015-11-17T22:42:06+00:00',
'modify_date': '2015-11-17T22:42:06+00:00',
'native': True,
'tags': ['one', 'two'],
'credits': None,
'failed': False,
'is_dataset_edit': False
}
@pytest.fixture()
def dataset_data():
return {
'id': 'a1b2c3',
'owner_id': 'user',
'private': False,
'title': 'Some Dataset',
'snippet': 'This dataset is...',
'create_date': '2015-11-17T22:42:06+00:00',
'modify_date': '2015-11-17T22:42:06+00:00',
'native': True,
'tags': ['one', 'two'],
'credits': None
}
@pytest.fixture
def import_job_data():
return {
'id': '1234',
'job_name': 'create_import_job',
'status': 'succeeded',
'progress': 100,
'message': json.dumps({'next_uri': '/datasets/import/a1b2c3/overview/'})
}
@pytest.fixture
def import_netcdf_job_data():
return {
'id': '1234',
'job_name': 'create_import_job',
'status': 'succeeded',
'progress': 100,
'message': json.dumps({'next_uri': '/datasets/a1b2c3/'})
}
@pytest.fixture
def finalize_job_data():
return {
'id': '1235',
'job_name': 'finalize_import_job',
'status': 'succeeded',
'progress': 100,
'message': json.dumps({'next_uri': '/datasets/a1b2c3/'})
}
@pytest.fixture
def tmp_file_data():
return {
'uuid': 'abcd',
'date': '2015-11-17T22:42:06+00:00',
'is_image': False,
'filename': '',
'url': 'https://example.com/file.txt'
}
def test_alternative_host():
c = Client('example.com:81')
assert c.base_url == 'https://example.com:81'
def test_https_referer():
"""Django requires all POST requests via HTTPS to have the Referer header set."""
c = Client()
r = c._session.prepare_request(Request('POST', LOGIN_URL))
c._session.get_adapter(LOGIN_URL).add_headers(r)
assert r.headers['Referer'] == LOGIN_URL
def test_login():
with requests_mock.mock() as m:
m.get('https://databasin.org/', cookies={'csrftoken': 'abcd'})
m.post(LOGIN_URL, cookies={'sessionid': 'asdf'})
c = Client()
c.login('foo', 'bar')
assert m.call_count == 2
def test_login_no_redirect():
with requests_mock.mock() as m:
m.get('https://databasin.org/redirect/')
m.get('https://databasin.org/', cookies={'csrftoken': 'abcd'})
m.get(LOGIN_URL, cookies={'csrftoken': 'abcd'})
m.post(
LOGIN_URL, headers={'Location': 'https://databasin.org/'}, cookies={'sessionid': 'asdf'}, status_code=302
)
c = Client()
c.login('foo', 'bar')
assert m.call_count == 2
assert not any(r.url for r in m.request_history if r.url == 'https://databasin.org/redirect/')
def test_import_lpk(import_job_data, dataset_data, dataset_import_data, finalize_job_data, tmp_file_data):
with requests_mock.mock() as m:
m.post('https://databasin.org/uploads/upload-temporary-file/', text=json.dumps({'uuid': 'abcd'}))
m.get('https://databasin.org/api/v1/uploads/temporary-files/abcd/', text=json.dumps(tmp_file_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1234/'})
m.get('https://databasin.org/api/v1/jobs/1234/', text=json.dumps(import_job_data))
m.get('https://databasin.org/api/v1/dataset_imports/a1b2c3/', text=json.dumps(dataset_import_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1235/'})
m.get('https://databasin.org/api/v1/jobs/1235/', text=json.dumps(finalize_job_data))
m.get('https://databasin.org/api/v1/datasets/a1b2c3/', text=json.dumps(dataset_data))
f = six.BytesIO()
with mock.patch.object(builtins, 'open', mock.Mock(return_value=f)) as open_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
dataset = c.import_lpk('test.lpk')
open_mock.assert_called_once_with('test.lpk', 'rb')
assert m.call_count == 7
assert dataset.id == 'a1b2c3'
request_data = json.loads(m.request_history[2].text)
assert request_data['job_name'] == 'create_import_job'
assert request_data['job_args']['file'] == 'abcd'
assert request_data['job_args']['dataset_type'] == 'ArcGIS_Native'
def test_import_lpk_with_api_key(import_job_data, dataset_data, dataset_import_data, finalize_job_data, tmp_file_data):
key = 'abcdef123456'
with requests_mock.mock() as m:
m.post(
'https://databasin.org/uploads/upload-temporary-file/',
text=make_api_key_callback(json.dumps({'uuid': 'abcd'}), key)
)
m.get(
'https://databasin.org/api/v1/uploads/temporary-files/abcd/',
text=make_api_key_callback(json.dumps(tmp_file_data), key)
)
m.post(
'https://databasin.org/api/v1/jobs/',
headers={'Location': '/api/v1/jobs/1234/'},
text=make_api_key_callback('', key)
)
m.get(
'https://databasin.org/api/v1/jobs/1234/',
text=make_api_key_callback(json.dumps(import_job_data), key)
)
m.get(
'https://databasin.org/api/v1/dataset_imports/a1b2c3/',
text=make_api_key_callback(json.dumps(dataset_import_data), key)
)
m.post(
'https://databasin.org/api/v1/jobs/',
headers={'Location': '/api/v1/jobs/1235/'},
text=make_api_key_callback('', key)
)
m.get('https://databasin.org/api/v1/jobs/1235/', text=make_api_key_callback(json.dumps(finalize_job_data), key))
m.get(
'https://databasin.org/api/v1/datasets/a1b2c3/',
text=make_api_key_callback(json.dumps(dataset_data), key)
)
f = six.BytesIO()
with mock.patch.object(builtins, 'open', mock.Mock(return_value=f)) as open_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
c.set_api_key('user', key)
dataset = c.import_lpk('test.lpk')
open_mock.assert_called_once_with('test.lpk', 'rb')
assert m.call_count == 7
assert dataset.id == 'a1b2c3'
def test_import_lpk_with_xml(import_job_data, dataset_data, dataset_import_data, finalize_job_data, tmp_file_data):
with requests_mock.mock() as m:
m.post('https://databasin.org/datasets/1234/import/metadata/')
m.post('https://databasin.org/uploads/upload-temporary-file/', text=json.dumps({'uuid': 'abcd'}))
m.get('https://databasin.org/api/v1/uploads/temporary-files/abcd/', text=json.dumps(tmp_file_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1234/'})
m.get('https://databasin.org/api/v1/jobs/1234/', text=json.dumps(import_job_data))
m.get('https://databasin.org/api/v1/dataset_imports/a1b2c3/', text=json.dumps(dataset_import_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1235/'})
m.get('https://databasin.org/api/v1/jobs/1235/', text=json.dumps(finalize_job_data))
m.get('https://databasin.org/api/v1/datasets/a1b2c3/', text=json.dumps(dataset_data))
f = mock.Mock()
f.read = mock.Mock(return_value='')
f.__enter__ = mock.Mock(return_value=f)
f.__exit__ = mock.Mock(return_value=f)
with mock.patch.object(builtins, 'open', mock.Mock(return_value=f)) as open_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
dataset = c.import_lpk('test.lpk', 'test.xml')
open_mock.assert_any_call('test.xml')
open_mock.assert_any_call('test.lpk', 'rb')
assert m.call_count == 8
assert dataset.id == 'a1b2c3'
request_data = json.loads(m.request_history[2].text)
assert request_data['job_name'] == 'create_import_job'
assert request_data['job_args']['file'] == 'abcd'
assert request_data['job_args']['dataset_type'] == 'ArcGIS_Native'
def test_import_netcdf_dataset_with_zip(import_netcdf_job_data, dataset_data, tmp_file_data):
with requests_mock.mock() as m:
m.post('https://databasin.org/uploads/upload-temporary-file/', text=json.dumps({'uuid': 'abcd'}))
m.get('https://databasin.org/api/v1/uploads/temporary-files/abcd/', text=json.dumps(tmp_file_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1234/'})
m.get('https://databasin.org/api/v1/jobs/1234/', text=json.dumps(import_netcdf_job_data))
m.get('https://databasin.org/api/v1/datasets/a1b2c3/', text=json.dumps(dataset_data))
f = six.BytesIO()
with zipfile.ZipFile(f, 'w') as zf:
zf.writestr('test.nc', '')
zf.writestr('style.json', '')
f.seek(0)
with mock.patch.object(builtins, 'open', mock.Mock(return_value=f)) as open_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
dataset = c.import_netcdf_dataset('test.zip')
open_mock.assert_called_once_with('test.zip', 'a+b')
assert m.call_count == 5
assert dataset.id == 'a1b2c3'
request_data = json.loads(m.request_history[2].text)
assert request_data['job_name'] == 'create_import_job'
assert request_data['job_args']['file'] == 'abcd'
assert request_data['job_args']['dataset_type'] == 'NetCDF_Native'
def test_import_netcdf_dataset_with_nc(import_netcdf_job_data, dataset_data, tmp_file_data):
with requests_mock.mock() as m:
m.post('https://databasin.org/uploads/upload-temporary-file/', text=json.dumps({'uuid': 'abcd'}))
m.get('https://databasin.org/api/v1/uploads/temporary-files/abcd/', text=json.dumps(tmp_file_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1234/'})
m.get('https://databasin.org/api/v1/jobs/1234/', text=json.dumps(import_netcdf_job_data))
m.get('https://databasin.org/api/v1/datasets/a1b2c3/', text=json.dumps(dataset_data))
with mock.patch.object(zipfile, 'ZipFile', mock.MagicMock()) as zf_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
dataset = c.import_netcdf_dataset('test.nc', style={'foo': 'bar'})
zf_mock().write.assert_called_once_with('test.nc', 'test.nc')
assert m.call_count == 5
assert dataset.id == 'a1b2c3'
request_data = json.loads(m.request_history[2].text)
assert request_data['job_name'] == 'create_import_job'
assert request_data['job_args']['file'] == 'abcd'
assert request_data['job_args']['dataset_type'] == 'NetCDF_Native'
def test_import_netcdf_dataset_with_api_key(import_netcdf_job_data, dataset_data, tmp_file_data):
key = 'abcde12345'
with requests_mock.mock() as m:
m.post(
'https://databasin.org/uploads/upload-temporary-file/',
text=make_api_key_callback(json.dumps({'uuid': 'abcd'}), key)
)
m.get(
'https://databasin.org/api/v1/uploads/temporary-files/abcd/',
text=make_api_key_callback(json.dumps(tmp_file_data), key)
)
m.post(
'https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1234/'},
text=make_api_key_callback('', key)
)
m.get(
'https://databasin.org/api/v1/jobs/1234/',
text=make_api_key_callback(json.dumps(import_netcdf_job_data), key)
)
m.get(
'https://databasin.org/api/v1/datasets/a1b2c3/',
text=make_api_key_callback(json.dumps(dataset_data), key)
)
with mock.patch.object(zipfile, 'ZipFile', mock.MagicMock()) as zf_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
c.set_api_key('user', key)
dataset = c.import_netcdf_dataset('test.nc', style={'foo': 'bar'})
zf_mock().write.assert_called_once_with('test.nc', 'test.nc')
assert m.call_count == 5
assert dataset.id == 'a1b2c3'
request_data = json.loads(m.request_history[2].text)
assert request_data['job_name'] == 'create_import_job'
assert request_data['job_args']['file'] == 'abcd'
assert request_data['job_args']['dataset_type'] == 'NetCDF_Native'
def test_import_netcdf_dataset_with_invalid_file():
c = Client()
with pytest.raises(ValueError):
c.import_netcdf_dataset('test.foo')
def test_import_netcdf_dataset_with_no_style():
f = six.BytesIO()
with zipfile.ZipFile(f, 'w') as zf:
zf.writestr('test.nc', '')
f.seek(0)
with mock.patch.object(builtins, 'open', mock.Mock(return_value=f)) as open_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
with pytest.raises(ValueError):
c.import_netcdf_dataset('test.zip')
def test_import_netcdf_dataset_incomplete(import_job_data, tmp_file_data, dataset_import_data):
import_job_data = copy.copy(import_job_data)
import_job_data['message'] = json.dumps({'next_uri': '/datasets/import/a1b2c3/overview/'})
with requests_mock.mock() as m:
m.post('https://databasin.org/uploads/upload-temporary-file/', text=json.dumps({'uuid': 'abcd'}))
m.get('https://databasin.org/api/v1/uploads/temporary-files/abcd/', text=json.dumps(tmp_file_data))
m.post('https://databasin.org/api/v1/jobs/', headers={'Location': '/api/v1/jobs/1234/'})
m.get('https://databasin.org/api/v1/jobs/1234/', text=json.dumps(import_job_data))
m.get('https://databasin.org/api/v1/dataset_imports/a1b2c3/', text=json.dumps(dataset_import_data))
m.delete('https://databasin.org/api/v1/dataset_imports/a1b2c3/')
f = six.BytesIO()
with zipfile.ZipFile(f, 'w') as zf:
zf.writestr('test.nc', '')
zf.writestr('style.json', '')
f.seek(0)
with mock.patch.object(builtins, 'open', mock.Mock(return_value=f)) as open_mock:
c = Client()
c._session.cookies['csrftoken'] = 'abcd'
with pytest.raises(DatasetImportError):
c.import_netcdf_dataset('test.zip')
assert m.call_count == 6
| bsd-3-clause | -4,360,527,989,050,836,500 | 38.507732 | 120 | 0.591363 | false |
mark-me/Pi-Jukebox | venv/Lib/site-packages/pygame/tests/test_utils/test_machinery.py | 1 | 2404 | import inspect
import random
import re
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from . import import_submodule
class PygameTestLoader(unittest.TestLoader):
def __init__(self, randomize_tests=False, include_incomplete=False,
exclude=('interactive',)):
super(PygameTestLoader, self).__init__()
self.randomize_tests = randomize_tests
if exclude is None:
self.exclude = set()
else:
self.exclude = set(exclude)
if include_incomplete:
self.testMethodPrefix = ('test', 'todo_')
def getTestCaseNames(self, testCaseClass):
res = []
for name in super(PygameTestLoader, self).getTestCaseNames(testCaseClass):
tags = get_tags(testCaseClass, getattr(testCaseClass, name))
if self.exclude.isdisjoint(tags):
res.append(name)
if self.randomize_tests:
random.shuffle(res)
return res
# Exclude by tags:
TAGS_RE = re.compile(r"\|[tT]ags:(-?[ a-zA-Z,0-9_\n]+)\|", re.M)
class TestTags:
def __init__(self):
self.memoized = {}
self.parent_modules = {}
def get_parent_module(self, class_):
if class_ not in self.parent_modules:
self.parent_modules[class_] = import_submodule(class_.__module__)
return self.parent_modules[class_]
def __call__(self, parent_class, meth):
key = (parent_class, meth.__name__)
if key not in self.memoized:
parent_module = self.get_parent_module(parent_class)
module_tags = getattr(parent_module, '__tags__', [])
class_tags = getattr(parent_class, '__tags__', [])
tags = TAGS_RE.search(inspect.getdoc(meth) or '')
if tags: test_tags = [t.strip() for t in tags.group(1).split(',')]
else: test_tags = []
combined = set()
for tags in (module_tags, class_tags, test_tags):
if not tags: continue
add = set([t for t in tags if not t.startswith('-')])
remove = set([t[1:] for t in tags if t not in add])
if add: combined.update(add)
if remove: combined.difference_update(remove)
self.memoized[key] = combined
return self.memoized[key]
get_tags = TestTags()
| agpl-3.0 | -5,849,633,297,597,302,000 | 29.05 | 82 | 0.576123 | false |
buxx/TextDataExtractor | tests/TestData.py | 1 | 2596 | from tde.exceptions import CantExtractData
from tests.src.Base import Base
from tests.src.data import WikipediaLetterCountTextFileData, WikipediaWordCountTextFileData, \
WikipediaCategoryCountTextFilesData, WikipediaLetterCountHTMLFileData, WikipediaWordCountHTMLFileData, \
WikipediaCategoryCountHTMLFilesData
class TestInspector(Base):
def _get_content_of_file(self, file_path):
with open(file_path) as file_content:
return file_content.read()
def test_wikipedia_text_data(self):
letter_count = WikipediaLetterCountTextFileData()
letter_count.swallow(self._get_content_of_file('tests/src/source_files/evolution.txt'))
self.assertEquals({'Évolution (biologie)': 55251}, letter_count.get_data())
word_count = WikipediaWordCountTextFileData()
word_count.swallow(self._get_content_of_file('tests/src/source_files/evolution.txt'))
self.assertEquals({'Évolution (biologie)': 3378}, word_count.get_data())
category_count = WikipediaCategoryCountTextFilesData()
category_count.swallow(self._get_content_of_file('tests/src/source_files/evolution.txt'))
self.assertEquals({'Science': 1}, category_count.get_data())
category_count.swallow(self._get_content_of_file('tests/src/source_files/relativite.txt'))
self.assertEquals({'Science': 2}, category_count.get_data())
def test_wikipedia_html_data(self):
letter_count = WikipediaLetterCountHTMLFileData()
letter_count.swallow(self._get_content_of_file('tests/src/source_files/evolution.html'))
self.assertEquals({'Évolution (biologie)': 59883}, letter_count.get_data())
word_count = WikipediaWordCountHTMLFileData()
word_count.swallow(self._get_content_of_file('tests/src/source_files/evolution.html'))
self.assertEquals({'Évolution (biologie)': 3460}, word_count.get_data())
category_count = WikipediaCategoryCountHTMLFilesData()
category_count.swallow(self._get_content_of_file('tests/src/source_files/evolution.html'))
self.assertEquals({'Science': 1}, category_count.get_data())
category_count.swallow(self._get_content_of_file('tests/src/source_files/relativite.html'))
self.assertEquals({'Science': 2}, category_count.get_data())
def test_cant_extract(self):
letter_count = WikipediaLetterCountHTMLFileData()
courgettes_farcies_text = self._get_content_of_file('tests/src/source_files/aubergines_farcies.txt')
self.assertRaises(CantExtractData, letter_count.swallow, courgettes_farcies_text)
| gpl-2.0 | 8,680,718,928,001,855,000 | 54.148936 | 108 | 0.721451 | false |
ujdhesa/unisubs | unisubs_settings.py | 1 | 3595 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from datetime import timedelta
from settings import *
from server_local_settings import *
DEBUG = False
ADMINS = (
('Craig Zheng', '[email protected]'),
('universalsubtitles-errors', '[email protected]')
)
if INSTALLATION == DEV:
ADMINS = (
('Evan', '[email protected]'),
)
SITE_ID = 16
SITE_NAME = 'unisubsdev'
REDIS_DB = "3"
EMAIL_SUBJECT_PREFIX = '[usubs-dev]'
SENTRY_TESTING = True
SOLR_ROOT = '/usr/share/'
CELERY_TASK_RESULT_EXPIRES = timedelta(days=7)
elif INSTALLATION == STAGING:
SITE_ID = 17
SITE_NAME = 'unisubsstaging'
REDIS_DB = "2"
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
EMAIL_SUBJECT_PREFIX = '[usubs-staging]'
CELERY_TASK_RESULT_EXPIRES = timedelta(days=7)
elif INSTALLATION == PRODUCTION:
SITE_ID = 18
SITE_NAME = 'unisubs'
REDIS_DB = "1"
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
EMAIL_SUBJECT_PREFIX = '[usubs-production]'
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
ADMINS = (
('universalsubtitles-errors', '[email protected]'),
)
# only send actual email on the production server
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
elif INSTALLATION == DEMO:
DEBUG = True
REDIS_DB = "4"
SENTRY_TESTING = True
elif INSTALLATION == LOCAL:
SITE_ID = 14
SITE_NAME = 'unisubsstaging'
ADMINS = (
('Evan', '[email protected]'),
)
if INSTALLATION == STAGING or INSTALLATION == PRODUCTION or INSTALLATION == LOCAL:
DATABASE_ROUTERS = ['routers.UnisubsRouter']
AWS_STORAGE_BUCKET_NAME = DEFAULT_BUCKET
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
COMPRESS_URL = STATIC_URL
SOLR_ROOT = '/usr/share/'
CELERYD_LOG_LEVEL = 'INFO'
CELERY_REDIRECT_STDOUTS = True
CELERY_REDIRECT_STDOUTS_LEVEL = 'INFO'
RECAPTCHA_PUBLIC = '6LftU8USAAAAADia-hmK1RTJyqXjFf_T5QzqLE9o'
IGNORE_REDIS = True
ALARM_EMAIL = FEEDBACK_EMAILS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': DATABASE_NAME,
'USER': DATABASE_USER,
'PASSWORD': DATABASE_PASSWORD,
'HOST': DATABASE_HOST,
'PORT': '3306'
}
}
DATABASES.update(uslogging_db)
USE_AMAZON_S3 = AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and DEFAULT_BUCKET
try:
from settings_local import *
except ImportError:
pass
if USE_AMAZON_S3:
AWS_BUCKET_NAME = AWS_STORAGE_BUCKET_NAME
COMPRESS_MEDIA = not DEBUG
STATIC_URL_BASE = STATIC_URL
if COMPRESS_MEDIA:
STATIC_URL += "%s/%s/" % (COMPRESS_OUTPUT_DIRNAME, LAST_COMMIT_GUID.split("/")[1])
ADMIN_MEDIA_PREFIX = "%sadmin/" % STATIC_URL_BASE
# the keyd cache apps need this:
CACHE_TIMEOUT = 60
CACHE_PREFIX = "unisubscache"
| agpl-3.0 | 6,851,062,375,894,113,000 | 28.467213 | 86 | 0.691516 | false |
chengsoonong/crowdastro | crowdastro/active_learning/random_sampler.py | 1 | 2413 | """Learning with random sampling.
Pool-based. Binary class labels.
Matthew Alger
The Australian National University
2016
"""
import numpy
from .sampler import Sampler
class RandomSampler(Sampler):
"""Pool-based learning with random sampling."""
def sample_index(self):
"""Finds index of a random unlabelled point."""
unlabelled = self.labels.mask.nonzero()[0]
if len(unlabelled):
index = numpy.random.choice(unlabelled)
return index
return 0
def sample_indices(self, n):
"""Finds indices of n random unlabelled points."""
indices = set()
unlabelled = self.labels.mask.nonzero()[0]
if len(unlabelled) < n:
return unlabelled
while len(indices) < n:
index = numpy.random.choice(unlabelled)
indices.add(index)
return sorted(indices)
class BalancedSampler(RandomSampler):
"""Pool-based learning with balanced random sampling.
WARNING: This class can "peek" at the true labels!
"""
def sample_index(self):
"""Finds index of a random unlabelled point."""
unlabelled = self.labels.mask.nonzero()[0]
unlabelled_groundtruth = self.labels.data[unlabelled]
if len(unlabelled):
if numpy.random.random() < 0.5:
index = numpy.random.choice(
unlabelled[unlabelled_groundtruth == 1])
else:
index = numpy.random.choice(
unlabelled[unlabelled_groundtruth == 0])
return index
return 0
def sample_indices(self, n):
"""Finds indices of n random unlabelled points."""
indices = set()
unlabelled = self.labels.mask.nonzero()[0]
if len(unlabelled) < n:
return unlabelled
unlabelled_groundtruth = self.labels.data[unlabelled]
while len(indices) < n:
if ((numpy.random.random() < 0.5 and
len(unlabelled[unlabelled_groundtruth == 1]) > 0) or
len(unlabelled[unlabelled_groundtruth == 0]) == 0):
index = numpy.random.choice(
unlabelled[unlabelled_groundtruth == 1])
else:
index = numpy.random.choice(
unlabelled[unlabelled_groundtruth == 0])
indices.add(index)
return sorted(indices)
| mit | 8,591,367,075,360,485,000 | 27.388235 | 72 | 0.581434 | false |
0xF1/nessus_tools | mobile_devices_parser.py | 1 | 2836 | #!/usr/bin/env python
'''
mobile devices parser
Version 0.1
by Roy Firestein ([email protected])
Parse mobile devices audit plugin and export to CSV
'''
import os
import xml.dom.minidom
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", action="store", type="string", dest="file", help="Nessus file to parse")
parser.add_option("-o", "--output", action="store", type="string", dest="output", help="output file name")
(menu, args) = parser.parse_args()
devices = {"Android": [], "iPhone": [], "iPad": []}
def main():
nes_file = menu.file
report = xml.dom.minidom.parse(nes_file)
for el in report.getElementsByTagName('ReportItem'):
if el.getAttribute("pluginID") == "60035":
# find plugin_output element
output = get_plugin_output(el)
model = get_model(output)
version = get_version(output)
user = get_user(output)
serial = get_serial(output)
item = {"serial": serial, "version": version, "user": user}
if not item in devices[model]:
devices[model].append(item)
print "%s\t%s\t%s\t%s" %(model, version, user, serial)
if len(devices['iPhone']) > 0 or len(devices['iPad']) > 0 or len(devices['Android']) > 0:
save_csv(devices)
def save_csv(devices):
fh = open(menu.output, "w")
fh.write("Platform,User,Version,Serial\n")
for d in devices['iPhone']:
fh.write('"%s","%s","%s","%s"\n' %("iPhone", d['user'], d['version'], d['serial']))
for d in devices['iPad']:
fh.write('"%s","%s","%s","%s"\n' %("iPad", d['user'], d['version'], d['serial']))
for d in devices['Android']:
fh.write('"%s","%s","%s","%s"\n' %("Android", d['user'], d['version'], d['serial']))
fh.close()
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def get_plugin_output(el):
a = el.getElementsByTagName("plugin_output")[0]
return getText(a.childNodes)
def get_model(data):
for line in data.split("\n"):
if line.startswith("Model"):
return line.split(" ")[2]
return None
def get_version(data):
for line in data.split("\n"):
if line.startswith("Version"):
return line.split(" ")[2]
return None
def get_user(data):
for line in data.split("\n"):
if line.startswith("User"):
return line.split(" ")[2]
return None
def get_serial(data):
for line in data.split("\n"):
if line.startswith("Serial"):
return line.split(" ")[3]
return None
if __name__ == "__main__":
main()
| apache-2.0 | -6,459,428,287,161,355,000 | 26.269231 | 107 | 0.554302 | false |
tjsteele/battle-simulator | source/main.py | 1 | 3135 | import random, time
currPlayerHitToken = True
currOpponentHitToken = True
class GenerateMonster():
def __init__(self, name, health, gold, weapon, ac):
self.name = name
self.health = health
self.gold = gold
self.weapon = weapon
self.ac = ac
def checkMissPlayer(defender):
"""
Returns a boolean token: if player missed or not.
If defenders AC (Armor Class) is above players hit the token will evaluate to False,
and the player will respectively miss.
"""
global currPlayerHitToken
missChance = random.randrange(0, 25)
if missChance <= defender:
currPlayerHitToken = False
return currPlayerHitToken
else:
currPlayerHitToken = True
return currPlayerHitToken
def checkMissOpponent(defender):
"""
Returns a boolean token: if opponent missed or not.
If defenders AC (Armor Class) is above opponents hit, the token will evaluate to False,
and the opponent will respectively miss.
"""
global currOpponentHitToken
missChance = random.randrange(0, 25) # make this variable
if missChance <= defender:
currOpponentHitToken = False
return currOpponentHitToken
else:
currPlayerHitToken = True
return currOpponentHitToken
def determineDamage(weapon, modifier, directed):
"""
Returns an integer: damage inflicted by the weapon.
Relative to the player/opponent's weapon, inflictDamage is called and the function's
effects to the opposing's HP is calculated.
"""
if weapon == "fists" or weapon == "claws":
return inflictDamage(player, 2 * modifier, 6 * modifier)
elif weapon == "Iron Broadsword":
return inflictDamage(opponent, 100, 250)
return
def inflictDamage(inflicted, min, max):
"""
Returns damage inflicted to determineDamage: which is called in main().
"""
damageInflicted = random.randrange(min, max+1)
if damageInflicted == 0:
return 'Miss!'
else:
inflicted.health-=damageInflicted
return damageInflicted
def getWinner(player, enemy):
"""
Returns winner of the match by comparing object's HP attribute once knocked below zero.
"""
if player.health > enemy.health:
print player.name, 'wins!'
else:
print enemy.name, 'wins!'
def getHP(character):
return character.health
opponent = GenerateMonster('Goblin King', 1000, 100, 'fists', 15)
player = GenerateMonster('Paladin', 150, 200, 'Iron Broadsword', 15)
def main():
playerInitialHealth = player.health
opponentInitialHealth = opponent.health
while (opponent.health >= 0) and (player.health >= 0):
time.sleep(1)
if (currPlayerHitToken):
print "%s HP:" % player.name, getHP(player)
print "Damage to %s:" % opponent.name, determineDamage(player.weapon, 1, opponent.health)
else:
print '%s HP:' % player.name, getHP(player)
print '%s missed!' % player.name
time.sleep(1)
if(currOpponentHitToken):
print "%s HP:" % opponent.name, getHP(opponent)
print "Damage to: %s" % player.name, determineDamage(opponent.weapon, 1, player.health)
else:
print "%s HP:" % opponent.name, getHP(opponent)
print '%s missed!' % opponent.name
getWinner(player, opponent)
if __name__ == "__main__":
main()
| mit | -4,317,210,588,742,869,500 | 22.75 | 93 | 0.708453 | false |
TC01/calcpkg | calcrepo/repos/ticalc.py | 1 | 4723 | import urllib
from calcrepo import info
from calcrepo import repo
name = "ticalc"
url = "http://www.ticalc.org/"
enabled = True
class TicalcRepository(repo.CalcRepository):
def formatDownloadUrl(self, url):
return "http://www.ticalc.org" + url
def updateRepoIndexes(self, verbose=False):
self.printd("Reading ticalc.org master index (this will take some time).")
# First read in the text (the only network process involved)
masterIndex = urllib.urlopen('http://www.ticalc.org/pub/master.index').read()
self.printd(" Read in ticalc.org master index.")
# Delete and open new indices
files = self.openIndex(self.index.fileIndex, "files index")
names = self.openIndex(self.index.nameIndex, "names index")
if files is None or names is None:
try:
files.close()
except:
return
# Now, parse the enormous data and write index files
self.printd(" ")
masterIndex = masterIndex[39:]
directory = ""
while len(masterIndex) > 2:
line = masterIndex[:masterIndex.find('\n')]
masterIndex = masterIndex[masterIndex.find('\n') + 1:]
if line == "":
continue
if line[:9] == "Index of ":
dirData = line[9:]
directory = dirData[:dirData.find(" ")]
if verbose:
self.printd(" Caching " + line[9:])
else:
fileData = line[:line.find(" ")]
files.write(directory + '/' + fileData + '\n')
nameData = line[len(fileData)+1:].lstrip()
names.write(nameData + '\n')
# Close the indexes now
files.close()
names.close()
self.printd("Finished updating ticalc.org repo.\n")
def getFileInfo(self, fileUrl, fileName):
#Get the category path for the file
categoryPath = "http://www.ticalc.org/"
splitUrls = fileUrl.split('/')
for splitUrl in splitUrls:
if splitUrl != "" and (not "." in splitUrl):
categoryPath += splitUrl + '/'
#Now open the category page and extract the URL for the file info page
categoryPage = urllib.urlopen(categoryPath, "")
categoryData = categoryPage.read()
categoryPage.close()
index = categoryData.find(fileUrl) - 7
rIndex = categoryData.rfind('A HREF="', 0, index)
infoUrl = categoryData[rIndex + 9:]
infoUrl = "http://www.ticalc.org/" + infoUrl[:infoUrl.find('">')]
#Create a file info object
fileInfo = info.FileInfo(fileUrl, fileName, infoUrl, self.output)
infoPage = urllib.urlopen(infoUrl)
infoText = infoPage.read()
infoPage.close()
#Fill in all the data bits
fileInfo.description = self.getBaseFileData(infoText, "Description")
fileInfo.fileSize = self.getBaseFileData(infoText, "File Size")
fileInfo.fileDate = self.getBaseFileData(infoText, "File Date and Time", 47, 2)
fileInfo.documentation = self.getBaseFileData(infoText, "Documentation Included?")
fileInfo.sourceCode = self.getBaseFileData(infoText, "Source Code")
fileInfo.category = self.getFileCategory(infoText)
fileInfo.author = self.getFileAuthor(infoText)
fileInfo.downloads = self.getNumDownloads(infoText)
fileInfo.repository = self.name
#Print the file info object
fileInfo.printData(self.output)
return fileInfo
def getBaseFileData(self, fileInfo, data, index1 = 47, index2 = 1):
"""Function to initialize the simple data for file info"""
result = fileInfo[fileInfo.find(data):]
result = result[result.find("<FONT ") + index1:]
result = result[:result.find("</FONT>") - index2]
return result
def getFileCategory(self, fileInfo):
"""Function to get the file category for file info"""
category = fileInfo[fileInfo.find("Category"):]
category = category[category.find("<FONT ") + 47:]
category = category[category.find('">') + 2:]
category = category[:category.find("</A></B>") - 0]
return category
def getFileAuthor(self, fileInfo):
"""Function to get the file's author for file info, note that we are pretending that multiple authors do not exist here"""
author = fileInfo[fileInfo.find("Author"):]
author = author[author.find("<FONT ") + 47:]
author = author[author.find('<B>') + 3:]
authormail = author[author.find("mailto:") + 7:]
authormail = authormail[:authormail.find('"')]
author = author[:author.find("</B></A>") - 0]
author = author + " (" + authormail + ")"
return author
def getNumDownloads(self, fileInfo):
"""Function to get the number of times a file has been downloaded"""
downloads = fileInfo[fileInfo.find("FILE INFORMATION"):]
if -1 != fileInfo.find("not included in ranking"):
return "0"
downloads = downloads[:downloads.find(".<BR>")]
downloads = downloads[downloads.find("</A> with ") + len("</A> with "):]
return downloads
def getRepository():
"""Returns the relevant CalcRepository object for this repo file"""
global name, url
return TicalcRepository(name, url)
| mit | 8,673,408,777,575,601,000 | 34.780303 | 124 | 0.691086 | false |
The-Compiler/qutebrowser | qutebrowser/browser/webengine/darkmode.py | 1 | 10368 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Get darkmode arguments to pass to Qt.
Overview of blink setting names based on the Qt version:
Qt 5.10
-------
First implementation, called "high contrast mode".
- highContrastMode (kOff/kSimpleInvertForTesting/kInvertBrightness/kInvertLightness)
- highContrastGrayscale (bool)
- highContrastContrast (float)
- highContractImagePolicy (kFilterAll/kFilterNone)
Qt 5.11, 5.12, 5.13
-------------------
New "smart" image policy.
- Mode/Grayscale/Contrast as above
- highContractImagePolicy (kFilterAll/kFilterNone/kFilterSmart [new!])
Qt 5.14
-------
Renamed to "darkMode".
- darkMode (kOff/kSimpleInvertForTesting/kInvertBrightness/kInvertLightness/
kInvertLightnessLAB [new!])
- darkModeGrayscale (bool)
- darkModeContrast (float)
- darkModeImagePolicy (kFilterAll/kFilterNone/kFilterSmart)
- darkModePagePolicy (kFilterAll/kFilterByBackground) [new!]
- darkModeTextBrightnessThreshold (int) [new!]
- darkModeBackgroundBrightnessThreshold (int) [new!]
- darkModeImageGrayscale (float) [new!]
Qt 5.15.0 and 5.15.1
--------------------
"darkMode" split into "darkModeEnabled" and "darkModeInversionAlgorithm".
- darkModeEnabled (bool) [new!]
- darkModeInversionAlgorithm (kSimpleInvertForTesting/kInvertBrightness/
kInvertLightness/kInvertLightnessLAB)
- Rest (except darkMode) as above.
- NOTE: smart image policy is broken with Qt 5.15.0!
Qt 5.15.2
---------
Prefix changed to "forceDarkMode".
- As with Qt 5.15.0 / .1, but with "forceDarkMode" as prefix.
"""
import enum
from typing import Any, Iterable, Iterator, Mapping, Optional, Set, Tuple, Union
try:
from PyQt5.QtWebEngine import PYQT_WEBENGINE_VERSION
except ImportError: # pragma: no cover
# Added in PyQt 5.13
PYQT_WEBENGINE_VERSION = None # type: ignore[assignment]
from qutebrowser.config import config
from qutebrowser.utils import usertypes, qtutils, utils, log
class Variant(enum.Enum):
"""A dark mode variant."""
unavailable = enum.auto()
qt_511_to_513 = enum.auto()
qt_514 = enum.auto()
qt_515_0 = enum.auto()
qt_515_1 = enum.auto()
qt_515_2 = enum.auto()
# Mapping from a colors.webpage.darkmode.algorithm setting value to
# Chromium's DarkModeInversionAlgorithm enum values.
_ALGORITHMS = {
# 0: kOff (not exposed)
# 1: kSimpleInvertForTesting (not exposed)
'brightness-rgb': 2, # kInvertBrightness
'lightness-hsl': 3, # kInvertLightness
'lightness-cielab': 4, # kInvertLightnessLAB
}
# kInvertLightnessLAB is not available with Qt < 5.14
_ALGORITHMS_BEFORE_QT_514 = _ALGORITHMS.copy()
_ALGORITHMS_BEFORE_QT_514['lightness-cielab'] = _ALGORITHMS['lightness-hsl']
# Mapping from a colors.webpage.darkmode.policy.images setting value to
# Chromium's DarkModeImagePolicy enum values.
_IMAGE_POLICIES = {
'always': 0, # kFilterAll
'never': 1, # kFilterNone
'smart': 2, # kFilterSmart
}
# Mapping from a colors.webpage.darkmode.policy.page setting value to
# Chromium's DarkModePagePolicy enum values.
_PAGE_POLICIES = {
'always': 0, # kFilterAll
'smart': 1, # kFilterByBackground
}
_BOOLS = {
True: 'true',
False: 'false',
}
_DarkModeSettingsType = Iterable[
Tuple[
str, # qutebrowser option name
str, # darkmode setting name
# Mapping from the config value to a string (or something convertable
# to a string) which gets passed to Chromium.
Optional[Mapping[Any, Union[str, int]]],
],
]
_DarkModeDefinitionType = Tuple[_DarkModeSettingsType, Set[str]]
_QT_514_SETTINGS = [
('policy.images', 'darkModeImagePolicy', _IMAGE_POLICIES),
('contrast', 'darkModeContrast', None),
('grayscale.all', 'darkModeGrayscale', _BOOLS),
('policy.page', 'darkModePagePolicy', _PAGE_POLICIES),
('threshold.text', 'darkModeTextBrightnessThreshold', None),
('threshold.background', 'darkModeBackgroundBrightnessThreshold', None),
('grayscale.images', 'darkModeImageGrayscale', None),
]
# Our defaults for policy.images are different from Chromium's, so we mark it as
# mandatory setting - except on Qt 5.15.0 where we don't, so we don't get the
# workaround warning below if the setting wasn't explicitly customized.
_DARK_MODE_DEFINITIONS: Mapping[Variant, _DarkModeDefinitionType] = {
Variant.unavailable: ([], set()),
Variant.qt_515_2: ([
# 'darkMode' renamed to 'forceDarkMode'
('enabled', 'forceDarkModeEnabled', _BOOLS),
('algorithm', 'forceDarkModeInversionAlgorithm', _ALGORITHMS),
('policy.images', 'forceDarkModeImagePolicy', _IMAGE_POLICIES),
('contrast', 'forceDarkModeContrast', None),
('grayscale.all', 'forceDarkModeGrayscale', _BOOLS),
('policy.page', 'forceDarkModePagePolicy', _PAGE_POLICIES),
('threshold.text', 'forceDarkModeTextBrightnessThreshold', None),
(
'threshold.background',
'forceDarkModeBackgroundBrightnessThreshold',
None
),
('grayscale.images', 'forceDarkModeImageGrayscale', None),
], {'enabled', 'policy.images'}),
Variant.qt_515_1: ([
# 'policy.images' mandatory again
('enabled', 'darkModeEnabled', _BOOLS),
('algorithm', 'darkModeInversionAlgorithm', _ALGORITHMS),
('policy.images', 'darkModeImagePolicy', _IMAGE_POLICIES),
('contrast', 'darkModeContrast', None),
('grayscale.all', 'darkModeGrayscale', _BOOLS),
('policy.page', 'darkModePagePolicy', _PAGE_POLICIES),
('threshold.text', 'darkModeTextBrightnessThreshold', None),
('threshold.background', 'darkModeBackgroundBrightnessThreshold', None),
('grayscale.images', 'darkModeImageGrayscale', None),
], {'enabled', 'policy.images'}),
Variant.qt_515_0: ([
# 'policy.images' not mandatory because it's broken
('enabled', 'darkModeEnabled', _BOOLS),
('algorithm', 'darkModeInversionAlgorithm', _ALGORITHMS),
('policy.images', 'darkModeImagePolicy', _IMAGE_POLICIES),
('contrast', 'darkModeContrast', None),
('grayscale.all', 'darkModeGrayscale', _BOOLS),
('policy.page', 'darkModePagePolicy', _PAGE_POLICIES),
('threshold.text', 'darkModeTextBrightnessThreshold', None),
('threshold.background', 'darkModeBackgroundBrightnessThreshold', None),
('grayscale.images', 'darkModeImageGrayscale', None),
], {'enabled'}),
Variant.qt_514: ([
('algorithm', 'darkMode', _ALGORITHMS), # new: kInvertLightnessLAB
('policy.images', 'darkModeImagePolicy', _IMAGE_POLICIES),
('contrast', 'darkModeContrast', None),
('grayscale.all', 'darkModeGrayscale', _BOOLS),
('policy.page', 'darkModePagePolicy', _PAGE_POLICIES),
('threshold.text', 'darkModeTextBrightnessThreshold', None),
('threshold.background', 'darkModeBackgroundBrightnessThreshold', None),
('grayscale.images', 'darkModeImageGrayscale', None),
], {'algorithm', 'policy.images'}),
Variant.qt_511_to_513: ([
('algorithm', 'highContrastMode', _ALGORITHMS_BEFORE_QT_514),
('policy.images', 'highContrastImagePolicy', _IMAGE_POLICIES), # new: smart
('contrast', 'highContrastContrast', None),
('grayscale.all', 'highContrastGrayscale', _BOOLS),
], {'algorithm', 'policy.images'}),
}
def _variant() -> Variant:
"""Get the dark mode variant based on the underlying Qt version."""
if PYQT_WEBENGINE_VERSION is not None:
# Available with Qt >= 5.13
if PYQT_WEBENGINE_VERSION >= 0x050f02:
return Variant.qt_515_2
elif PYQT_WEBENGINE_VERSION == 0x050f01:
return Variant.qt_515_1
elif PYQT_WEBENGINE_VERSION == 0x050f00:
return Variant.qt_515_0
elif PYQT_WEBENGINE_VERSION >= 0x050e00:
return Variant.qt_514
elif PYQT_WEBENGINE_VERSION >= 0x050d00:
return Variant.qt_511_to_513
raise utils.Unreachable(hex(PYQT_WEBENGINE_VERSION))
# If we don't have PYQT_WEBENGINE_VERSION, we're on 5.12 (or older, but 5.12 is the
# oldest supported version).
assert not qtutils.version_check( # type: ignore[unreachable]
'5.13', compiled=False)
return Variant.qt_511_to_513
def settings() -> Iterator[Tuple[str, str]]:
"""Get necessary blink settings to configure dark mode for QtWebEngine."""
if not config.val.colors.webpage.darkmode.enabled:
return
variant = _variant()
setting_defs, mandatory_settings = _DARK_MODE_DEFINITIONS[variant]
for setting, key, mapping in setting_defs:
# To avoid blowing up the commandline length, we only pass modified
# settings to Chromium, as our defaults line up with Chromium's.
# However, we always pass enabled/algorithm to make sure dark mode gets
# actually turned on.
value = config.instance.get(
'colors.webpage.darkmode.' + setting,
fallback=setting in mandatory_settings)
if isinstance(value, usertypes.Unset):
continue
if (setting == 'policy.images' and value == 'smart' and
variant == Variant.qt_515_0):
# WORKAROUND for
# https://codereview.qt-project.org/c/qt/qtwebengine-chromium/+/304211
log.init.warning("Ignoring colors.webpage.darkmode.policy.images = smart "
"because of Qt 5.15.0 bug")
continue
if mapping is not None:
value = mapping[value]
yield key, str(value)
| gpl-3.0 | 8,197,104,935,258,120,000 | 34.751724 | 87 | 0.671971 | false |
GDGCB/coding-dojo | 003-python-seq-calc-B/test_stat_calculator.py | 1 | 1766 | from unittest import TestCase
from stat_calculator import StatCalculator
class TestStatCalculator(TestCase):
def setUp(self):
self.calculator = StatCalculator()
def test_calc(self):
self.assertEqual(1, 1)
def test_minimum(self):
self.assertEqual( self.calculator.calc([6, 9, 15, -2, 92, 11])["min"], -2 )
self.assertEqual( self.calculator.calc([6, 9, 15, -3, 92, 11])["min"], -3 )
def test_maximum(self):
self.assertEqual( self.calculator.calc([6, 9, 15, -2, 92, 11])["max"], 92 )
self.assertEqual( self.calculator.calc([6, 9, 15, -3, 93, 11])["max"], 93 )
def test_number_of_elements(self):
self.assertEqual( self.calculator.calc([6, 9, 15, -2, 92, 11])["count"], 6 )
def test_average_value(self):
self.assertEqual( self.calculator.calc([6, 9, 15, -2, 92, 11])["average"], 21.833333333333332 )
self.assertEqual( self.calculator.calc([])["average"], None )
def test_sum(self):
self.assertEqual( self.calculator.calc([6, 9, 15, -2, 92, 11])["sum"], 131 )
def test_median(self):
self.assertEqual(self.calculator.calc([6, 9, 15, -2, 92, 11])["median"], 10)
def test_removeDuplicates(self):
self.assertEqual(self.calculator.calc([6, 9, 15, -2, 92, 11, 11], True)["median"], 10)
def test_remove_odd_numbers(self):
self.assertEqual(self.calculator.calc([1,2,3], remove_odd_numbers=True)["sum"], 4)
def test_remove_even_numbers(self):
self.assertEqual(self.calculator.calc([1,2,3], remove_even_numbers=True)["sum"], 2)
def test_remove_minmal_and_maximal_numbers(self):
self.assertEqual(self.calculator.calc([6, 9, 15, -2, 92, 11, 11], remove_minmal_and_maximal_numbers=True)["sum"], 52)
| bsd-3-clause | -3,959,332,840,978,213,000 | 35.791667 | 125 | 0.62684 | false |
MayankGo/ec2-api | ec2api/api/__init__.py | 1 | 26399 | # Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Starting point for routing EC2 requests.
"""
import hashlib
import json
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import requests
import six
import webob
import webob.dec
import webob.exc
from ec2api.api import apirequest
from ec2api.api import ec2utils
from ec2api.api import faults
from ec2api import context
from ec2api import exception
from ec2api.i18n import _
from ec2api import wsgi
LOG = logging.getLogger(__name__)
ec2_opts = [
cfg.StrOpt('keystone_url',
default='http://localhost',
help='URL to get token from ec2 request.'),
cfg.StrOpt('keystone_sig_url',
default='$keystone_url/ec2-auth',
help='URL to validate signature/access key in ec2 request.'),
cfg.StrOpt('keystone_token_url',
default='$keystone_url/token-auth',
help='URL to validate token in ec2 request.'),
cfg.IntOpt('ec2_timestamp_expiry',
default=300,
help='Time in seconds before ec2 timestamp expires'),
]
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('use_forwarded_for', 'ec2api.api.auth')
EMPTY_SHA256_HASH = (
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
# This is the buffer size used when calculating sha256 checksums.
# Experimenting with various buffer sizes showed that this value generally
# gave the best result (in terms of performance).
PAYLOAD_BUFFER = 1024 * 1024
# Fault Wrapper around all EC2 requests #
class FaultWrapper(wsgi.Middleware):
"""Calls the middleware stack, captures any exceptions into faults."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception:
LOG.exception(_("FaultWrapper cathes error"))
return faults.Fault(webob.exc.HTTPInternalServerError())
class RequestLogging(wsgi.Middleware):
"""Access-Log akin logging for all EC2 API requests."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
start = timeutils.utcnow()
rv = req.get_response(self.application)
self.log_request_completion(rv, req, start)
return rv
def log_request_completion(self, response, request, start):
apireq = request.environ.get('ec2.request', None)
if apireq:
action = apireq.action
else:
action = None
ctxt = request.environ.get('ec2api.context', None)
delta = timeutils.utcnow() - start
seconds = delta.seconds
microseconds = delta.microseconds
LOG.info(
"%s.%ss %s %s %s %s %s [%s] %s %s",
seconds,
microseconds,
request.remote_addr,
request.method,
"%s%s" % (request.script_name, request.path_info),
action,
response.status_int,
request.user_agent,
request.content_type,
response.content_type,
context=ctxt)
class InvalidCredentialsException(Exception):
def __init__(self, msg):
super(Exception, self).__init__()
self.msg = msg
class EC2KeystoneAuth(wsgi.Middleware):
"""Authenticate an EC2 request with keystone and convert to context."""
resourceIdMapping = {
'CreateVpc' : '*',
'CreateSubnet' : '*',
'CreateRouteTable' : '*',
'CreateRoute' : 'RouteTableId',
'CreateSecurityGroup' : '*',
'DeleteVpc' : 'VpcId',
'DeleteSubnet' : 'SubnetId',
'DeleteRouteTable' : 'RouteTableId',
'DeleteSecurityGroup' : 'GroupId',
'DeleteRoute' : 'RouteTableId',
'AssociateRouteTable' : 'SubnetId',
'DisassociateRouteTable' : 'AssociationId',
'AuthorizeSecurityGroupIngress' : 'GroupId',
'AuthorizeSecurityGroupEgress' : 'GroupId',
'RevokeSecurityGroupEgress' : 'GroupId',
'RevokeSecurityGroupIngress' : 'GroupId',
'DescribeVpcs' : '*',
'DescribeSubnets' : '*',
'DescribeRouteTables' : '*',
'DescribeSecurityGroups' : '*',
'AllocateAddress' : '',
'AssociateAddress' : '',
'DisassociateAddress' : '',
'ReleaseAddress' : '',
'DescribeAddresses' : '',
'CreateExtnetwork' : '',
'UpdateQuota' : '',
'ShowQuota' : ''
}
armappingdict = {
'CreateVpc': {
"action": "jrn:jcs:vpc:CreateVpc",
"resource": "jrn:jcs:vpc::Vpc:",
"implicit_allow": "False"
},
'DeleteVpc':
{
"action": "jrn:jcs:vpc:DeleteVpc",
"resource": "jrn:jcs:vpc::Vpc:",
"implicit_allow": "False"
},
'DescribeVpcs':
{
"action": "jrn:jcs:vpc:DescribeVpcs",
"resource": "jrn:jcs:vpc::Vpc:",
"implicit_allow": "False"
},
'CreateSubnet':
{
"action": "jrn:jcs:vpc:CreateSubnet",
"resource": "jrn:jcs:vpc::Subnet:",
"implicit_allow": "False"
},
'DeleteSubnet':
{
"action": "jrn:jcs:vpc:DeleteSubnet",
"resource": "jrn:jcs:vpc::Subnet:",
"implicit_allow": "False"
},
'DescribeSubnets':
{
"action": "jrn:jcs:vpc:DescribeSubnets",
"resource": "jrn:jcs:vpc::Subnet:",
"implicit_allow": "False"
},
'CreateRouteTable':
{
"action": "jrn:jcs:vpc:CreateRouteTable",
"resource": "jrn:jcs:vpc::RouteTable:",
"implicit_allow": "False"
},
'DeleteRouteTable':
{
"action": "jrn:jcs:vpc:DeleteRouteTable",
"resource": "jrn:jcs:vpc::RouteTable:",
"implicit_allow": "False"
},
'AssociateRouteTable':
{
"action": "jrn:jcs:vpc:AssociateRouteTable",
"resource": "jrn:jcs:vpc::Subnet:",
"implicit_allow": "False"
},
'DisassociateRouteTable':
{
"action": "jrn:jcs:vpc:DisassociateRouteTable",
"resource": "jrn:jcs:vpc::AssociatedRouteTable:",
"implicit_allow": "False"
},
'DescribeRouteTables':
{
"action": "jrn:jcs:vpc:DescribeRouteTables",
"resource": "jrn:jcs:vpc::RouteTable:",
"implicit_allow": "False"
},
'CreateRoute':
{
"action": "jrn:jcs:vpc:CreateRoute",
"resource": "jrn:jcs:vpc::RouteTable:",
"implicit_allow": "False"
},
'DeleteRoute':
{
"action": "jrn:jcs:vpc:DeleteRoute",
"resource": "jrn:jcs:vpc::RouteTable:",
"implicit_allow": "False"
},
'AllocateAddress': None,
'AssociateAddress': None,
'DisassociateAddress': None,
'ReleaseAddress': None,
'DescribeAddresses': None,
'CreateSecurityGroup':
{
"action": "jrn:jcs:vpc:CreateSecurityGroup",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'DeleteSecurityGroup':
{
"action": "jrn:jcs:vpc:DeleteSecurityGroup",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'DescribeSecurityGroups':
{
"action": "jrn:jcs:vpc:DescribeSecurityGroups",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'AuthorizeSecurityGroupEgress':
{
"action": "jrn:jcs:vpc:AuthorizeSecurityGroupEgress",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'AuthorizeSecurityGroupIngress':
{
"action": "jrn:jcs:vpc:AuthorizeSecurityGroupIngress",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'RevokeSecurityGroupEgress':
{
"action": "jrn:jcs:vpc:RevokeSecurityGroupEgress",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'RevokeSecurityGroupIngress':
{
"action": "jrn:jcs:vpc:RevokeSecurityGroupIngress",
"resource": "jrn:jcs:vpc::SecurityGroup:",
"implicit_allow": "False"
},
'CreateExtnetwork': None,
'UpdateQuota': None,
'ShowQuota' : None
}
def _get_signature(self, req):
"""Extract the signature from the request.
This can be a get/post variable or for version 4 also in a header
called 'Authorization'.
- params['Signature'] == version 0,1,2,3
- params['X-Amz-Signature'] == version 4
- header 'Authorization' == version 4
"""
sig = req.params.get('Signature') or req.params.get('X-Amz-Signature')
if sig is not None:
return sig
if 'Authorization' not in req.headers:
return None
auth_str = req.headers['Authorization']
if not auth_str.startswith('AWS4-HMAC-SHA256'):
return None
return auth_str.partition("Signature=")[2].split(',')[0]
def _get_access(self, req):
"""Extract the access key identifier.
For version 0/1/2/3 this is passed as the AccessKeyId parameter, for
version 4 it is either an X-Amz-Credential parameter or a Credential=
field in the 'Authorization' header string.
"""
access = req.params.get('JCSAccessKeyId')
if access is not None:
return access
cred_param = req.params.get('X-Amz-Credential')
if cred_param:
access = cred_param.split("/")[0]
if access is not None:
return access
if 'Authorization' not in req.headers:
return None
auth_str = req.headers['Authorization']
if not auth_str.startswith('AWS4-HMAC-SHA256'):
return None
cred_str = auth_str.partition("Credential=")[2].split(',')[0]
return cred_str.split("/")[0]
def _get_auth_token(self, req):
"""Extract the Auth token from the request
This is the header X-Auth-Token present in the request
"""
auth_token = None
auth_token = req.headers.get('X-Auth-Token')
return auth_token
def _get_resource_id(self, req, action):
resource = None
resourceId = None
resource = self.resourceIdMapping[action]
if '*' == resource:
resourceId = resource
elif '' == resource:
resourceId = resource
else:
resourceId = req.params.get(resource)
return resourceId
def _get_action_resource_mapping(self, req):
armvalue = None
action = req.params.get('Action')
try:
actiondict = self.armappingdict[action]
if actiondict == None:
# No mapping available. Pass an empty list.
armvalue = []
else:
# Create a new instance of the action resource mapping dictionary for subsequent
# modifications and pass it as a member of a list
armvalue = [dict(actiondict)]
except KeyError:
return armvalue
return armvalue
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
request_id = context.generate_request_id()
# NOTE(alevine) We need to calculate the hash here because
# subsequent access to request modifies the req.body so the hash
# calculation will yield invalid results.
headers = {'Content-Type': 'application/json'}
auth_token = self._get_auth_token(req)
if None == auth_token:
signature = self._get_signature(req)
if not signature:
msg = _("Signature not provided")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
access = self._get_access(req)
if not access:
msg = _("Access key not provided")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
if 'X-Amz-Signature' in req.params or 'Authorization' in req.headers:
params = {}
else:
# Make a copy of args for authentication and signature verification
params = dict(req.params)
# Not part of authentication args
params.pop('Signature', None)
#version = params.pop('Version')
action = req.params.get('Action')
arm = {}
arm = self._get_action_resource_mapping(req)
if None == arm:
msg = _("Action : " + action + " Not Found")
return faults.ec2_error_response(request_id, "ActionNotFound", msg,
status=404)
resourceId = None
resourceId = self._get_resource_id(req, action)
if None == resourceId:
msg = _("Action is : " + action + " and ResourceId Not Found")
return faults.ec2_error_response(request_id, "ResourceIdNotFound", msg,
status=404)
if '' != resourceId:
arm[0]['resource'] = arm[0].get('resource') + resourceId
if auth_token:
data = {}
iam_validation_url = CONF.keystone_token_url
headers['X-Auth-Token'] = auth_token
data['action_resource_list'] = arm
data = jsonutils.dumps(data)
else:
host = req.host.split(':')[0]
cred_dict = {
'access': access,
'action_resource_list': arm,
'body_hash': '',
'headers': {},
'host': host,
'signature': signature,
'verb': req.method,
'path': '/',
'params': params,
}
iam_validation_url = CONF.keystone_sig_url
if "ec2" in iam_validation_url:
creds = {'ec2Credentials': cred_dict}
else:
creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
data = jsonutils.dumps(creds)
verify = CONF.ssl_ca_file or not CONF.ssl_insecure
response = requests.request('POST', iam_validation_url, verify=verify,
data=data, headers=headers)
status_code = response.status_code
if status_code != 200:
LOG.error("Request headers - %s", str(headers))
LOG.error("Request params - %s", str(data))
LOG.error("Response headers - %s", str(response.headers))
LOG.error("Response content - %s", str(response._content))
msg = response.reason
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=status_code)
result = response.json()
try:
user_id = result['user_id']
project_id = result['account_id']
if auth_token:
token_id = auth_token
else:
token_id = result['token_id']
if not token_id or not project_id or not user_id:
raise KeyError
user_name = project_name = 'default'
roles = []
catalog = []
except (AttributeError, KeyError):
LOG.exception(_("Keystone failure"))
msg = _("Failure communicating with keystone")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For',
remote_address)
ctxt = context.RequestContext(user_id, project_id,
user_name=user_name,
project_name=project_name,
roles=roles,
auth_token=token_id,
remote_address=remote_address,
service_catalog=catalog,
api_version=req.params.get('Version'))
req.environ['ec2api.context'] = ctxt
return self.application
class Requestify(wsgi.Middleware):
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
non_args = ['Action', 'Signature', 'JCSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Version', 'Timestamp']
args = dict(req.params)
try:
expired = ec2utils.is_ec2_timestamp_expired(
req.params,
expires=CONF.ec2_timestamp_expiry)
if expired:
msg = _("Timestamp failed validation.")
LOG.exception(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
# Raise KeyError if omitted
action = req.params['Action']
# Fix bug lp:720157 for older (version 1) clients
version = req.params.get('SignatureVersion')
if version and int(version) == 1:
non_args.remove('SignatureMethod')
if 'SignatureMethod' in args:
args.pop('SignatureMethod')
for non_arg in non_args:
args.pop(non_arg, None)
except KeyError:
raise webob.exc.HTTPBadRequest()
except exception.InvalidRequest as err:
raise webob.exc.HTTPBadRequest(explanation=unicode(err))
LOG.debug('action: %s', action)
for key, value in args.items():
LOG.debug('arg: %(key)s\t\tval: %(value)s',
{'key': key, 'value': value})
# Success!
api_request = apirequest.APIRequest(
action, req.params['Version'], args)
req.environ['ec2.request'] = api_request
return self.application
def exception_to_ec2code(ex):
"""Helper to extract EC2 error code from exception.
For other than EC2 exceptions (those without ec2_code attribute),
use exception name.
"""
if hasattr(ex, 'ec2_code'):
code = ex.ec2_code
else:
code = type(ex).__name__
return code
def ec2_error_ex(ex, req, unexpected=False):
"""Return an EC2 error response.
Return an EC2 error response based on passed exception and log
the exception on an appropriate log level:
* DEBUG: expected errors
* ERROR: unexpected errors
All expected errors are treated as client errors and 4xx HTTP
status codes are always returned for them.
Unexpected 5xx errors may contain sensitive information,
suppress their messages for security.
"""
code = exception_to_ec2code(ex)
for status_name in ('code', 'status', 'status_code', 'http_status'):
status = getattr(ex, status_name, None)
if isinstance(status, int):
break
else:
status = 500
if unexpected:
log_fun = LOG.error
log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s")
exc_info = sys.exc_info()
else:
log_fun = LOG.debug
log_msg = _("%(ex_name)s raised: %(ex_str)s")
exc_info = None
context = req.environ['ec2api.context']
request_id = context.request_id
log_msg_args = {
'ex_name': type(ex).__name__,
'ex_str': unicode(ex)
}
log_fun(log_msg % log_msg_args, context=context, exc_info=exc_info)
if unexpected and status >= 500:
message = _('Unknown error occurred.')
elif getattr(ex, 'message', None):
message = unicode(ex.message)
elif ex.args and any(arg for arg in ex.args):
message = " ".join(map(unicode, ex.args))
else:
message = unicode(ex)
if unexpected:
# Log filtered environment for unexpected errors.
env = req.environ.copy()
for k in env.keys():
if not isinstance(env[k], six.string_types):
env.pop(k)
log_fun(_('Environment: %s') % jsonutils.dumps(env))
return faults.ec2_error_response(request_id, code, message, status=status)
class Executor(wsgi.Application):
"""Execute an EC2 API request.
Executes 'ec2.action', passing 'ec2api.context' and
'ec2.action_args' (all variables in WSGI environ.) Returns an XML
response, or a 400 upon failure.
"""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['ec2api.context']
api_request = req.environ['ec2.request']
try:
result = api_request.invoke(context)
except Exception as ex:
return ec2_error_ex(
ex, req, unexpected=not isinstance(ex, exception.EC2Exception))
else:
resp = webob.Response()
resp.status = 200
resp.headers['Content-Type'] = 'text/xml'
resp.body = str(result)
return resp
| apache-2.0 | -3,200,540,836,945,918,000 | 38.638138 | 97 | 0.466609 | false |
houmie/duelify | duelify/settings.py | 1 | 12912 | # Django settings for duelify project.
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
#LOGIN_ERROR_URL = '/error'
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'socialauth_associate_complete'
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/new-users-invited/'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/login-invited/'
SIGNUP_ERROR_URL = '/signup-error/'
LOGIN_ERROR_URL = '/signup-error/'
SOCIAL_AUTH_USER_MODEL = 'duelify_app.User'
TWITTER_CONSUMER_KEY = 'xxxx'
TWITTER_CONSUMER_SECRET = 'xxxx'
FACEBOOK_APP_ID = 'xxx'
FACEBOOK_API_SECRET = 'xxxx'
FACEBOOK_EXTENDED_PERMISSIONS = ['email', 'user_birthday', 'user_location']
#FACEBOOK_EXTRA_DATA = [('user_birthday', 'user_location')]
GOOGLE_OAUTH2_CLIENT_ID = 'xxxx'
GOOGLE_OAUTH2_CLIENT_SECRET = 'xxxx'
SOCIAL_AUTH_REDIRECT_IS_HTTPS = False
#SOCIAL_AUTH_RAISE_EXCEPTIONS = True
AUTH_USER_MODEL = 'duelify_app.User'
SITE_HOST = 'duelify.com:8000'
DEFAULT_FROM_EMAIL = '[email protected]'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = '587'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'xxxxxx'
EMAIL_USE_TLS = True
SERVER_EMAIL = '[email protected]'
EMAIL_SUBJECT_PREFIX = '[duelify]'
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
GEOIP_PATH = '/home/hooman/venuscloud/duelify-env/site/database/'
#GEOS_LIBRARY_PATH = '/opt/geos/lib/libgeos_c.so'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Hooman', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'duelifydb', # Or path to database file if using sqlite3.
'USER': 'django_user', # Not used with sqlite3.
'PASSWORD': 'houmie123', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
PIPELINE_YUGLIFY_BINARY = '/home/hooman/venuscloud/duelify-env/node_modules/yuglify/bin/yuglify'
PIPELINE_CLOSURE_BINARY = '/home/hooman/venuscloud/duelify-env/bin/closure'
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
#PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.closure.ClosureCompressor'
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_CSS = {
'duelify_css': {
'source_filenames': (
'duelify/duelify.css',
),
'output_filename': 'duelify/duelify.min.css',
},
'bootstrap_datepicker_css': {
'source_filenames': (
'bootstrap-datepicker/css/datepicker.css',
),
'output_filename': 'bootstrap-datepicker/css/datepicker.min.css',
},
'social_buttons_css': {
'source_filenames': (
'css-social-buttons/css/zocial.css',
),
'output_filename': 'css-social-buttons/css/zocial.min.css',
},
}
PIPELINE_JS = {
'duelify_js': {
'source_filenames': (
'duelify/duelify.js',
),
'output_filename': 'duelify/duelify.min.js',
},
'bootstrap_datepicker_js': {
'source_filenames': (
'bootstrap-datepicker/js/bootstrap-datepicker.js',
),
'output_filename': 'bootstrap-datepicker/js/bootstrap-datepicker.min.js',
},
'ajaxform_js': {
'source_filenames': (
'ajaxform/jquery.form.js',
),
'output_filename': 'ajaxform/jquery.form.min.js',
},
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['duelify.com', 'www.duelify.com', '54.225.168.25']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = '/home/hooman/venuscloud/duelify-env/site/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/home/hooman/venuscloud/duelify-env/site/static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/home/hooman/venuscloud/duelify-env/site/static_files/',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
#TINYMCE_JS_URL = STATIC_URL + 'tinymce/js/tinymce/tinymce.min.js'
#TINYMCE_JS_URL = '/home/hooman/venuscloud/duelify-env/site/static_files/tinymce/js/tinymce/tinymce.min.js'
#TINYMCE_JS_ROOT = STATIC_ROOT + 'tinymce/js/tinymce'
#TINYMCE_JS_ROOT = '/home/hooman/venuscloud/duelify-env/site/static_files/tinymce/js/tinymce'
TINYMCE_COMPRESSOR = True
TINYMCE_DEFAULT_CONFIG = {
# General options
'mode' : "textareas",
'theme' : "advanced",
'plugins' : "media,pagebreak,style,layer,table,save,advhr,advimage,advlink,emotions,iespell,inlinepopups,insertdatetime,preview,media,searchreplace,print,contextmenu,paste,directionality,fullscreen,noneditable,visualchars,nonbreaking,xhtmlxtras,template,wordcount,advlist,autosave",
# 'plugins': "spellchecker,directionality,paste,searchreplace",
# 'language': "{{ language }}",
# 'directionality': "{{ directionality }}",
# 'spellchecker_languages' : "{{ spellchecker_languages }}",
# 'spellchecker_rpc_url' : "{{ spellchecker_rpc_url }}",
'theme_advanced_buttons1_add' : "media",
'theme_advanced_buttons2_add' : "advimage",
# Theme options
'theme_advanced_buttons1' : "bold,italic,underline,strikethrough,|,justifyleft,justifycenter,justifyright,justifyfull,fontselect,fontsizeselect,", #fullscreen,code",
'theme_advanced_buttons2' : "bullist,numlist,|,outdent,indent,blockquote,|,undo,redo,|,link,unlink,|,forecolor,backcolor",
#'theme_advanced_buttons3' : "tablecontrols,|,hr,sub,sup,|,charmap",
'theme_advanced_toolbar_location' : "top",
'theme_advanced_toolbar_align' : "left",
'theme_advanced_statusbar_location' : "bottom",
'theme_advanced_resizing' : 'true',
#Example content CSS (should be your site CSS)
#content_css : "/css/style.css",
'template_external_list_url' : "lists/template_list.js",
'external_link_list_url' : "lists/link_list.js",
'external_image_list_url' : "lists/image_list.js",
'media_external_list_url' : "lists/media_list.js",
# Style formats
'style_formats' : [
{'title' : 'Bold text', 'inline' : 'strong'},
{'title' : 'Red text', 'inline' : 'span', 'styles' : {'color' : '#ff0000'}},
{'title' : 'Help', 'inline' : 'strong', 'classes' : 'help'},
{'title' : 'Table styles'},
{'title' : 'Table row 1', 'selector' : 'tr', 'classes' : 'tablerow'}
],
'width': '700',
'height': '400'
}
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'il8zx-az!ti=e-@m5u&q54q%_%aidnfj05jq4#c8ldax!h3mn2'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
AUTHENTICATION_BACKENDS = ('social_auth.backends.facebook.FacebookBackend',
'social_auth.backends.google.GoogleOAuth2Backend',
'social_auth.backends.twitter.TwitterBackend',
'django.contrib.auth.backends.ModelBackend',)
#TEMPLATE_CONTEXT_PROCESSORS = (
# "django.contrib.auth.context_processors.auth",
# "django.core.context_processors.debug",
# "django.core.context_processors.i18n",
# "django.core.context_processors.media",
# "django.core.context_processors.static",
# "django.core.context_processors.tz",
# "django.contrib.messages.context_processors.messages",
# "django.core.context_processors.request",
# 'social_auth.context_processors.social_auth_by_name_backends',
# 'social_auth.context_processors.social_auth_backends',
# 'social_auth.context_processors.social_auth_by_type_backends',
# 'social_auth.context_processors.social_auth_login_redirect',
#)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.gzip.GZipMiddleware',
'pipeline.middleware.MinifyHTMLMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'social_auth.middleware.SocialAuthExceptionMiddleware',
# Uncomment the next line for simple clickjacking protection:
#'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
SOCIAL_AUTH_PIPELINE = (
'social_auth.backends.pipeline.social.social_auth_user',
'social_auth.backends.pipeline.associate.associate_by_email',
'social_auth.backends.pipeline.user.get_username',
'social_auth.backends.pipeline.user.create_user',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details',
'duelify_app.utils.social_media_save',
)
ROOT_URLCONF = 'duelify.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'duelify.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/home/hooman/venuscloud/duelify-env/site/templates/'
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'widget_tweaks',
'tinymce',
'pipeline',
'south',
'django.contrib.sitemaps',
'social_auth',
'duelify_app',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| gpl-2.0 | -3,736,456,069,093,995,000 | 36.865103 | 286 | 0.679445 | false |
ANR-DIADEMS/timeside-diadems | setup.py | 1 | 2688 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages, Extension
import sys
from setuptools.command.test import test as TestCommand
#try:
# import multiprocessing # Workaround for http://bugs.python.org/issue15881
#except ImportError:
# pass
# Pytest
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests', '--verbose']
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
CLASSIFIERS = [
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Programming Language :: Python',
'Programming Language :: JavaScript',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Analysis',
'Topic :: Multimedia :: Sound/Audio :: Players',
'Topic :: Multimedia :: Sound/Audio :: Conversion',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries :: Python Modules',
]
KEYWORDS = 'audio analysis features extraction MIR transcoding graph visualize plot HTML5 interactive metadata player'
setup(
# Package
name='TimeSide-Diadems',
install_requires=[
'timeside',
# Dependencies for limsi_diarization
'py_sonicvisualiser',
'pyannote.core==0.13.1',
'pyannote.features',
'pyannote.algorithms',
'pyannote.metrics==0.12.1'
],
url='https://github.com/ANR-DIADEMS/timeside-diadems',
    description="TimeSide extensions developed during the Diadems project",
long_description=open('README.rst').read(),
author="Guillaume Pellerin, Thomas Fillon",
author_email="[email protected], [email protected]",
version='0.1',
platforms=['OS Independent'],
license='Gnu Public License V2',
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
include_package_data=True,
zip_safe=False,
# Tests
tests_require=['pytest'],
cmdclass={'test': PyTest},
ext_modules=[Extension('timeside.plugins.diadems.yin',
['timeside/plugins/diadems/yin/pitch_yin.c',
'timeside/plugins/diadems/yin/pitchyin.c']),
Extension('timeside.plugins.diadems.diverg',
['timeside/plugins/diadems/diverg/diverg.c',
'timeside/plugins/diadems/diverg/subdiv.c'])],
)
| gpl-2.0 | -6,027,570,966,313,697,000 | 32.6 | 118 | 0.641741 | false |
uogbuji/pybibframe | lib/writer/exhibitexplorer.py | 1 | 10035 | '''
'''
import re
import sys
import logging
import itertools
#from datachef.ids import simple_hashstring
from amara3 import iri
from versa import I, SUBJECT, RELATIONSHIP, VALUE
BFV = 'http://bibframe.org/vocab/'
WORKCLASS = iri.absolutize('Work', BFV)
INSTANCECLASS = iri.absolutize('Instance', BFV)
TYPE_REL = I(iri.absolutize('type', BFV))
def process(source, work_sink, instance_sink, objects_sink, annotations_sink, logger=logging):
'''
Take an in-memory BIBFRAME model and emit it in Exhibit-based explorer ready form
'''
subobjs = subobjects(objects_sink)
anns = annotations(annotations_sink)
@coroutine
def receive_items():
'''
Receives each resource bundle and processes it by creating an item
dict which is then forwarded to the sink
'''
ix = 1
while True:
workid = yield
#Extract the statements about the work
wstmts = source.match(workid)
rawid = u'_' + str(ix)
work_item = {
u'id': u'work' + rawid,
u'label': rawid,
#u'label': u'{0}, {1}'.format(row['TPNAML'], row['TPNAMF']),
u'type': u'WorkRecord',
}
#Instance starts with same as work, with leader added
instance_item = {
u'leader': leader,
}
instance_item.update(work_item)
instance_item[u'id'] = u'instance' + rawid
instance_item[u'type'] = u'InstanceRecord'
work_item[u'instance'] = u'instance' + rawid
for cf in rec.xml_select(u'ma:controlfield', prefixes=PREFIXES):
key = u'cftag_' + U(cf.xml_select(u'@tag'))
val = U(cf)
if list(cf.xml_select(u'ma:subfield', prefixes=PREFIXES)):
for sf in cf.xml_select(u'ma:subfield', prefixes=PREFIXES):
code = U(sf.xml_select(u'@code'))
sfval = U(sf)
#For now assume all leader fields are instance level
instance_item[key + code] = sfval
else:
#For now assume all leader fields are instance level
instance_item[key] = val
for df in rec.xml_select(u'ma:datafield', prefixes=PREFIXES):
code = U(df.xml_select(u'@tag'))
key = u'dftag_' + code
val = U(df)
if list(df.xml_select(u'ma:subfield', prefixes=PREFIXES)):
subfields = dict(( (U(sf.xml_select(u'@code')), U(sf)) for sf in df.xml_select(u'ma:subfield', prefixes=PREFIXES) ))
lookup = code
#See if any of the field codes represents a reference to an object which can be materialized
handled = False
if code in MATERIALIZE:
(subst, extra_props) = MATERIALIZE[code]
props = {u'marccode': code}
props.update(extra_props)
#props.update(other_properties)
props.update(subfields)
#work_item[FIELD_RENAMINGS.get(code, code)] = subid
subid = subobjs.add(props)
if code in INSTANCE_FIELDS:
instance_item.setdefault(subst, []).append(subid)
elif code in WORK_FIELDS:
work_item.setdefault(subst, []).append(subid)
handled = True
if code in MATERIALIZE_VIA_ANNOTATION:
(subst, extra_object_props, extra_annotation_props) = MATERIALIZE_VIA_ANNOTATION[code]
object_props = {u'marccode': code}
object_props.update(extra_object_props)
#props.update(other_properties)
#Separate annotation subfields from object subfields
object_subfields = subfields.copy()
annotation_subfields = {}
                        for k, v in list(object_subfields.items()):
if code+k in ANNOTATIONS_FIELDS:
annotation_subfields[k] = v
del object_subfields[k]
object_props.update(object_subfields)
objectid = subobjs.add(object_props)
ann_props = {subst: objectid, u'on_work': work_item[u'id'], u'on_instance': instance_item[u'id'],}
ann_props.update(extra_annotation_props)
ann_props.update(annotation_subfields)
annid = anns.add(ann_props)
#Note, even though we have the returned annotation ID we do not use it. No link back from work/instance to annotation
                        print('.', end='', file=sys.stderr)
if code in INSTANCE_FIELDS:
instance_item.setdefault('annotation', []).append(annid)
elif code in WORK_FIELDS:
work_item.setdefault('annotation', []).append(annid)
#The actual subfields go to the annotations sink
#annotations_props = {u'annotates': instance_item[u'id']}
#annotations_props.update(props)
#subid = subobjs.add(annotations_props, annotations_sink)
#The reference is from the instance ID
#instance_item.setdefault(subst, []).append(subid)
handled = True
#work_item.setdefault(FIELD_RENAMINGS.get(code, code), []).append(subid)
#See if any of the field+subfield codes represents a reference to an object which can be materialized
if not handled:
for k, v in subfields.items():
lookup = code + k
if lookup in MATERIALIZE:
(subst, extra_props) = MATERIALIZE[lookup]
props = {u'marccode': code, k: v}
props.update(extra_props)
#print >> sys.stderr, lookup, k, props,
subid = subobjs.add(props)
if lookup in INSTANCE_FIELDS or code in INSTANCE_FIELDS:
instance_item.setdefault(subst, []).append(subid)
elif lookup in WORK_FIELDS or code in WORK_FIELDS:
work_item.setdefault(subst, []).append(subid)
handled = True
else:
field_name = u'dftag_' + lookup
if lookup in FIELD_RENAMINGS:
field_name = FIELD_RENAMINGS[lookup]
                                #Handle the simple substitution of a label name for a MARC code
if lookup in INSTANCE_FIELDS or code in INSTANCE_FIELDS:
instance_item.setdefault(field_name, []).append(v)
elif lookup in WORK_FIELDS or code in WORK_FIELDS:
work_item.setdefault(field_name, []).append(v)
#print >> sys.stderr, lookup, key
elif not handled:
if code in INSTANCE_FIELDS:
instance_item[key] = val
elif code in WORK_FIELDS:
work_item[key] = val
else:
if code in INSTANCE_FIELDS:
instance_item[key] = val
elif code in WORK_FIELDS:
work_item[key] = val
#link = work_item.get(u'cftag_008')
#Handle ISBNs re: https://foundry.zepheira.com/issues/1976
new_instances = []
if not new_instances:
#Make sure it's created as an instance even if it has no ISBN
new_instances.append(instance_item)
instance_ids.append(base_instance_id)
work_item[u'instance'] = instance_ids
special_properties = {}
for k, v in process_leader(leader):
special_properties.setdefault(k, set()).add(v)
for k, v in process_008(instance_item[u'cftag_008']):
special_properties.setdefault(k, set()).add(v)
#We get some repeated values out of leader & 008 processing, and we want to
#Remove dupes so we did so by working with sets then converting to lists
for k, v in special_properties.items():
special_properties[k] = list(v)
instance_item.update(special_properties)
#reduce lists of just one item
for k, v in work_item.items():
if type(v) is list and len(v) == 1:
work_item[k] = v[0]
work_sink.send(work_item)
def send_instance(instance):
for k, v in instance.items():
if type(v) is list and len(v) == 1:
instance[k] = v[0]
instance_sink.send(instance)
for ninst in new_instances:
send_instance(ninst)
#stub_item = {
# u'id': rawid,
# u'label': rawid,
# u'type': u'MarcRecord',
#}
#stub_sink.send(stub_item)
ix += 1
            print('+', end='', file=sys.stderr)
return
target = receive_items()
for stmt in source.match(None, TYPE_REL, WORKCLASS):
workid = stmt[SUBJECT]
target.send(workid)
target.close()
return
| apache-2.0 | 325,627,981,747,450,900 | 41.163866 | 141 | 0.489686 | false |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/wat.py | 1 | 5036 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
unified_strdate,
HEADRequest,
int_or_none,
)
class WatIE(InfoExtractor):
_VALID_URL = r'(?:wat:|https?://(?:www\.)?wat\.tv/video/.*-)(?P<id>[0-9a-z]+)'
IE_NAME = 'wat.tv'
_TESTS = [
{
'url': 'http://www.wat.tv/video/soupe-figues-l-orange-aux-epices-6z1uz_2hvf7_.html',
'info_dict': {
'id': '11713067',
'ext': 'mp4',
'title': 'Soupe de figues à l\'orange et aux épices',
'description': 'Retrouvez l\'émission "Petits plats en équilibre", diffusée le 18 août 2014.',
'upload_date': '20140819',
'duration': 120,
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['HTTP Error 404'],
},
{
'url': 'http://www.wat.tv/video/gregory-lemarchal-voix-ange-6z1v7_6ygkj_.html',
'md5': 'b16574df2c3cd1a36ca0098f2a791925',
'info_dict': {
'id': '11713075',
'ext': 'mp4',
'title': 'Grégory Lemarchal, une voix d\'ange depuis 10 ans (1/3)',
'upload_date': '20140816',
},
'expected_warnings': ["Ce contenu n'est pas disponible pour l'instant."],
},
]
_FORMATS = (
(200, 416, 234),
(400, 480, 270),
(600, 640, 360),
(1200, 640, 360),
(1800, 960, 540),
(2500, 1280, 720),
)
def _real_extract(self, url):
video_id = self._match_id(url)
video_id = video_id if video_id.isdigit() and len(video_id) > 6 else compat_str(int(video_id, 36))
# 'contentv4' is used in the website, but it also returns the related
# videos, we don't need them
video_data = self._download_json(
'http://www.wat.tv/interface/contentv4s/' + video_id, video_id)
video_info = video_data['media']
error_desc = video_info.get('error_desc')
if error_desc:
self.report_warning(
'%s returned error: %s' % (self.IE_NAME, error_desc))
chapters = video_info['chapters']
if chapters:
first_chapter = chapters[0]
def video_id_for_chapter(chapter):
return chapter['tc_start'].split('-')[0]
if video_id_for_chapter(first_chapter) != video_id:
self.to_screen('Multipart video detected')
entries = [self.url_result('wat:%s' % video_id_for_chapter(chapter)) for chapter in chapters]
return self.playlist_result(entries, video_id, video_info['title'])
# Otherwise we can continue and extract just one part, we have to use
# the video id for getting the video url
else:
first_chapter = video_info
title = first_chapter['title']
def extract_url(path_template, url_type):
req_url = 'http://www.wat.tv/get/%s' % (path_template % video_id)
head = self._request_webpage(HEADRequest(req_url), video_id, 'Extracting %s url' % url_type, fatal=False)
if head:
red_url = head.geturl()
if req_url != red_url:
return red_url
return None
def remove_bitrate_limit(manifest_url):
return re.sub(r'(?:max|min)_bitrate=\d+&?', '', manifest_url)
formats = []
try:
alt_urls = lambda manifest_url: [re.sub(r'(?:wdv|ssm)?\.ism/', repl + '.ism/', manifest_url) for repl in
('', 'ssm')]
manifest_urls = self._download_json(
'http://www.wat.tv/get/webhtml/' + video_id, video_id)
m3u8_url = manifest_urls.get('hls')
if m3u8_url:
m3u8_url = remove_bitrate_limit(m3u8_url)
for m3u8_alt_url in alt_urls(m3u8_url):
formats.extend(self._extract_m3u8_formats(
m3u8_alt_url, video_id, 'mp4',
'm3u8_native', m3u8_id='hls', fatal=False))
formats.extend(self._extract_f4m_formats(
m3u8_alt_url.replace('ios', 'web').replace('.m3u8', '.f4m'),
video_id, f4m_id='hds', fatal=False))
mpd_url = manifest_urls.get('mpd')
if mpd_url:
mpd_url = remove_bitrate_limit(mpd_url)
for mpd_alt_url in alt_urls(mpd_url):
formats.extend(self._extract_mpd_formats(
mpd_alt_url, video_id, mpd_id='dash', fatal=False))
self._sort_formats(formats)
except ExtractorError:
abr = 64
for vbr, width, height in self._FORMATS:
tbr = vbr + abr
format_id = 'http-%s' % tbr
fmt_url = 'http://dnl.adv.tf1.fr/2/USP-0x0/%s/%s/%s/ssm/%s-%s-64k.mp4' % (
video_id[-4:-2], video_id[-2:], video_id, video_id, vbr)
if self._is_valid_url(fmt_url, video_id, format_id):
formats.append({
'format_id': format_id,
'url': fmt_url,
'vbr': vbr,
'abr': abr,
'width': width,
'height': height,
})
date_diffusion = first_chapter.get('date_diffusion') or video_data.get('configv4', {}).get('estatS4')
upload_date = unified_strdate(date_diffusion) if date_diffusion else None
duration = None
files = video_info['files']
if files:
duration = int_or_none(files[0].get('duration'))
return {
'id': video_id,
'title': title,
'thumbnail': first_chapter.get('preview'),
'description': first_chapter.get('description'),
'view_count': int_or_none(video_info.get('views')),
'upload_date': upload_date,
'duration': duration,
'formats': formats,
}
| gpl-3.0 | -3,411,929,487,945,467,400 | 30.628931 | 108 | 0.626566 | false |
zzqcn/wireshark | tools/yacc.py | 3 | 134386 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2015,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammer is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what some might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
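# A quick end-to-end reference for how a grammar is supplied (a minimal sketch,
# kept as a comment so nothing in this module changes; all names are
# illustrative and assume a lexer built with ply.lex as shown):
#
#     import ply.lex as lex
#     import ply.yacc as yacc
#
#     tokens = ('NUMBER', 'PLUS')
#     t_PLUS = r'\+'
#     t_ignore = ' '
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_error(t):
#         t.lexer.skip(1)
#
#     def p_expr_plus(p):
#         'expr : expr PLUS NUMBER'
#         p[0] = p[1] + p[3]
#
#     def p_expr_number(p):
#         'expr : NUMBER'
#         p[0] = p[1]
#
#     def p_error(p):
#         pass
#
#     lexer = lex.lex()
#     parser = yacc.yacc()
#     print(parser.parse('1 + 2'))    # prints 3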
import re
import types
import sys
import os.path
import inspect
import base64
import warnings
__version__ = '3.8'
__tabversion__ = '3.8'
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = True # Debugging mode. If set, yacc generates a
                               # 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = False # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
# String type-checking compatibility
if sys.version_info[0] < 3:
string_types = basestring
else:
string_types = str
MAXINT = sys.maxsize
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
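# For example, a user who wants yacc's diagnostics routed through the standard
# logging module can pass their own logger objects when building the parser
# (a sketch; the file name and logger configuration are arbitrary):
#
#     import logging
#     logging.basicConfig(level=logging.DEBUG, filename='parselog.txt')
#     log = logging.getLogger()
#     parser = yacc.yacc(debug=True, debuglog=log, errorlog=log)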
class PlyLogger(object):
def __init__(self, f):
self.f = f
def debug(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
info = debug
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception):
pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit] + ' ...'
result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return '<%s @ 0x%x>' % (type(r).__name__, id(r))
# Panic mode error recovery support. This feature is being reworked--much of the
# code here is to offer a deprecation/backwards compatible transition
_errok = None
_token = None
_restart = None
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
def p_error(p):
...
# Use parser.errok(), parser.token(), parser.restart()
...
parser = yacc.yacc()
'''
def errok():
warnings.warn(_warnmsg)
return _errok()
def restart():
warnings.warn(_warnmsg)
return _restart()
def token():
warnings.warn(_warnmsg)
return _token()
# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
global _errok, _token, _restart
_errok = parser.errok
_token = parser.token
_restart = parser.restart
r = errorfunc(token)
try:
del _errok, _token, _restart
except NameError:
pass
return r
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self):
return self.type
def __repr__(self):
return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
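# Inside a grammar rule the wrapper behaves roughly like this (a sketch; the
# rule and token names are illustrative):
#
#     def p_expr_plus(p):
#         'expr : expr PLUS term'
#         p[0] = p[1] + p[3]      # index access reads/assigns symbol values
#         line = p.lineno(2)      # line number of the PLUS token
#         span = p.lexspan(1)     # (lexpos, endlexpos) of the left 'expr'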
class YaccProduction:
def __init__(self, s, stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser = None
def __getitem__(self, n):
if isinstance(n, slice):
return [s.value for s in self.slice[n]]
elif n >= 0:
return self.slice[n].value
else:
return self.stack[n].value
def __setitem__(self, n, v):
self.slice[n].value = v
def __getslice__(self, i, j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self, n):
return getattr(self.slice[n], 'lineno', 0)
def set_lineno(self, n, lineno):
self.slice[n].lineno = lineno
def linespan(self, n):
startline = getattr(self.slice[n], 'lineno', 0)
endline = getattr(self.slice[n], 'endlineno', startline)
return startline, endline
def lexpos(self, n):
return getattr(self.slice[n], 'lexpos', 0)
def lexspan(self, n):
startpos = getattr(self.slice[n], 'lexpos', 0)
endpos = getattr(self.slice[n], 'endlexpos', startpos)
return startpos, endpos
def error(self):
raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self, lrtab, errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
self.set_defaulted_states()
self.errorok = True
def errok(self):
self.errorok = True
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
# Defaulted state support.
# This method identifies parser states where there is only one possible reduction action.
    # For such states, the parser can choose to make a rule reduction without consuming
# the next look-ahead token. This delayed invocation of the tokenizer can be useful in
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
# See: https://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
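    # As a sketch, after set_defaulted_states() runs the table looks like:
    #
    #     self.defaulted_states = {12: -7}   # state 12 always reduces by rule 7
    #
    # (the state and rule numbers here are made up for illustration).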
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
rules = list(actions.values())
if len(rules) == 1 and rules[0] < 0:
self.defaulted_states[state] = rules[0]
def disable_defaulted_states(self):
self.defaulted_states = {}
def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug, int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
elif tracking:
return self.parseopt(input, lexer, debug, tracking, tokenfunc)
else:
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. Optimized versions of this function
# are automatically created by the ply/ygen.py script. This script cuts out
# sections enclosed in markers such as this:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parsedebug-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
#--! DEBUG
debug.info('PLY: PARSE DEBUG START')
#--! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
#--! DEBUG
debug.debug('')
debug.debug('State : %s', state)
#--! DEBUG
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
#--! DEBUG
debug.debug('Defaulted state %s: Reduce using %d', state, -t)
#--! DEBUG
#--! DEBUG
debug.debug('Stack : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
#--! DEBUG
debug.debug('Action : Shift and goto state %s', t)
#--! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
#--! DEBUG
if plen:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
'['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
goto[statestack[-1-plen]][pname])
else:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
goto[statestack[-1]][pname])
#--! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
#--! DEBUG
debug.info('Done : Returning %s', format_result(result))
debug.info('PLY: PARSE DEBUG END')
#--! DEBUG
return result
if t is None:
#--! DEBUG
debug.error('Error : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parsedebug-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
# This code is automatically generated by the ply/ygen.py script. Make
# changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
# by the ply/ygen.py script. Make changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-notrack-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-notrack-end
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
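# As a sketch of the mapping (not an exact constructor call), a user rule such as
#
#     def p_expr_plus(p):
#         'expr : expr PLUS term'
#
# is stored roughly as a Production with name='expr',
# prod=('expr', 'PLUS', 'term'), len=3 and usyms=['expr', 'PLUS', 'term'],
# where func names the p_expr_plus callable that runs on reduction.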
class Production(object):
reduced = 0
def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = []
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
self.str = '%s -> <empty>' % self.name
def __str__(self):
return self.str
def __repr__(self):
return 'Production(' + str(self) + ')'
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self, index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self, n):
if n > len(self.prod):
return None
p = LRItem(self, n)
# Precompute the list of productions immediately following.
try:
p.lr_after = Prodnames[p.prod[n+1]]
except (IndexError, KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self, str, name, len, func, file, line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return 'MiniProduction(%s)' % self.str
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# are the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self, p, n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = {}
self.prod.insert(n, '.')
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
s = '%s -> <empty>' % self.name
return s
def __repr__(self):
return 'LRItem(' + str(self) + ')'
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
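# For example (a sketch, assuming 'PLUS' is a terminal and 'term' is not):
#
#     rightmost_terminal(['expr', 'PLUS', 'term'], terminals)   # -> 'PLUS'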
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
pass
class Grammar(object):
def __init__(self, terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = {} # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = {} # A dictionary of precomputed FIRST(x) symbols
self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
        self.UsedPrecedence = set()     # Precedence rules that were actually used by the grammar.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self, index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
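    # As a sketch of how this is driven, a user-level precedence table such as
    #
    #     precedence = (('left', 'PLUS', 'MINUS'),
    #                   ('left', 'TIMES', 'DIVIDE'))
    #
    # is translated by the table-building code further below into calls roughly like:
    #
    #     g.set_precedence('PLUS', 'left', 1)
    #     g.set_precedence('TIMES', 'left', 2)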
def set_precedence(self, term, assoc, level):
assert self.Productions == [None], 'Must call set_precedence() before add_production()'
if term in self.Precedence:
raise GrammarError('Precedence already specified for terminal %r' % term)
if assoc not in ['left', 'right', 'nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc, level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
    # Precedence is determined by the precedence of the right-most terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
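    # A sketch of a direct call corresponding to the example above (normally the
    # yacc() machinery does this for you; the file/line values are illustrative):
    #
    #     g = Grammar(['PLUS', 'NUMBER'])
    #     g.add_production('expr', ['expr', 'PLUS', 'term'],
    #                      func='p_expr_plus', file='calc.py', line=42)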
def add_production(self, prodname, syms, func=None, file='', line=0):
if prodname in self.Terminals:
raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
if prodname == 'error':
raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
if not _is_identifier.match(prodname):
raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
# Look for literal tokens
for n, s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
(file, line, s, prodname))
if c not in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
if syms[-2] != '%prec':
raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
(file, line))
precname = syms[-1]
prodprec = self.Precedence.get(precname)
if not prodprec:
raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
else:
self.UsedPrecedence.add(precname)
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms, self.Terminals)
prodprec = self.Precedence.get(precname, ('right', 0))
# See if the rule is already in the rulemap
map = '%s -> %s' % (prodname, syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
'Previous definition at %s:%d' % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if prodname not in self.Nonterminals:
self.Nonterminals[prodname] = []
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if t not in self.Nonterminals:
self.Nonterminals[t] = []
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber, prodname, syms, prodprec, func, file, line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [p]
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self, start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError('start symbol %s undefined' % start)
self.Productions[0] = Production(0, "S'", [start])
self.Nonterminals[start].append(0)
self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if s in reachable:
return
reachable.add(s)
for p in self.Prodnames.get(s, []):
for r in p.prod:
mark_reachable_from(r)
reachable = set()
mark_reachable_from(self.Productions[0].prod[0])
return [s for s in self.Nonterminals if s not in reachable]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
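    #
    # For example, a grammar whose only rule for expr is 'expr : expr PLUS expr'
    # can never derive a string of terminals from expr, so expr is reported here.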
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = True
terminates['$end'] = True
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = False
# Then propagate termination until no change:
while True:
some_change = False
for (n, pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = False
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = True
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = True
some_change = True
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s, term) in terminates.items():
if not term:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
    # Find all symbols that were used in the grammar, but not defined as tokens
    # or grammar rules. Returns a list of tuples (sym, prod) where sym is the
    # symbol and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p:
continue
for s in p.prod:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
result.append((s, p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
    # Find all terminals that were defined, but not used by the grammar.
    # Returns a list of the unused terminal symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s, v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s, v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname, self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
    # During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
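    #
    # For example, if First[A] is ['a', '<empty>'] and First[b] is ['b'], then
    # _first(('A', 'b')) is ['a', 'b']: A may derive empty, so the scan moves on
    # to b. '<empty>' is only added when every symbol in beta can derive empty.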
# -------------------------------------------------------------------------
def _first(self, beta):
# We are computing First(x1,x2,x3,...,xn)
result = []
for x in beta:
x_produces_empty = False
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = True
else:
if f not in result:
result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while True:
some_change = False
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append(f)
some_change = True
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
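    #
    # For example, given the rules 'S : A b' and 'A : a', the computed sets are
    # Follow[S] = ['$end'] and Follow[A] = ['b'].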
# ---------------------------------------------------------------------
def compute_follow(self, start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = []
if not start:
start = self.Productions[1].name
self.Follow[start] = ['$end']
while True:
didadd = False
for p in self.Productions[1:]:
# Here is the production set
for i, B in enumerate(p.prod):
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = False
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if f == '<empty>':
hasempty = True
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if not didadd:
break
return self.Follow
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
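    #
    # Each LRItem also records lr_before (the symbol just to the left of the dot)
    # and lr_after (the list of productions for the symbol just to the right of
    # the dot); lr0_closure() and lr0_goto() rely on these precomputed fields.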
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while True:
if i > len(p):
lri = None
else:
lri = LRItem(p, i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError, KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri:
break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self, module):
if isinstance(module, types.ModuleType):
parsetab = module
else:
exec('import %s' % module)
parsetab = sys.modules[module]
if parsetab._tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self, filename):
try:
import cPickle as pickle
except ImportError:
import pickle
if not os.path.exists(filename):
raise ImportError
in_f = open(filename, 'rb')
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self, pdict):
for p in self.lr_productions:
p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
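#
# For example, with X = ['A', 'B'], R('A') = ['B'], R('B') = [], FP('A') = ['x']
# and FP('B') = ['y'], digraph(X, R, FP) returns {'A': ['x', 'y'], 'B': ['y']}:
# each F(x) is FP(x) unioned with F(y) for every y related to x.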
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
N = {}
for x in X:
N[x] = 0
stack = []
F = {}
for x in X:
if N[x] == 0:
traverse(x, N, stack, F, X, R, FP)
return F
def traverse(x, N, stack, F, X, R, FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y, N, stack, F, X, R, FP)
N[x] = min(N[x], N[y])
for a in F.get(y, []):
if a not in F[x]:
F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
class LALRError(YaccError):
pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self, grammar, method='LALR', log=None):
if method not in ['SLR', 'LALR']:
raise LALRError('Unsupported method %s' % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
        # Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = True
while didadd:
didadd = False
for j in J:
for x in j.lr_after:
if getattr(x, 'lr0_added', 0) == self._add_count:
continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = True
return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self, I, x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I), x))
if g:
return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x)
if not s:
s = {}
self.lr_goto_cache[x] = s
gs = []
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n))
if not s1:
s1 = {}
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end')
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I), x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
        # Loop over the items in C and each grammar symbol
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = {}
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I, x)
if not g or id(g) in self.lr0_cidhash:
continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
    # The method used here is due to DeRemer and Pennello (1982).
#
    # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
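    # Concretely, add_lalr_lookaheads() below chains the steps implemented by the
    # following methods: compute the nullable non-terminals, find the non-terminal
    # transitions of the LR(0) machine, compute the Read() sets, build the
    # lookback/includes relations, compute the LALR FOLLOW sets, and finally
    # attach the resulting lookaheads to the productions.
    #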
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
    # Creates a set containing all of the non-terminals that might produce
    # an empty production.
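    #
    # For example, a rule with an empty right-hand side makes its name nullable
    # immediately; a rule such as 'B : A A' is then marked nullable as well once
    # A is known to be nullable.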
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = set()
num_nullable = 0
while True:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable.add(p.name)
continue
for t in p.prod:
if t not in nullable:
break
else:
nullable.add(p.name)
if len(nullable) == num_nullable:
break
num_nullable = len(nullable)
return nullable
# -----------------------------------------------------------------------------
    # find_nonterminal_transitions(C)
#
    # Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
trans = []
for stateno, state in enumerate(C):
for p in state:
if p.lr_index < p.len - 1:
t = (stateno, p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans:
trans.append(t)
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
dr_set = {}
state, N = trans
terms = []
g = self.lr0_goto(C[state], N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms:
terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state], N)
j = self.lr0_cidhash.get(id(g), -1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j, a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self, C, trans, nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state, N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N:
continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j, t) in dtrans:
# Yes. Okay, there is some chance that this is an includes relation
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals:
break # No forget it
if p.prod[li] not in nullable:
break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j, t))
g = self.lr0_goto(C[j], t) # Go to next set
j = self.lr0_cidhash.get(id(g), -1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name:
continue
if r.len != p.len:
continue
i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]:
break
i = i + 1
else:
lookb.append((j, r))
for i in includes:
if i not in includedict:
includedict[i] = []
includedict[i].append((state, N))
lookdict[(state, N)] = lookb
return lookdict, includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
FP = lambda x: self.dr_relation(C, x, nullable)
R = lambda x: self.reads_relation(C, x, nullable)
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x, [])
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
for trans, lb in lookbacks.items():
# Loop over productions in lookback
for state, p in lb:
if state not in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans, [])
for a in f:
if a not in p.lookaheads[state]:
p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C, trans, nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C, trans, nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans, readsets, included)
# Add all of the lookaheads
self.add_lookaheads(lookd, followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = {} # Action production array (temporary)
log.info('Parsing method: %s', self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [] # List of actions
st_action = {}
st_actionp = {}
st_goto = {}
log.info('')
log.info('state %d', st)
log.info('')
for p in I:
log.info(' (%d) %s', p.number, p)
log.info('')
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action['$end'] = 0
st_actionp['$end'] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
sprec, slevel = Productions[st_actionp[a].number].prec
rprec, rlevel = Precedence.get(a, ('right', 0))
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp, rejectp = pp, oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp, rejectp = oldp, pp
self.rr_conflicts.append((st, chosenp, rejectp))
log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
a, st_actionp[a].number, st_actionp[a])
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I, a)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
# We are in a shift state
actlist.append((a, p, 'shift and go to state %d' % j))
r = st_action.get(a)
if r is not None:
# Whoa have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError('Shift/shift conflict in state %d' % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
rprec, rlevel = Productions[st_actionp[a].number].prec
sprec, slevel = Precedence.get(a, ('right', 0))
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = {}
for a, p, m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(' %-15s %s', a, m)
_actprint[(a, m)] = 1
log.info('')
# Print the actions that were not used. (debugging)
not_used = 0
for a, p, m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if not (a, m) in _actprint:
log.debug(' ! %-15s [ %s ]', a, m)
not_used = 1
_actprint[(a, m)] = 1
if not_used:
log.debug('')
# Construct the goto table for this state
nkeys = {}
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I, n)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
st_goto[n] = j
log.info(' %-30s shift and go to state %d', n, j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, tabmodule, outputdir='', signature=''):
if isinstance(tabmodule, types.ModuleType):
raise IOError("Won't overwrite existing tabmodule")
basemodulename = tabmodule.split('.')[-1]
filename = os.path.join(outputdir, basemodulename) + '.py'
try:
f = open(filename, 'w')
f.write('''
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = {}
for s, nd in self.lr_action.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_action_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
''')
else:
f.write('\n_lr_action = { ')
for k, v in self.lr_action.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
if smaller:
# Factor out names to try and make smaller
items = {}
for s, nd in self.lr_goto.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_goto_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
''')
else:
f.write('\n_lr_goto = { ')
for k, v in self.lr_goto.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
# Write production table
f.write('_lr_productions = [\n')
for p in self.lr_productions:
if p.func:
f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
p.func, os.path.basename(p.file), p.line))
else:
f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
f.write(']\n')
f.close()
except IOError as e:
raise
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=''):
try:
import cPickle as pickle
except ImportError:
import pickle
with open(filename, 'wb') as outf:
pickle.dump(__tabversion__, outf, pickle_protocol)
pickle.dump(self.lr_method, outf, pickle_protocol)
pickle.dump(signature, outf, pickle_protocol)
pickle.dump(self.lr_action, outf, pickle_protocol)
pickle.dump(self.lr_goto, outf, pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
else:
outp.append((str(p), p.name, p.len, None, None, None))
pickle.dump(outp, outf, pickle_protocol)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
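#
# For example, a docstring of the form
#
#     statement : expression PLUS term
#               | term
#
# yields the entries (file, line, 'statement', ['expression', 'PLUS', 'term'])
# and (file, line, 'statement', ['term']); a leading '|' continues the most
# recently named rule.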
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p:
continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
grammar.append((file, dline, prodname, syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
return grammar
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self, pdict, log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.modules = set()
self.grammar = []
self.error = False
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_modules()
return self.error
# Compute a signature over the grammar
def signature(self):
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
sig = md5()
if self.start:
sig.update(self.start.encode('latin-1'))
if self.prec:
sig.update(''.join([''.join(p) for p in self.prec]).encode('latin-1'))
if self.tokens:
sig.update(' '.join(self.tokens).encode('latin-1'))
for f in self.pfuncs:
if f[3]:
sig.update(f[3].encode('latin-1'))
except (TypeError, ValueError):
pass
digest = base64.b16encode(sig.digest())
if sys.version_info[0] >= 3:
digest = digest.decode('latin-1')
return digest
# -----------------------------------------------------------------------------
# validate_modules()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_modules(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for module in self.modules:
lines, linen = inspect.getsourcelines(module)
counthash = {}
for linen, line in enumerate(lines):
linen += 1
m = fre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
filename, linen, name, prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start, string_types):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func, types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = True
return
eline = self.error_func.__code__.co_firstlineno
efile = self.error_func.__code__.co_filename
module = inspect.getmodule(self.error_func)
self.modules.add(module)
argcount = self.error_func.__code__.co_argcount - ismethod
if argcount != 1:
self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
self.error = True
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get('tokens')
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = True
return
terminals = set()
for n in self.tokens:
if n in terminals:
self.log.warning('Token %r multiply defined', n)
terminals.add(n)
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get('precedence')
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec, (list, tuple)):
self.log.error('precedence must be a list or tuple')
self.error = True
return
for level, p in enumerate(self.prec):
if not isinstance(p, (list, tuple)):
self.log.error('Bad precedence table')
self.error = True
return
if len(p) < 2:
self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
self.error = True
return
assoc = p[0]
if not isinstance(assoc, string_types):
self.log.error('precedence associativity must be a string')
self.error = True
return
for term in p[1:]:
if not isinstance(term, string_types):
self.log.error('precedence items must be strings')
self.error = True
return
preclist.append((term, assoc, level+1))
self.preclist = preclist
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if not name.startswith('p_') or name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
line = item.__code__.co_firstlineno
module = inspect.getmodule(item)
p_functions.append((line, module, name, item.__doc__))
# Sort all of the actions by line number; make sure to stringify
# modules to make them sortable, since `line` may not uniquely sort all
# p functions
p_functions.sort(key=lambda p_function: (
p_function[0],
str(p_function[1]),
p_function[2],
p_function[3]))
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error('no rules of the form p_rulename are defined')
self.error = True
return
for line, module, name, doc in self.pfuncs:
file = inspect.getsourcefile(module)
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func.__code__.co_argcount > reqargs:
self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
self.error = True
elif func.__code__.co_argcount < reqargs:
self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
self.error = True
elif not func.__doc__:
self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
file, line, func.__name__)
else:
try:
parsed_g = parse_grammar(doc, file, line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError as e:
self.log.error(str(e))
self.error = True
# Looks like a valid grammar rule
# Mark the file in which defined.
self.modules.add(module)
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n, v in self.pdict.items():
if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
continue
if n.startswith('t_'):
continue
if n.startswith('p_') and n != 'p_error':
self.log.warning('%r not defined as a function', n)
if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
(isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
if v.__doc__:
try:
doc = v.__doc__.split(' ')
if doc[1] == ':':
self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
v.__code__.co_filename, v.__code__.co_firstlineno, n)
except IndexError:
pass
self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
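#
# Typical usage (illustration only, assuming this module is importable as
# ply.yacc and that the calling module defines tokens, p_* rule functions and,
# optionally, p_error):
#
#     import ply.yacc as yacc
#     parser = yacc.yacc()          # build the tables, or load cached ones
#     result = parser.parse(data)   # parse a string with the default lexer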
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
outputdir=None, debuglog=None, errorlog=None, picklefile=None):
if tabmodule is None:
tabmodule = tab_module
# Reference to the parsing method of the last built parser
global parse
# If pickling is enabled, table files are not created
if picklefile:
write_tables = 0
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
pdict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in pdict:
pdict['__file__'] = sys.modules[pdict['__module__']].__file__
else:
pdict = get_caller_module_dict(2)
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If tabmodule specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(tabmodule, types.ModuleType):
srcfile = tabmodule.__file__
else:
if '.' not in tabmodule:
srcfile = pdict['__file__']
else:
parts = tabmodule.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
    # Determine if the module is part of a package or not.
# If so, fix the tabmodule setting so that tables load correctly
pkg = pdict.get('__package__')
if pkg and isinstance(tabmodule, str):
if '.' not in tabmodule:
tabmodule = pkg + '.' + tabmodule
# Set start symbol if it's specified directly using an argument
if start is not None:
pdict['start'] = start
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict, log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError('Unable to build parser')
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
except Exception as e:
errorlog.warning('There was a problem loading the table file: %r', e)
except VersionError as e:
errorlog.warning(str(e))
except ImportError:
pass
if debuglog is None:
if debug:
try:
debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
except IOError as e:
errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
debuglog = NullLogger()
else:
debuglog = NullLogger()
debuglog.info('Created by PLY version %s (https://www.dabeaz.com/ply/)', __version__)
errors = False
# Validate the parser information
if pinfo.validate_all():
raise YaccError('Unable to build parser')
if not pinfo.error_func:
errorlog.warning('no p_error() function is defined')
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term, assoc, level)
except GrammarError as e:
errorlog.warning('%s', e)
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname, syms, funcname, file, line)
except GrammarError as e:
errorlog.error('%s', e)
errors = True
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError as e:
errorlog.error(str(e))
errors = True
if errors:
raise YaccError('Unable to build parser')
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
errors = True
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info('')
debuglog.info('Unused terminals:')
debuglog.info('')
for term in unused_terminals:
errorlog.warning('Token %r defined, but not used', term)
debuglog.info(' %s', term)
# Print out all productions to the debug log
if debug:
debuglog.info('')
debuglog.info('Grammar')
debuglog.info('')
for n, p in enumerate(grammar.Productions):
debuglog.info('Rule %-5d %s', n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning('There is 1 unused token')
if len(unused_terminals) > 1:
errorlog.warning('There are %d unused tokens', len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning('There is 1 unused rule')
if len(unused_rules) > 1:
errorlog.warning('There are %d unused rules', len(unused_rules))
if debug:
debuglog.info('')
debuglog.info('Terminals, with rules where they appear')
debuglog.info('')
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
debuglog.info('')
debuglog.info('Nonterminals, with rules where they appear')
debuglog.info('')
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info('')
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning('Symbol %r is unreachable', u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error('Infinite recursion detected for symbol %r', inf)
errors = True
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
errors = True
if errors:
raise YaccError('Unable to build parser')
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug('Generating %s tables', method)
lr = LRGeneratedTable(grammar, method, debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning('1 shift/reduce conflict')
elif num_sr > 1:
errorlog.warning('%d shift/reduce conflicts', num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning('1 reduce/reduce conflict')
elif num_rr > 1:
errorlog.warning('%d reduce/reduce conflicts', num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning('')
debuglog.warning('Conflicts:')
debuglog.warning('')
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
already_reported = set()
for state, rule, rejected in lr.rr_conflicts:
if (state, id(rule), id(rejected)) in already_reported:
continue
debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
debuglog.warning('rejected rule (%s) in state %d', rejected, state)
errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
errorlog.warning('rejected rule (%s) in state %d', rejected, state)
already_reported.add((state, id(rule), id(rejected)))
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning('Rule (%s) is never reduced', rejected)
errorlog.warning('Rule (%s) is never reduced', rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
try:
lr.write_table(tabmodule, outputdir, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
# Write a pickled version of the tables
if picklefile:
try:
lr.pickle_table(picklefile, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
| gpl-2.0 | -6,985,435,174,543,016,000 | 37.975058 | 119 | 0.464163 | false |
benvanwerkhoven/kernel_tuner | examples/cuda/convolution_correct.py | 1 | 3195 | #!/usr/bin/env python
""" convolution with correctness checks
This example is mostly the same as the Convolution example. The only
difference is that a naive kernel is used to compute a reference
output. This reference output is used to check the correctness of
every kernel before it is benchmarked.
This is done using the run_kernel() function of the Kernel Tuner and
the `answer` option of the tune_kernel function.
The run_kernel function simply runs a kernel using much of the same
interface as tune_kernel, however, for each tuning_parameter you pass
a single value instead of a list of options. The run_kernel function
returns a list of arguments that contains the output of the kernel.
When calling tune_kernel you specify the `answer` as a list, which
is similar to the arguments list of the kernel. To separate input
and output arguments you insert a `None` value in the answer list
for all arguments that are actually inputs to the kernel. The
values in the answers list that are not None are used to verify
the correctness of every kernel in the parameter space before it is
benchmarked.
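For instance, with args = [output, input, filter] as used below, the matching
answer list is [reference_output, None, None]: only the first argument is an
output that gets verified against the reference result.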
"""
import numpy
import kernel_tuner
from collections import OrderedDict
def tune():
with open('convolution.cu', 'r') as f:
kernel_string = f.read()
filter_size = (17, 17)
problem_size = (4096, 4096)
size = numpy.prod(problem_size)
border_size = (filter_size[0]//2*2, filter_size[1]//2*2)
input_size = ((problem_size[0]+border_size[0]) * (problem_size[1]+border_size[1]))
output = numpy.zeros(size).astype(numpy.float32)
input = numpy.random.randn(input_size).astype(numpy.float32)
filter = numpy.random.randn(filter_size[0]*filter_size[1]).astype(numpy.float32)
    cmem_args = {'d_filter': filter}
args = [output, input, filter]
tune_params = OrderedDict()
tune_params["filter_width"] = [filter_size[0]]
tune_params["filter_height"] = [filter_size[1]]
tune_params["block_size_x"] = [16*i for i in range(1,9)]
tune_params["block_size_y"] = [2**i for i in range(1,6)]
tune_params["tile_size_x"] = [2**i for i in range(3)]
tune_params["tile_size_y"] = [2**i for i in range(3)]
tune_params["use_padding"] = [0,1] #toggle the insertion of padding in shared memory
tune_params["read_only"] = [0,1] #toggle using the read-only cache
grid_div_x = ["block_size_x", "tile_size_x"]
grid_div_y = ["block_size_y", "tile_size_y"]
#compute the answer using a naive kernel
params = { "block_size_x": 16, "block_size_y": 16}
tune_params["filter_width"] = [filter_size[0]]
tune_params["filter_height"] = [filter_size[1]]
results = kernel_tuner.run_kernel("convolution_naive", kernel_string,
problem_size, args, params,
grid_div_y=["block_size_y"], grid_div_x=["block_size_x"])
#set non-output fields to None
answer = [results[0], None, None]
#start kernel tuning with correctness verification
return kernel_tuner.tune_kernel("convolution_kernel", kernel_string,
problem_size, args, tune_params,
grid_div_y=grid_div_y, grid_div_x=grid_div_x, verbose=True, cmem_args=cmem_args, answer=answer)
if __name__ == "__main__":
tune()
| apache-2.0 | -1,021,302,405,249,030,900 | 38.9375 | 103 | 0.692019 | false |
lacatus/TFM | datasets/pets095.py | 1 | 3494 | #!/usr/bin/env python
from datasets import cp
from datasets import variables
from datasets import Camera
def loaddataset():
setglobalvariables()
loadcameras()
return getcameras(), loadconfiguration()
def setglobalvariables():
variables.current_dataset_path = variables.datasets_path + '/pets09'
variables.current_video_path = variables.current_dataset_path + \
'/s0/regularflow/time_14_03'
def loadcameras():
global cam1_g1
global cam2_g1
global cam3_g1
global cam4_g1
cam1_g1 = Camera()
cam2_g1 = Camera()
cam3_g1 = Camera()
cam4_g1 = Camera()
cam1_g1.video.readvideo(variables.current_video_path + '/camera001.avi')
cam2_g1.video.readvideo(variables.current_video_path + '/camera002.avi')
cam3_g1.video.readvideo(variables.current_video_path + '/camera003.avi')
cam4_g1.video.readvideo(variables.current_video_path + '/camera004.avi')
cam1_g1.video.readbg(
variables.current_video_path + '/background/camera001.jpg')
cam2_g1.video.readbg(
variables.current_video_path + '/background/camera002.jpg')
cam3_g1.video.readbg(
variables.current_video_path + '/background/camera003.jpg')
cam4_g1.video.readbg(
variables.current_video_path + '/background/camera004.jpg')
cam1_str = variables.current_dataset_path + '/cameracalib/camera001.cfg'
cam2_str = variables.current_dataset_path + '/cameracalib/camera002.cfg'
cam3_str = variables.current_dataset_path + '/cameracalib/camera003.cfg'
cam4_str = variables.current_dataset_path + '/cameracalib/camera004.cfg'
cam1_g1.readconfigfile(cam1_str)
cam2_g1.readconfigfile(cam2_str)
cam3_g1.readconfigfile(cam3_str)
cam4_g1.readconfigfile(cam4_str)
def loadglobalconfiguration(c):
dst = {
'option': c.getint('global', 'option'),
'alpha': c.getfloat('global', 'alpha'),
'beta': c.getfloat('global', 'beta'),
'frame_count': c.getint('global', 'frame_count'),
'threshold_1': c.getint('global', 'threshold_1'),
'threshold_2': c.getint('global', 'threshold_2'),
'waitkey': c.getint('global', 'waitkey')
}
return dst
def loadcamconfiguration(c, cam_id):
dst = {
'win_height': c.getint(cam_id, 'win_height'),
'win_width': c.getint(cam_id, 'win_width'),
'win_min_pix': c.getint(cam_id, 'win_min_pix')
}
return dst
def loadconfiguration():
config_file = variables.current_video_path + '/configuration/config.cfg'
c = cp.ConfigParser()
c.read(config_file)
configuration = {
'global': loadglobalconfiguration(c),
'Camera001': loadcamconfiguration(c, 'Camera001'),
'Camera002': loadcamconfiguration(c, 'Camera002'),
'Camera003': loadcamconfiguration(c, 'Camera003'),
'Camera004': loadcamconfiguration(c, 'Camera004'),
'dir': config_file
}
return configuration
def getcam1():
return cam1_g1
def getcam2():
return cam2_g1
def getcam3():
return cam3_g1
def getcam4():
return cam4_g1
def getcameras():
cam1 = getcam1()
cam2 = getcam2()
cam3 = getcam3()
cam4 = getcam4()
cam1.printcamerainfo()
cam2.printcamerainfo()
cam3.printcamerainfo()
cam4.printcamerainfo()
return [cam1, cam2, cam3, cam4]
def printcamerainfo():
cam1_g1.printcamerainfo()
cam2_g1.printcamerainfo()
cam3_g1.printcamerainfo()
cam4_g1.printcamerainfo()
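# Hedged usage sketch, not part of the original module: it assumes the pets09
# videos, backgrounds and camera calibration files referenced above actually
# exist under variables.datasets_path, since loaddataset() reads them from disk.
if __name__ == '__main__':
    cameras, configuration = loaddataset()
    print('Loaded %d cameras' % len(cameras))
    print('Configuration read from %s' % configuration['dir'])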
| apache-2.0 | 7,430,277,438,438,640,000 | 23.263889 | 76 | 0.655409 | false |
noironetworks/heat | heat/common/policy.py | 1 | 6824 | #
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Based on glance/api/policy.py
"""Policy Engine For Heat."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat import policies
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DEFAULT_RULES = policy.Rules.from_dict({'default': '!'})
DEFAULT_RESOURCE_RULES = policy.Rules.from_dict({'default': '@'})
ENFORCER = None
class Enforcer(object):
"""Responsible for loading and enforcing rules."""
def __init__(self, scope='heat', exc=exception.Forbidden,
default_rule=DEFAULT_RULES['default'], policy_file=None):
self.scope = scope
self.exc = exc
self.default_rule = default_rule
self.enforcer = policy.Enforcer(
CONF, default_rule=default_rule, policy_file=policy_file)
self.log_not_registered = True
# register rules
self.enforcer.register_defaults(policies.list_rules())
def set_rules(self, rules, overwrite=True):
"""Create a new Rules object based on the provided dict of rules."""
rules_obj = policy.Rules(rules, self.default_rule)
self.enforcer.set_rules(rules_obj, overwrite)
def load_rules(self, force_reload=False):
"""Set the rules found in the json file on disk."""
self.enforcer.load_rules(force_reload)
def _check(self, context, rule, target, exc,
is_registered_policy=False, *args, **kwargs):
"""Verifies that the action is valid on the target in this context.
:param context: Heat request context
:param rule: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:raises heat.common.exception.Forbidden: When permission is denied
(or self.exc if supplied).
:returns: A non-False value if access is allowed.
"""
do_raise = False if not exc else True
credentials = context.to_policy_values()
if is_registered_policy:
try:
return self.enforcer.authorize(rule, target, credentials,
do_raise=do_raise,
exc=exc, action=rule)
except policy.PolicyNotRegistered:
if self.log_not_registered:
with excutils.save_and_reraise_exception():
LOG.exception(_('Policy not registered.'))
else:
raise
else:
return self.enforcer.enforce(rule, target, credentials,
do_raise, exc=exc, *args, **kwargs)
def enforce(self, context, action, scope=None, target=None,
is_registered_policy=False):
"""Verifies that the action is valid on the target in this context.
:param context: Heat request context
:param action: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:raises heat.common.exception.Forbidden: When permission is denied
(or self.exc if supplied).
:returns: A non-False value if access is allowed.
"""
_action = '%s:%s' % (scope or self.scope, action)
_target = target or {}
return self._check(context, _action, _target, self.exc, action=action,
is_registered_policy=is_registered_policy)
def check_is_admin(self, context):
"""Whether or not is admin according to policy.
By default the rule will check whether or not roles contains
'admin' role and is admin project.
:param context: Heat request context
:returns: A non-False value if the user is admin according to policy
"""
return self._check(context, 'context_is_admin', target={}, exc=None,
is_registered_policy=True)
def get_enforcer():
global ENFORCER
if ENFORCER is None:
ENFORCER = Enforcer()
return ENFORCER
class ResourceEnforcer(Enforcer):
def __init__(self, default_rule=DEFAULT_RESOURCE_RULES['default'],
**kwargs):
super(ResourceEnforcer, self).__init__(
default_rule=default_rule, **kwargs)
self.log_not_registered = False
def _enforce(self, context, res_type, scope=None, target=None,
is_registered_policy=False):
try:
result = super(ResourceEnforcer, self).enforce(
context, res_type,
scope=scope or 'resource_types',
target=target, is_registered_policy=is_registered_policy)
except policy.PolicyNotRegistered:
result = True
except self.exc as ex:
LOG.info(six.text_type(ex))
raise
if not result:
if self.exc:
raise self.exc(action=res_type)
return result
def enforce(self, context, res_type, scope=None, target=None,
is_registered_policy=False):
# NOTE(pas-ha): try/except just to log the exception
result = self._enforce(context, res_type, scope, target,
is_registered_policy=is_registered_policy)
if result:
# check for wildcard resource types
subparts = res_type.split("::")[:-1]
subparts.append('*')
res_type_wc = "::".join(subparts)
try:
return self._enforce(context, res_type_wc, scope, target,
is_registered_policy=is_registered_policy)
except self.exc:
raise self.exc(action=res_type)
return result
def enforce_stack(self, stack, scope=None, target=None,
is_registered_policy=False):
for res in stack.resources.values():
self.enforce(stack.context, res.type(), scope=scope, target=target,
is_registered_policy=is_registered_policy)
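def _example_enforce(context):
    # Hedged usage sketch, not part of the original module: `context` is assumed
    # to be a heat RequestContext (anything providing to_policy_values()), and
    # 'stacks'/'create' are illustrative scope/action names, not necessarily
    # registered policies.
    enforcer = get_enforcer()
    allowed = enforcer.enforce(context, 'create', scope='stacks')
    return allowed or enforcer.check_is_admin(context)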
| apache-2.0 | -8,548,374,885,071,395,000 | 37.994286 | 79 | 0.603605 | false |
vlukes/dicom2fem | setup.py | 1 | 3448 | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='dicom2fem',
description='Generation of finite element meshes from DICOM images',
    long_description="Generation of finite element meshes using computed " +
"tomography scans. Segmentation is based on the graph cut algorithm.",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='1.0.0',
url='https://github.com/vlukes/dicom2fem',
author='Vladimir Lukes',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Bio-Informatics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='fem dicom',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['dist', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['numpy', 'pysegbase'],
dependency_links=['https://github.com/mjirik/gco_python'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| bsd-3-clause | -117,617,626,211,072,220 | 42.1 | 98 | 0.657193 | false |
Herpinemmanuel/Oceanography | intergrid.py | 1 | 8478 | """ interpolate data given on an Nd rectangular grid, uniform or non-uniform.
Purpose: extend the fast N-dimensional interpolator
`scipy.ndimage.map_coordinates` to non-uniform grids, using `np.interp`.
Background: please look at
http://en.wikipedia.org/wiki/Bilinear_interpolation
http://stackoverflow.com/questions/6238250/multivariate-spline-interpolation-in-python-scipy
http://docs.scipy.org/doc/scipy-dev/reference/generated/scipy.ndimage.interpolation.map_coordinates.html
Example
-------
Say we have rainfall on a 4 x 5 grid of rectangles, lat 52 .. 55 x lon -10 .. -6,
and want to interpolate (estimate) rainfall at 1000 query points
in between the grid points.
# define the grid --
griddata = np.loadtxt(...) # griddata.shape == (4, 5)
lo = np.array([ 52, -10 ]) # lowest lat, lowest lon
hi = np.array([ 55, -6 ]) # highest lat, highest lon
# set up an interpolator function "interfunc()" with class Intergrid --
interfunc = Intergrid( griddata, lo=lo, hi=hi )
# generate 1000 random query points, lo <= [lat, lon] <= hi --
query_points = lo + np.random.uniform( size=(1000, 2) ) * (hi - lo)
# get rainfall at the 1000 query points --
query_values = interfunc( query_points ) # -> 1000 values
What this does:
for each [lat, lon] in query_points:
1) find the square of griddata it's in,
e.g. [52.5, -8.1] -> [0, 3] [0, 4] [1, 4] [1, 3]
2) do bilinear (multilinear) interpolation in that square,
using `scipy.ndimage.map_coordinates` .
Check:
interfunc( lo ) -> griddata[0, 0],
interfunc( hi ) -> griddata[-1, -1] i.e. griddata[3, 4]
Parameters
----------
griddata: numpy array_like, 2d 3d 4d ...
lo, hi: user coordinates of the corners of griddata, 1d array-like, lo < hi
maps: a list of `dim` descriptors of piecewise-linear or nonlinear maps,
e.g. [[50, 52, 62, 63], None] # uniformize lat, linear lon
copy: make a copy of query_points, default True;
copy=False overwrites query_points, runs in less memory
verbose: default 1: print a 1-line summary for each call, with run time
order=1: see `map_coordinates`
prefilter: 0 or False, the default: smoothing B-spline
1 or True: exact-fit interpolating spline (IIR, not C-R)
1/3: Mitchell-Netravali spline, 1/3 B + 2/3 fit
(prefilter is only for order > 1, since order = 1 interpolates)
Non-uniform rectangular grids
-----------------------------
What if our griddata above is at non-uniformly-spaced latitudes,
say [50, 52, 62, 63] ? `Intergrid` can "uniformize" these
before interpolation, like this:
lo = np.array([ 50, -10 ])
hi = np.array([ 63, -6 ])
maps = [[50, 52, 62, 63], None] # uniformize lat, linear lon
interfunc = Intergrid( griddata, lo=lo, hi=hi, maps=maps )
This will map (transform, stretch, warp) the lats in query_points column 0
to array coordinates in the range 0 .. 3, using `np.interp` to do
piecewise-linear (PWL) mapping:
50 51 52 53 54 55 56 57 58 59 60 61 62 63 # lo[0] .. hi[0]
0 .5 1 1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 2 3
`maps[1] None` says to map the lons in query_points column 1 linearly:
-10 -9 -8 -7 -6 # lo[1] .. hi[1]
0 1 2 3 4
More doc: https://denis-bz.github.com/docs/intergrid.html
"""
# split class Gridmap ?
from __future__ import division
from time import time
# warnings
import numpy as np
from scipy.ndimage import map_coordinates, spline_filter
__version__ = "2014-05-09 leif denby" # 9may: fix default-argument bug
__author_email__ = "[email protected]" # comments welcome, testcases most welcome
#...............................................................................
class Intergrid:
__doc__ = globals()["__doc__"]
def __init__( self, griddata, lo, hi, maps=None, copy=True, verbose=1,
order=1, prefilter=False ):
griddata = np.asanyarray( griddata )
dim = griddata.ndim # - (griddata.shape[-1] == 1) # ??
assert dim >= 2, griddata.shape
self.dim = dim
if np.isscalar(lo):
lo *= np.ones(dim)
if np.isscalar(hi):
hi *= np.ones(dim)
self.loclip = lo = np.asarray_chkfinite( lo ).copy()
self.hiclip = hi = np.asarray_chkfinite( hi ).copy()
assert lo.shape == (dim,), lo.shape
assert hi.shape == (dim,), hi.shape
self.copy = copy
self.verbose = verbose
self.order = order
if order > 1 and 0 < prefilter < 1: # 1/3: Mitchell-Netravali = 1/3 B + 2/3 fit
exactfit = spline_filter( griddata ) # see Unser
griddata += prefilter * (exactfit - griddata)
prefilter = False
self.griddata = griddata
self.prefilter = (prefilter == True)
if maps is None:
maps = [None,] * len(lo)
self.maps = maps
self.nmap = 0
if len(maps) > 0:
assert len(maps) == dim, "maps must have len %d, not %d" % (
dim, len(maps))
# linear maps (map None): Xcol -= lo *= scale -> [0, n-1]
# nonlinear: np.interp e.g. [50 52 62 63] -> [0 1 2 3]
self._lo = np.zeros(dim)
self._scale = np.ones(dim)
for j, (map, n, l, h) in enumerate( zip( maps, griddata.shape, lo, hi )):
## print "test: j map n l h:", j, map, n, l, h
if map is None or callable(map):
self._lo[j] = l
if h > l:
self._scale[j] = (n - 1) / (h - l) # _map lo -> 0, hi -> n - 1
else:
self._scale[j] = 0 # h <= l: X[:,j] -> 0
continue
self.maps[j] = map = np.asanyarray(map)
self.nmap += 1
assert len(map) == n, "maps[%d] must have len %d, not %d" % (
j, n, len(map) )
mlo, mhi = map.min(), map.max()
if not (l <= mlo <= mhi <= h):
print ("Warning: Intergrid maps[%d] min %.3g max %.3g " \
"are outside lo %.3g hi %.3g" % (
j, mlo, mhi, l, h ))
#...............................................................................
def _map_to_uniform_grid( self, X ):
""" clip, map X linear / nonlinear inplace """
np.clip( X, self.loclip, self.hiclip, out=X )
# X nonlinear maps inplace --
for j, map in enumerate(self.maps):
if map is None:
continue
if callable(map):
X[:,j] = map( X[:,j] ) # clip again ?
else:
# PWL e.g. [50 52 62 63] -> [0 1 2 3] --
X[:,j] = np.interp( X[:,j], map, np.arange(len(map)) )
# linear map the rest, inplace (nonlinear _lo 0, _scale 1: noop)
if self.nmap < self.dim:
X -= self._lo
X *= self._scale # (griddata.shape - 1) / (hi - lo)
## print "test: _map_to_uniform_grid", X.T
#...............................................................................
def __call__( self, X, out=None ):
""" query_values = Intergrid(...) ( query_points npt x dim )
"""
X = np.asanyarray(X)
assert X.shape[-1] == self.dim, ("the query array must have %d columns, "
"but its shape is %s" % (self.dim, X.shape) )
Xdim = X.ndim
if Xdim == 1:
X = np.asarray([X]) # in a single point -> out scalar
if self.copy:
X = X.copy()
assert X.ndim == 2, X.shape
npt = X.shape[0]
if out is None:
out = np.empty( npt, dtype=self.griddata.dtype )
t0 = time()
self._map_to_uniform_grid( X ) # X inplace
#...............................................................................
map_coordinates( self.griddata, X.T,
order=self.order, prefilter=self.prefilter,
mode="nearest", # outside -> edge
# test: mode="constant", cval=np.NaN,
output=out )
if self.verbose:
print ("Intergrid: %.3g msec %d points in a %s grid %d maps order %d" % (
(time() - t0) * 1000, npt, self.griddata.shape, self.nmap, self.order ))
return out if Xdim == 2 else out[0]
at = __call__
# end intergrid.py
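def _example():
    # Hedged demo sketch, not part of the original module: synthetic data that
    # mirrors the rainfall example from the module docstring above.
    griddata = np.random.uniform(size=(4, 5))     # 4 x 5 grid of values
    lo = np.array([52., -10.])                    # lowest lat, lowest lon
    hi = np.array([55., -6.])                     # highest lat, highest lon
    interfunc = Intergrid(griddata, lo=lo, hi=hi, verbose=0)
    query_points = lo + np.random.uniform(size=(1000, 2)) * (hi - lo)
    query_values = interfunc(query_points)        # -> 1000 interpolated values
    assert query_values.shape == (1000,)
    # non-uniform latitudes: uniformize with a piecewise-linear map per axis
    maps = [[52, 53, 54.5, 55], None]             # PWL lat map, linear lon
    interfunc_pwl = Intergrid(griddata, lo=lo, hi=hi, maps=maps, verbose=0)
    return query_values, interfunc_pwl(query_points)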
| mit | 2,374,753,777,518,892,500 | 40.763547 | 104 | 0.525124 | false |
openstack/rally | rally/common/plugin/info.py | 1 | 4372 | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
PARAM_OR_RETURNS_REGEX = re.compile(":(?:param|returns)")
RETURNS_REGEX = re.compile(":returns: (?P<doc>.*)", re.S)
PARAM_REGEX = re.compile(r":param (?P<name>[\*\w]+): (?P<doc>.*?)"
r"(?:(?=:param)|(?=:return)|(?=:raises)|\Z)", re.S)
def trim(docstring):
"""trim function from PEP-257"""
if not docstring:
return ""
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Current code/unittests expects a line return at
# end of multiline docstrings
# workaround expected behavior from unittests
if "\n" in docstring:
trimmed.append("")
# Return a single string:
return "\n".join(trimmed)
def reindent(string):
return "\n".join(line.strip() for line in string.strip().split("\n"))
def parse_docstring(docstring):
"""Parse the docstring into its components.
:returns: a dictionary of form
{
"short_description": ...,
"long_description": ...,
"params": [{"name": ..., "doc": ...}, ...],
"returns": ...
}
"""
short_description = long_description = returns = ""
params = []
if docstring:
docstring = trim(docstring.lstrip("\n"))
lines = docstring.split("\n", 1)
short_description = lines[0]
if len(lines) > 1:
long_description = lines[1].strip()
params_returns_desc = None
match = PARAM_OR_RETURNS_REGEX.search(long_description)
if match:
long_desc_end = match.start()
params_returns_desc = long_description[long_desc_end:].strip()
long_description = long_description[:long_desc_end].rstrip()
if params_returns_desc:
params = [
{"name": name, "doc": trim(doc)}
for name, doc in PARAM_REGEX.findall(params_returns_desc)
]
match = RETURNS_REGEX.search(params_returns_desc)
if match:
returns = reindent(match.group("doc"))
return {
"short_description": short_description,
"long_description": long_description,
"params": params,
"returns": returns
}
class InfoMixin(object):
@classmethod
def _get_doc(cls):
"""Return documentary of class
By default it returns docstring of class, but it can be overridden
for example for cases like merging own docstring with parent
"""
return cls.__doc__
@classmethod
def get_info(cls):
doc = parse_docstring(cls._get_doc())
return {
"name": cls.get_name(),
"platform": cls.get_platform(),
"module": cls.__module__,
"title": doc["short_description"],
"description": doc["long_description"],
"parameters": doc["params"],
"schema": getattr(cls, "CONFIG_SCHEMA", None),
"returns": doc["returns"]
}
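if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: shows the mapping
    # parse_docstring() produces for a ":param ...:/:returns:" style docstring.
    sample = ("Do a thing.\n"
              "\n"
              "Longer explanation of the thing.\n"
              "\n"
              ":param size: how big the thing should be\n"
              ":returns: the finished thing\n")
    info = parse_docstring(sample)
    assert info["short_description"] == "Do a thing."
    assert info["params"][0]["name"] == "size"
    assert info["returns"] == "the finished thing"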
| apache-2.0 | -1,733,923,676,359,351,300 | 30.912409 | 78 | 0.576624 | false |
beefoo/hollywood-diversity | scripts/imdb_get_images.py | 1 | 2896 | # -*- coding: utf-8 -*-
# Description:
# This file takes in a .csv file of people and retrieves their images from IMDB if they exist
# Example usage:
# python imdb_get_images.py ../data/people_box_office_top_50_movies_1995-2014_imdb.csv
# python imdb_get_images.py ../data/people_box_office_top_10_movies_2011-2015_imdb_subset.csv
from bs4 import BeautifulSoup
import csv
import sys
import urllib2
if len(sys.argv) < 2:
print "Usage: %s <inputfile csv>" % sys.argv[0]
sys.exit(1)
PEOPLE_FILE = sys.argv[1]
overwrite_existing = False
update_file = True
save_after = 10
images = {}
people = []
headers = []
headers_to_add = ['img']
with open(PEOPLE_FILE, 'rb') as f:
rows = csv.reader(f, delimiter=',')
headers = next(rows, None) # remove header
if 'imdb_id' not in headers:
print PEOPLE_FILE + " must have column <imdb_id>"
sys.exit(1)
# init people list
for h in headers_to_add:
if h not in headers:
headers.append(h)
# populate people list
for row in rows:
person = {}
for i, h in enumerate(headers):
if (i >= len(row)): # doesn't exist, add as blank
person[h] = ''
else:
person[h] = row[i]
people.append(person)
def save_people():
global PEOPLE_FILE
global headers
global people
# Write data back to file
with open(PEOPLE_FILE, 'wb') as f:
w = csv.writer(f)
w.writerow(headers)
for p in people:
row = []
for h in headers:
row.append(p[h])
w.writerow(row)
print('Successfully updated file: '+PEOPLE_FILE)
unsaved = 0
for i, p in enumerate(people):
save = False
# Image was already found for this person
if p['imdb_id'] in images:
people[i]['img'] = images[p['imdb_id']]
unsaved += 1
# Otherwise, fetch remote page and parse for image
elif overwrite_existing or not p['img']:
try:
html_contents = urllib2.urlopen("http://akas.imdb.com/name/nm"+p['imdb_id']+"/").read()
contents = BeautifulSoup(html_contents, 'html.parser')
image_srcs = contents.findAll('link', rel='image_src')
except:
print("URL Error: " + "http://akas.imdb.com/name/nm"+p['imdb_id']+"/")
image_srcs = [{'href': ''}]
image_src = 'none'
# image found
if len(image_srcs):
image_src = image_srcs[0]['href']
# image is default image
if 'imdb_fb_logo' in image_src:
image_src = 'none'
people[i]['img'] = image_src
images[p['imdb_id']] = image_src
unsaved += 1
print 'Found ' + str(i) + '. ' + people[i]['img'] + ' for '+p['imdb_id']
# Save data
if update_file and unsaved >= save_after:
save_people()
unsaved = 0
save_people()
| mit | -4,167,225,041,933,074,400 | 28.85567 | 99 | 0.56837 | false |
sburnett/bismark-release-manager | main.py | 1 | 17515 | #!/usr/bin/env python2.7
import argparse
import logging
import os
import subcommands
import tree
def create_groups_subcommands(subparsers):
parser_list_group = subparsers.add_parser(
'list', help='list nodes in a groups')
parser_list_group.add_argument(
'name', type=str, nargs='?', action='store', help='name of the group to list')
parser_list_group.set_defaults(handler=subcommands.list_group)
parser_list_all_groups = subparsers.add_parser(
'list-all', help='list all groups of nodes')
parser_list_all_groups.set_defaults(handler=subcommands.list_all_groups)
parser_new_group = subparsers.add_parser(
'new', help='create a new group of nodes')
parser_new_group.add_argument(
'name', type=str, action='store', help='name of the new group')
parser_new_group.add_argument(
'node', nargs='*', type=str, action='store', help='nodes to add')
parser_new_group.set_defaults(handler=subcommands.new_group)
parser_copy_group = subparsers.add_parser(
'copy', help='copy a group of nodes')
parser_copy_group.add_argument(
'name', type=str, action='store', help='name of the group to copy')
parser_copy_group.add_argument(
'new_name', type=str, action='store', help='name of the new copy')
parser_copy_group.set_defaults(handler=subcommands.copy_group)
parser_delete_group = subparsers.add_parser(
'delete', help='delete a group of nodes')
parser_delete_group.add_argument(
'name', type=str, action='store', help='name of the group to delete')
parser_delete_group.set_defaults(handler=subcommands.delete_group)
parser_add_to_group = subparsers.add_parser(
'add-nodes', help='add nodes to a group')
parser_add_to_group.add_argument(
'group', type=str, action='store', help='name of the group')
parser_add_to_group.add_argument(
'node', nargs='+', type=str, action='store', help='nodes to add')
parser_add_to_group.set_defaults(handler=subcommands.add_to_group)
parser_remove_from_group = subparsers.add_parser(
'remove-nodes', help='remove nodes from a group')
parser_remove_from_group.add_argument(
'group', type=str, action='store', help='name of the group')
parser_remove_from_group.add_argument(
'node', nargs='+', type=str, action='store', help='nodes to remove')
parser_remove_from_group.set_defaults(
handler=subcommands.remove_from_group)
def create_experiments_subcommands(subparsers):
parser_new_experiment = subparsers.add_parser(
'new', help='create a new experiment')
parser_new_experiment.add_argument(
'name', type=str, action='store', help='name of the new experiment')
parser_new_experiment.set_defaults(handler=subcommands.new_experiment)
parser_add_to_experiment = subparsers.add_parser(
'add-package', help='add a package to an experiment')
parser_add_to_experiment.add_argument(
'experiment', type=str, action='store', help='experiment identifier')
parser_add_to_experiment.add_argument(
'group', type=str, action='store', help='enable experiment on this group of routers')
parser_add_to_experiment.add_argument(
'release', type=str, action='store', help='add package for this release (e.g., quirm)')
parser_add_to_experiment.add_argument(
'architecture', type=str, action='store', help='target architecture (e.g., ar71xx)')
parser_add_to_experiment.add_argument(
'package', type=str, action='store', help='name of the package to install')
parser_add_to_experiment.add_argument(
'version', type=str, action='store', help='version of the package')
parser_add_to_experiment.set_defaults(
handler=subcommands.add_to_experiment)
parser_remove_from_experiment = subparsers.add_parser(
'remove-package', help='remove a package from an experiment')
parser_remove_from_experiment.add_argument(
'experiment', type=str, action='store', help='experiment identifier')
parser_remove_from_experiment.add_argument(
'group', type=str, action='store', help='remove packages from this group of routers')
parser_remove_from_experiment.add_argument(
'release', type=str, action='store', help='remove package from this release (e.g., quirm)')
parser_remove_from_experiment.add_argument(
'architecture', type=str, action='store', help='target architecture (e.g., ar71xx)')
parser_remove_from_experiment.add_argument(
'package', type=str, action='store', help='name of the package')
parser_remove_from_experiment.add_argument(
'version', type=str, action='store', help='version of the package')
parser_remove_from_experiment.set_defaults(
handler=subcommands.remove_from_experiment)
parser_list_experiment = subparsers.add_parser(
'list', help='list experiment details')
parser_list_experiment.add_argument(
'experiment', type=str, nargs='?', action='store', help='list details for this experiment')
parser_list_experiment.set_defaults(handler=subcommands.list_experiment)
parser_list_experiment = subparsers.add_parser(
'list-all', help='list all experiments')
parser_list_experiment.set_defaults(
handler=subcommands.list_all_experiments)
parser_list_experiment_packages = subparsers.add_parser(
'list-packages', help='list packages for an experiment')
parser_list_experiment_packages.add_argument(
'experiment', type=str, action='store', help='list packages for this experiment')
parser_list_experiment_packages.set_defaults(
handler=subcommands.list_experiment_packages)
parser_install_by_default = subparsers.add_parser(
'install-by-default', help='Install an experiment by default')
parser_install_by_default.add_argument(
'experiment', type=str, action='store', help='name of the experiment')
parser_install_by_default.add_argument(
'group', nargs='+', type=str, action='store', help='install by default on these routers')
parser_install_by_default.set_defaults(
handler=subcommands.install_by_default)
parser_uninstall_by_default = subparsers.add_parser(
        'uninstall-by-default', help="Don't install an experiment by default")
parser_uninstall_by_default.add_argument(
'experiment', type=str, action='store', help='name of the experiment')
parser_uninstall_by_default.add_argument(
        'group', nargs='+', type=str, action='store', help='stop installing by default on these routers')
parser_uninstall_by_default.set_defaults(
handler=subcommands.uninstall_by_default)
parser_require_experiment = subparsers.add_parser(
'require', help='require a group of routers to install an experiment')
parser_require_experiment.add_argument(
'experiment', type=str, action='store', help='name of the experiment')
parser_require_experiment.add_argument(
'group', nargs='+', type=str, action='store', help='require the experiment on these routers')
parser_require_experiment.set_defaults(
handler=subcommands.require_experiment)
parser_unrequire_experiment = subparsers.add_parser(
'unrequire', help='stop requiring a group of routers to install an experiment')
parser_unrequire_experiment.add_argument(
'experiment', type=str, action='store', help='name of the experiment')
parser_unrequire_experiment.add_argument(
'group', nargs='+', type=str, action='store', help='stop requiring the experiment on these routers')
parser_unrequire_experiment.set_defaults(
handler=subcommands.unrequire_experiment)
parser_revoke_experiment = subparsers.add_parser(
'revoke', help='revoke an experiment on a group of routers')
parser_revoke_experiment.add_argument(
'experiment', type=str, action='store', help='name of the experiment')
parser_revoke_experiment.add_argument(
'group', nargs='+', type=str, action='store', help='revoke the experiment on these routers')
parser_revoke_experiment.set_defaults(
handler=subcommands.revoke_experiment)
parser_unrevoke_experiment = subparsers.add_parser(
        'unrevoke', help='stop revoking an experiment on a group of routers')
parser_unrevoke_experiment.add_argument(
'experiment', type=str, action='store', help='name of the experiment')
parser_unrevoke_experiment.add_argument(
'group', nargs='+', type=str, action='store', help='stop revoking the experiment on these routers')
parser_unrevoke_experiment.set_defaults(
handler=subcommands.unrevoke_experiment)
def create_packages_subcommands(subparsers):
parser_add_packages = subparsers.add_parser(
'import', help='import ipk files for a release')
parser_add_packages.add_argument(
'release', type=str, action='store', help='import packages for this release (e.g., quirm)')
parser_add_packages.add_argument(
'ipk', nargs='+', type=str, action='store', help='ipkg files to import')
parser_add_packages.set_defaults(handler=subcommands.add_packages)
parser_list_packages = subparsers.add_parser(
'list', help='list available packages')
parser_list_packages.add_argument(
'release', type=str, nargs='?', action='store', help='list packages for this release (e.g., quirm)')
parser_list_packages.set_defaults(handler=subcommands.list_packages)
parser_list_builtin_packages = subparsers.add_parser(
'list-builtin', help='list builtin packages for a release')
parser_list_builtin_packages.add_argument(
'release', type=str, nargs='?', action='store', help='name of the release (e.g., quirm)')
parser_list_builtin_packages.add_argument(
'architecture', type=str, nargs='?', action='store', help='target architecture (e.g., ar71xx)')
parser_list_builtin_packages.set_defaults(
handler=subcommands.list_builtin_packages)
parser_list_extra_packages = subparsers.add_parser(
'list-extra', help='list "extra" packages for a release')
parser_list_extra_packages.add_argument(
'release', type=str, nargs='?', action='store', help='name of the release (e.g., quirm)')
parser_list_extra_packages.add_argument(
'architecture', type=str, nargs='?', action='store', help='target architecture (e.g., ar71xx)')
parser_list_extra_packages.set_defaults(
handler=subcommands.list_extra_packages)
parser_list_upgrades = subparsers.add_parser(
'list-upgrades', help='list package upgrades for nodes')
parser_list_upgrades.add_argument(
'release', type=str, nargs='?', action='store', help='show upgrades from this release (e.g., quirm)')
parser_list_upgrades.set_defaults(handler=subcommands.list_upgrades)
parser_remove_extra_package = subparsers.add_parser(
'remove-from-extra', help='remove packages from the "extra" set')
parser_remove_extra_package.add_argument(
'release', type=str, action='store', help='remove package from this release (e.g., quirm)')
parser_remove_extra_package.add_argument(
'architecture', type=str, action='store', help='target architecture (e.g., ar71xx)')
parser_remove_extra_package.add_argument(
'package', type=str, action='store', help='name of the package to remove')
parser_remove_extra_package.add_argument(
'version', type=str, action='store', help='version of the package')
parser_remove_extra_package.set_defaults(
handler=subcommands.remove_extra_package)
parser_add_extra_package = subparsers.add_parser(
'add-to-extra', help='add packages to the "extra" set')
parser_add_extra_package.add_argument(
'release', type=str, action='store', help='add package from this release (e.g., quirm)')
parser_add_extra_package.add_argument(
'architecture', type=str, action='store', help='target architecture (e.g., ar71xx)')
parser_add_extra_package.add_argument(
'package', type=str, action='store', help='name of the package to add')
parser_add_extra_package.add_argument(
'version', type=str, action='store', help='version of the package')
parser_add_extra_package.set_defaults(
handler=subcommands.add_extra_package)
parser_upgrade_package = subparsers.add_parser(
'upgrade', help='upgrade a builtin package on a set of routers')
parser_upgrade_package.add_argument(
'group', type=str, action='store', help='upgrade on this group of routers')
parser_upgrade_package.add_argument(
'release', type=str, action='store', help='upgrade package for this release (e.g., quirm)')
parser_upgrade_package.add_argument(
'architecture', type=str, action='store', help='target architecture (e.g., ar71xx)')
parser_upgrade_package.add_argument(
'package', type=str, action='store', help='name of the builtin package to upgrade')
parser_upgrade_package.add_argument(
'version', type=str, action='store', help='new version of the package')
parser_upgrade_package.set_defaults(handler=subcommands.upgrade_package)
def create_releases_subcommands(subparsers):
parser_list_releases = subparsers.add_parser(
'list', help='list all releases')
parser_list_releases.set_defaults(handler=subcommands.list_releases)
parser_list_architectures = subparsers.add_parser(
'list-architectures', help='list architectures for a release')
parser_list_architectures.add_argument(
'release', type=str, action='store', help='name of the release (e.g., quirm)')
parser_list_architectures.set_defaults(
handler=subcommands.list_architectures)
parser_new_release = subparsers.add_parser(
'new', help='create a new release')
parser_new_release.add_argument(
'name', type=str, action='store', help='name of this release (e.g., quirm)')
parser_new_release.add_argument(
'buildroot', type=str, action='store', help='a compiled OpenWRT buildroot for the release')
parser_new_release.set_defaults(handler=subcommands.new_release)
def main():
parser = argparse.ArgumentParser(
description='Publish releases of BISmark images, packages, and experiments')
parser.add_argument('--root', dest='root', action='store',
default='~/bismark-releases', help='store release configuration in this directory')
    log_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
parser.add_argument('--loglevel', dest='loglevel', action='store',
choices=log_levels, default='WARNING', help='control verbosity of logging')
parser.add_argument('--logfile', dest='logfile', action='store',
default=None, help='append logs to this file')
subparsers = parser.add_subparsers(title='commands')
parser_groups = subparsers.add_parser(
'groups', help='Manage groups of nodes')
groups_subparsers = parser_groups.add_subparsers(title='group subcommands')
create_groups_subcommands(groups_subparsers)
parser_experiments = subparsers.add_parser(
'experiments', help='Manage experiments')
experiments_subparsers = parser_experiments.add_subparsers(
title='experiments subcommands')
create_experiments_subcommands(experiments_subparsers)
parser_packages = subparsers.add_parser('packages', help='Manage packages')
packages_subparsers = parser_packages.add_subparsers(
title='packages subcommands')
create_packages_subcommands(packages_subparsers)
parser_releases = subparsers.add_parser('releases', help='Manage releases')
releases_subparsers = parser_releases.add_subparsers(
title='releases subcommands')
create_releases_subcommands(releases_subparsers)
parser_commit = subparsers.add_parser(
'commit', help='commit current release configuration to git')
parser_commit.set_defaults(handler=subcommands.commit)
parser_diff = subparsers.add_parser(
'diff', help='show changes ready to be committed to git')
parser_diff.set_defaults(handler=subcommands.diff)
parser_deploy = subparsers.add_parser('deploy',
help='deploy all releases',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_deploy.add_argument(
'-d', '--destination', type=str,
default='bismark-downloads.noise.gatech.edu:/var/www/downloads.projectbismark.net',
action='store', help='deploy to this directory')
parser_deploy.add_argument(
'-k', '--signingkey', type=str,
default='~/.bismark_signing_key.pem',
action='store', help='sign Packages.gz with this key')
parser_deploy.set_defaults(handler=subcommands.deploy)
    parser_check = subparsers.add_parser(
        'check', help='check validity of the release configuration')
    parser_check.set_defaults(handler=subcommands.check)
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
filename=args.logfile,
level=getattr(logging, args.loglevel))
releases_tree = tree.BismarkReleasesTree(os.path.expanduser(args.root))
args.handler(releases_tree, args)
if __name__ == '__main__':
main()
| mit | -1,297,748,715,572,889,900 | 49.621387 | 109 | 0.686212 | false |
Leonidas-from-XIV/whatsonair | parsers/fm4.py | 1 | 1241 | #!/usr/bin/env python
# -*- encoding: UTF-8 -*-
import base
class FM4Parser(base.StationBase):
"""The Parser for the austrian sidestream radio station
FM4, which is part of ORF.
Look at it's homepage http://fm4.orf.at
Maybe besser use this songlist?
http://fm4.orf.at/trackservicepopup/main
But then we loose the ability to parse OE3 as well"""
__station__ = 'FM4'
def __init__(self, url='http://hop.orf.at/img-trackservice/fm4.html',
stream='mms://stream1.orf.at/fm4_live'):
base.StationBase.__init__(self, url)
def parse(self):
"""Call feed first"""
# get the titles and the artists
soup = base.Soup(self.pagecontent)
titles = [node.string for node in
base.select(soup, 'span.tracktitle')]
artists = [node.string for node in
base.select(soup, 'span.artist')]
# combine these
combined = zip(artists, titles)
# get the last artist and title
self.artist, self.title = combined[-1]
def current_track(self):
return u"%s - %s" % (self.artist, self.title)
Parser = FM4Parser
if __name__ == '__main__':
base.test_parser(Parser, 'fm4.html')
| gpl-3.0 | -8,550,235,871,334,491,000 | 28.547619 | 73 | 0.593876 | false |
plotly/plotly.py | packages/python/plotly/plotly/validators/barpolar/marker/_colorbar.py | 1 | 11475 | import _plotly_utils.basevalidators
class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="colorbar", parent_name="barpolar.marker", **kwargs):
super(ColorbarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "ColorBar"),
data_docs=kwargs.pop(
"data_docs",
"""
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in *pixels. Use `len` to set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format.
And for dates see:
https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's
date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for
fractional seconds with n digits. For example,
*2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.barpola
r.marker.colorbar.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.barpolar.marker.colorbar.tickformatstopdefaul
ts), sets the default property values to use
for elements of
barpolar.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of
the axis. The default value for inside tick
labels is *hide past domain*. In other cases
the default is *hide past div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.barpolar.marker.co
lorbar.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
barpolar.marker.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
barpolar.marker.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
                Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
""",
),
**kwargs
)
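# Hedged usage sketch, not part of the original module: validators like this are
# normally invoked by the plotly figure machinery, but they can be exercised
# directly; validate_coerce() is assumed to behave as in plotly's basevalidators,
# coercing a plain dict into a ColorBar object.
if __name__ == "__main__":
    validator = ColorbarValidator()
    colorbar = validator.validate_coerce({"title": {"text": "value"}, "thickness": 20})
    print(colorbar)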
| mit | 1,426,902,938,938,346,500 | 46.8125 | 88 | 0.525229 | false |
RicardoJohann/frappe | frappe/utils/user.py | 1 | 10944 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _dict
import frappe.share
from frappe.utils import cint
from frappe.boot import get_allowed_reports
from frappe.permissions import get_roles, get_valid_perms
from frappe.core.doctype.domain_settings.domain_settings import get_active_modules
class UserPermissions:
"""
A user permission object can be accessed as `frappe.get_user()`
"""
def __init__(self, name=''):
self.defaults = None
self.name = name or frappe.session.get('user')
self.roles = []
self.all_read = []
self.can_create = []
self.can_read = []
self.can_write = []
self.can_cancel = []
self.can_delete = []
self.can_search = []
self.can_get_report = []
self.can_import = []
self.can_export = []
self.can_print = []
self.can_email = []
self.can_set_user_permissions = []
self.allow_modules = []
self.in_create = []
self.setup_user()
def setup_user(self):
def get_user_doc():
user = None
try:
user = frappe.get_doc("User", self.name).as_dict()
except frappe.DoesNotExistError:
pass
except Exception as e:
# install boo-boo
if not frappe.db.is_table_missing(e): raise
return user
if not frappe.flags.in_install_db and not frappe.flags.in_test:
user_doc = frappe.cache().hget("user_doc", self.name, get_user_doc)
if user_doc:
self.doc = frappe.get_doc(user_doc)
def get_roles(self):
"""get list of roles"""
if not self.roles:
self.roles = get_roles(self.name)
return self.roles
def build_doctype_map(self):
"""build map of special doctype properties"""
active_domains = frappe.get_active_domains()
self.doctype_map = {}
for r in frappe.db.sql("""select name, in_create, issingle, istable,
read_only, restrict_to_domain, module from tabDocType""", as_dict=1):
if (not r.restrict_to_domain) or (r.restrict_to_domain in active_domains):
self.doctype_map[r['name']] = r
def build_perm_map(self):
"""build map of permissions at level 0"""
self.perm_map = {}
for r in get_valid_perms():
dt = r['parent']
if not dt in self.perm_map:
self.perm_map[dt] = {}
for k in frappe.permissions.rights:
if not self.perm_map[dt].get(k):
self.perm_map[dt][k] = r.get(k)
def build_permissions(self):
"""build lists of what the user can read / write / create
quirks:
read_only => Not in Search
in_create => Not in create
"""
self.build_doctype_map()
self.build_perm_map()
user_shared = frappe.share.get_shared_doctypes()
no_list_view_link = []
active_modules = get_active_modules() or []
for dt in self.doctype_map:
dtp = self.doctype_map[dt]
p = self.perm_map.get(dt, {})
if not p.get("read") and (dt in user_shared):
p["read"] = 1
if not dtp.get('istable'):
if p.get('create') and not dtp.get('issingle'):
if dtp.get('in_create'):
self.in_create.append(dt)
else:
self.can_create.append(dt)
elif p.get('write'):
self.can_write.append(dt)
elif p.get('read'):
if dtp.get('read_only'):
# read_only = "User Cannot Search"
self.all_read.append(dt)
no_list_view_link.append(dt)
else:
self.can_read.append(dt)
if p.get('cancel'):
self.can_cancel.append(dt)
if p.get('delete'):
self.can_delete.append(dt)
if (p.get('read') or p.get('write') or p.get('create')):
if p.get('report'):
self.can_get_report.append(dt)
for key in ("import", "export", "print", "email", "set_user_permissions"):
if p.get(key):
getattr(self, "can_" + key).append(dt)
if not dtp.get('istable'):
if not dtp.get('issingle') and not dtp.get('read_only'):
self.can_search.append(dt)
if dtp.get('module') not in self.allow_modules:
if active_modules and dtp.get('module') not in active_modules:
pass
else:
self.allow_modules.append(dtp.get('module'))
self.can_write += self.can_create
self.can_write += self.in_create
self.can_read += self.can_write
self.shared = frappe.db.sql_list("""select distinct share_doctype from `tabDocShare`
where `user`=%s and `read`=1""", self.name)
self.can_read = list(set(self.can_read + self.shared))
self.all_read += self.can_read
for dt in no_list_view_link:
if dt in self.can_read:
self.can_read.remove(dt)
if "System Manager" in self.get_roles():
self.can_import = filter(lambda d: d in self.can_create,
frappe.db.sql_list("""select name from `tabDocType` where allow_import = 1"""))
def get_defaults(self):
import frappe.defaults
self.defaults = frappe.defaults.get_defaults(self.name)
return self.defaults
# update recent documents
def update_recent(self, dt, dn):
rdl = frappe.cache().hget("user_recent", self.name) or []
new_rd = [dt, dn]
# clear if exists
for i in range(len(rdl)):
rd = rdl[i]
if rd==new_rd:
del rdl[i]
break
if len(rdl) > 19:
rdl = rdl[:19]
rdl = [new_rd] + rdl
frappe.cache().hset("user_recent", self.name, rdl)
def _get(self, key):
if not self.can_read:
self.build_permissions()
return getattr(self, key)
def get_can_read(self):
"""return list of doctypes that the user can read"""
if not self.can_read:
self.build_permissions()
return self.can_read
def load_user(self):
d = frappe.db.sql("""select email, first_name, last_name, creation,
email_signature, user_type, language, background_image, background_style,
mute_sounds, send_me_a_copy from tabUser where name = %s""", (self.name,), as_dict=1)[0]
if not self.can_read:
self.build_permissions()
d.name = self.name
d.recent = json.dumps(frappe.cache().hget("user_recent", self.name) or [])
d.roles = self.get_roles()
d.defaults = self.get_defaults()
for key in ("can_create", "can_write", "can_read", "can_cancel", "can_delete",
"can_get_report", "allow_modules", "all_read", "can_search",
"in_create", "can_export", "can_import", "can_print", "can_email",
"can_set_user_permissions"):
d[key] = list(set(getattr(self, key)))
d.all_reports = self.get_all_reports()
return d
def get_all_reports(self):
return get_allowed_reports()
def get_user_fullname(user):
fullname = frappe.db.sql("SELECT CONCAT_WS(' ', first_name, last_name) FROM `tabUser` WHERE name=%s", (user,))
return fullname and fullname[0][0] or ''
def get_fullname_and_avatar(user):
first_name, last_name, avatar, name = frappe.db.get_value("User",
user, ["first_name", "last_name", "user_image", "name"])
return _dict({
"fullname": " ".join(filter(None, [first_name, last_name])),
"avatar": avatar,
"name": name
})
def get_system_managers(only_name=False):
"""returns all system manager's user details"""
import email.utils
from frappe.core.doctype.user.user import STANDARD_USERS
system_managers = frappe.db.sql("""SELECT DISTINCT `name`, `creation`,
CONCAT_WS(' ',
CASE WHEN `first_name`= '' THEN NULL ELSE `first_name` END,
CASE WHEN `last_name`= '' THEN NULL ELSE `last_name` END
) AS fullname
FROM `tabUser` AS p
WHERE `docstatus` < 2
AND `enabled` = 1
AND `name` NOT IN ({})
AND exists
(SELECT *
FROM `tabHas Role` AS ur
WHERE ur.parent = p.name
AND ur.role='System Manager')
ORDER BY `creation` DESC""".format(", ".join(["%s"]*len(STANDARD_USERS))),
STANDARD_USERS, as_dict=True)
if only_name:
return [p.name for p in system_managers]
else:
return [email.utils.formataddr((p.fullname, p.name)) for p in system_managers]
def add_role(user, role):
frappe.get_doc("User", user).add_roles(role)
def add_system_manager(email, first_name=None, last_name=None, send_welcome_email=False):
# add user
user = frappe.new_doc("User")
user.update({
"name": email,
"email": email,
"enabled": 1,
"first_name": first_name or email,
"last_name": last_name,
"user_type": "System User",
"send_welcome_email": 1 if send_welcome_email else 0
})
user.insert()
# add roles
roles = frappe.get_all('Role',
fields=['name'],
filters={
'name': ['not in', ('Administrator', 'Guest', 'All')]
}
)
roles = [role.name for role in roles]
user.add_roles(*roles)
def get_enabled_system_users():
# add more fields if required
return frappe.get_all('User',
fields=['email', 'language', 'name'],
filters={
'user_type': 'System User',
'enabled': 1,
'name': ['not in', ('Administrator', 'Guest')]
}
)
def is_website_user():
return frappe.db.get_value('User', frappe.session.user, 'user_type') == "Website User"
def is_system_user(username):
return frappe.db.get_value("User", {"name": username, "enabled": 1, "user_type": "System User"})
def get_users():
from frappe.core.doctype.user.user import get_system_users
users = []
system_managers = frappe.utils.user.get_system_managers(only_name=True)
for user in get_system_users():
users.append({
"full_name": frappe.utils.user.get_user_fullname(user),
"email": user,
"is_system_manager": 1 if (user in system_managers) else 0
})
return users
def set_last_active_to_now(user):
from frappe.utils import now_datetime
frappe.db.set_value("User", user, "last_active", now_datetime())
def disable_users(limits=None):
if not limits:
return
if limits.get('users'):
system_manager = get_system_managers(only_name=True)[-1]
#exclude system manager from active user list
active_users = frappe.db.sql_list("""select name from tabUser
where name not in ('Administrator', 'Guest', %s) and user_type = 'System User' and enabled=1
order by creation desc""", system_manager)
user_limit = cint(limits.get('users')) - 1
if len(active_users) > user_limit:
# if allowed user limit 1 then deactivate all additional users
# else extract additional user from active user list and deactivate them
if cint(limits.get('users')) != 1:
active_users = active_users[:-1 * user_limit]
for user in active_users:
frappe.db.set_value("User", user, 'enabled', 0)
from frappe.core.doctype.user.user import get_total_users
if get_total_users() > cint(limits.get('users')):
reset_simultaneous_sessions(cint(limits.get('users')))
frappe.db.commit()
def reset_simultaneous_sessions(user_limit):
for user in frappe.db.sql("""select name, simultaneous_sessions from tabUser
where name not in ('Administrator', 'Guest') and user_type = 'System User' and enabled=1
order by creation desc""", as_dict=1):
if user.simultaneous_sessions < user_limit:
user_limit = user_limit - user.simultaneous_sessions
else:
frappe.db.set_value("User", user.name, "simultaneous_sessions", 1)
user_limit = user_limit - 1
def get_link_to_reset_password(user):
link = ''
if not cint(frappe.db.get_single_value('System Settings', 'setup_complete')):
user = frappe.get_doc("User", user)
link = user.reset_password(send_email=False)
frappe.db.commit()
return {
'link': link
}
| mit | -2,402,520,111,732,548,000 | 28.106383 | 111 | 0.660636 | false |
rwl/PyCIM | CIM15/IEC61970/Informative/InfWork/CUContractorItem.py | 1 | 3475 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class CUContractorItem(IdentifiedObject):
"""Compatible unit contractor item.Compatible unit contractor item.
"""
def __init__(self, bidAmount=0.0, activityCode='', CompatibleUnits=None, status=None, *args, **kw_args):
"""Initialises a new 'CUContractorItem' instance.
@param bidAmount: The amount that a given contractor will charge for performing this unit of work.
@param activityCode: Activity code identifies a specific and distinguishable unit of work.
@param CompatibleUnits:
@param status:
"""
#: The amount that a given contractor will charge for performing this unit of work.
self.bidAmount = bidAmount
#: Activity code identifies a specific and distinguishable unit of work.
self.activityCode = activityCode
self._CompatibleUnits = []
self.CompatibleUnits = [] if CompatibleUnits is None else CompatibleUnits
self.status = status
super(CUContractorItem, self).__init__(*args, **kw_args)
_attrs = ["bidAmount", "activityCode"]
_attr_types = {"bidAmount": float, "activityCode": str}
_defaults = {"bidAmount": 0.0, "activityCode": ''}
_enums = {}
_refs = ["CompatibleUnits", "status"]
_many_refs = ["CompatibleUnits"]
def getCompatibleUnits(self):
return self._CompatibleUnits
def setCompatibleUnits(self, value):
for p in self._CompatibleUnits:
            filtered = [q for q in p.CUContractorItems if q != self]
            p._CUContractorItems = filtered
for r in value:
if self not in r._CUContractorItems:
r._CUContractorItems.append(self)
self._CompatibleUnits = value
CompatibleUnits = property(getCompatibleUnits, setCompatibleUnits)
def addCompatibleUnits(self, *CompatibleUnits):
for obj in CompatibleUnits:
if self not in obj._CUContractorItems:
obj._CUContractorItems.append(self)
self._CompatibleUnits.append(obj)
def removeCompatibleUnits(self, *CompatibleUnits):
for obj in CompatibleUnits:
if self in obj._CUContractorItems:
obj._CUContractorItems.remove(self)
self._CompatibleUnits.remove(obj)
status = None
| mit | -7,500,218,546,023,297,000 | 40.86747 | 108 | 0.696691 | false |
TusharAgey/seventhsem | AI/search_algos/home/dfs.py | 1 | 1653 | import json
class MyStack: # a simple last-in-first-out (LIFO) stack
def __init__(self):
self.elements = []
def push(self,val):
self.elements.append(val)
	def pop(self):
		# Remove and return the most recently pushed element;
		# returns None when the stack is empty.
		try:
			return self.elements.pop()
		except IndexError:
			return None
def IsEmpty(self):
result = False
if len(self.elements) == 0:
result = True
return result
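# Illustrative self-check (not part of the original exercise): shows the
# last-in-first-out behaviour of MyStack. It is never called automatically.
def _mystack_example():
	s = MyStack()
	s.push('A')
	s.push('B')
	assert s.pop() == 'B'  # most recently pushed element comes out first
	assert s.pop() == 'A'
	assert s.IsEmpty()
	assert s.pop() is None  # popping an empty stack yields None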
def getNeighbours(nextElem, arrOfArr, visited):
elems = []
i = ord(nextElem) - ord('A')
x = 0
for j in arrOfArr[i]:
if j > 0:
data = chr(x + ord('A'))
if data not in visited:
elems.append(data)
x += 1
return elems
def dfs(input):
visited = []
start = 'A' #considering A as start node always & element with 0 heuristic as goal node
#{"edges": [[0, 3, 4, -1, -1], [-1, 0, 5, 6, 7], [-1, -1, 0, 1, 2], [-1, -1, -1, 0, 1], [-1, -1, -1, -1, 0]]}
for elem in input['heuristics']:
for data in elem:
if elem[data] == 0:
goal = data
finalPath = []
finalPath.append(start)
stack = MyStack()
stack.push(start)
neighbours = []
while stack.IsEmpty() == False:
nextElem = stack.pop()
if nextElem not in finalPath:
finalPath.append(nextElem)
neighbours = getNeighbours(nextElem, input['edges'], finalPath)
for elem in neighbours:
if elem not in finalPath:
stack.push(elem)
print finalPath
return finalPath
js=open('./data/input.json')
data=json.load(js)
finalPath = {"path" : []}
finalPath['path'] = dfs(data)
with open('./data/DFS.json', 'w') as fp:
	json.dump(finalPath, fp)
| gpl-3.0 | 2,728,845,798,203,362,300 | 24.446154 | 110 | 0.628554 | false |
sryza/freewaydata | python/traveltime.py | 1 | 2503 | import numpy as np
import pandas as pd
def datetime64_to_microseconds(dt):
return dt.astype('uint64')
def travel_time(start_time, path, measurements_by_station, station_metadata, time_granularity=60*60):
"""Calculate the travel time along the given path at the given start time
    Args:
    start_time - start time as a numpy datetime64
    path - list of station IDs that must be traversed to reach the destination
    measurements_by_station - dict mapping station ID to a DataFrame with 'timestamp' and 'avgspeed' columns
    station_metadata - DataFrame indexed by station ID with an 'Abs_PM' (absolute postmile) column
    time_granularity - granularity of samples in seconds
"""
time_granularity *= 1000000 # convert to microseconds
time = datetime64_to_microseconds(start_time)
total_dist = 0
for i in range(len(path)-1):
# calculate how long it takes to get to the next station based on the
# current time
sid1 = path[i]
sid2 = path[i+1]
measurements = measurements_by_station[sid1]
quantized = np.datetime64(time - time % time_granularity)
filtered = measurements[measurements['timestamp'] == quantized]
speed = filtered.iloc[0]['avgspeed']
if np.isnan(speed):
return (np.nan, np.nan)
station1_metadata = station_metadata.loc[sid1]
station2_metadata = station_metadata.loc[sid2]
dist = abs(station1_metadata['Abs_PM'] - station2_metadata['Abs_PM'])
total_dist += dist
# TODO: what if speed is NAN? interpolate
time += 1000000 * 60 * 60 * dist / speed
return (total_dist, np.datetime64(time) - start_time)
def test_travel_time():
path = [213, 224, 285, 485]
station_metadata = pd.DataFrame({'Abs_PM' : pd.Series([0, 60, 75, 85], index=[213, 224, 285, 485])})
base_time = np.datetime64('2013-01-01')
hour = np.timedelta64(1000000 * 60 * 60)
times = pd.Series([base_time, base_time + hour], index=range(2))
speeds = [[40, np.nan], [np.nan, 60], [np.nan, 120], [np.nan, np.nan]]
samples_by_station = {path[i] : pd.DataFrame({'timestamp' : times, 'avgspeed' : speeds[i]}) for i in range(len(path))}
start_time = base_time + np.timedelta64(5 * 1000000 * 60) # start at 5 minutes past the hour
# Traveling 60 miles at 40 MPH should put us in the next hour (total time = 1:35)
# Then traveling 15 miles at 60 MPH should keep us in the same hour (total time = 1:50)
# Then 10 miles at 120 MPH should get us to our destination (total time = 1:55)
# Travel time is 1:55 minus the 5 minutes past the hour we started at, so 1:50
print travel_time(start_time, path, samples_by_station, station_metadata)
if __name__ == '__main__':
test_travel_time()
| apache-2.0 | -3,499,258,868,963,724,300 | 40.716667 | 120 | 0.681582 | false |
50wu/gpdb | gpMgmt/test/behave/mgmt_utils/steps/replication_slots_utils.py | 7 | 5201 | import os
from behave import given, when, then
from test.behave_utils.utils import (
stop_database,
run_command,
stop_primary,
query_sql,
wait_for_unblocked_transactions,
)
from test.behave.mgmt_utils.steps.mirrors_mgmt_utils import (add_three_mirrors)
def assert_successful_command(context):
if context.ret_code != 0:
raise Exception('%s : %s' % (context.error_message, context.stdout_message))
def create_cluster(context, with_mirrors=True):
context.initial_cluster_size = 3
context.current_cluster_size = context.initial_cluster_size
os.environ['PGPORT'] = '15432'
cmd = """
cd ../gpAux/gpdemo; \
export DEMO_PORT_BASE={port_base} && \
export NUM_PRIMARY_MIRROR_PAIRS={num_primary_mirror_pairs} && \
        export WITH_MIRRORS={with_mirrors} && \
./demo_cluster.sh -d && ./demo_cluster.sh -c && \
./demo_cluster.sh
""".format(port_base=os.getenv('PORT_BASE', 15432),
num_primary_mirror_pairs=os.getenv(
'NUM_PRIMARY_MIRROR_PAIRS', context.initial_cluster_size),
with_mirrors=('true' if with_mirrors else 'false'))
run_command(context, cmd)
assert_successful_command(context)
def ensure_temp_directory_is_empty(context, temp_directory):
run_command(context, "rm -rf /tmp/{temp_directory}".format(
temp_directory=temp_directory))
def expand(context):
ensure_temp_directory_is_empty(context, "behave_test_expansion_primary")
ensure_temp_directory_is_empty(context, "behave_test_expansion_mirror")
expansion_command = """gpexpand --input <(echo '
localhost|localhost|25438|/tmp/behave_test_expansion_primary|8|3|p
localhost|localhost|25439|/tmp/behave_test_expansion_mirror|9|3|m
')
"""
# Initialize
run_command(context, expansion_command)
assert_successful_command(context)
# Redistribute tables
run_command(context, expansion_command)
assert_successful_command(context)
def ensure_primary_mirror_switched_roles():
results = query_sql(
"postgres",
"select * from gp_segment_configuration where preferred_role <> role"
)
if results.rowcount != 2:
raise Exception("expected 2 segments to not be in preferred roles")
@given('I have a machine with no cluster')
def step_impl(context):
stop_database(context)
@given('a mirror has crashed')
@when('a mirror has crashed')
def step_impl(context):
host, datadir = query_sql("postgres",
"SELECT hostname, datadir FROM gp_segment_configuration WHERE role='m' AND content=0"
).fetchone()
# NOTE that these commands are manually escaped; beware when adding dollar
# signs or double-quotes!
cmd = "ps aux | grep '[p]ostgres .* %s' | awk '{print \$2}' | xargs kill -9" % datadir
cmd = 'ssh %s "%s"' % (host, cmd)
run_command(context, cmd)
# If the kill succeeds, recover the mirror when this test is done.
def recover():
run_command(context, "gprecoverseg -a")
context.add_cleanup(recover)
wait_for_unblocked_transactions(context)
@when('I create a cluster')
def step_impl(context):
create_cluster(context)
@then('the primaries and mirrors should be replicating using replication slots')
def step_impl(context):
result_cursor = query_sql(
"postgres",
"select pg_get_replication_slots() from gp_dist_random('gp_id') order by gp_segment_id"
)
if result_cursor.rowcount != context.current_cluster_size:
raise Exception("expected all %d primaries to have replication slots, only %d have slots" % (context.current_cluster_size, results.rowcount))
for content_id, result in enumerate(result_cursor.fetchall()):
pg_rep_slot = result[0]
if (pg_rep_slot[0], pg_rep_slot[2], pg_rep_slot[4]) != ('internal_wal_replication_slot','physical','f') :
raise Exception(
"expected replication slot to be active for content id %d, got %s" %
(content_id, result[0])
)
@then('the mirrors should not have replication slots')
def step_impl(context):
result_cursor = query_sql(
"postgres",
"select datadir from gp_segment_configuration where role='m';"
)
for content_id, result in enumerate(result_cursor.fetchall()):
path_to_replslot = os.path.join(result[0], 'pg_replslot')
if len(os.listdir(path_to_replslot)) > 0:
raise Exception("expected replication slot directory to be empty")
@given('a preferred primary has failed')
def step_impl(context):
stop_primary(context, 0)
wait_for_unblocked_transactions(context)
@when('primary and mirror switch to non-preferred roles')
def step_impl(context):
ensure_primary_mirror_switched_roles()
@given("I cluster with no mirrors")
def step_impl(context):
create_cluster(context, with_mirrors=False)
@when("I add mirrors to the cluster")
def step_impl(context):
add_three_mirrors(context)
@given("I create a cluster")
def step_impl(context):
create_cluster(context, with_mirrors=True)
@when("I add a segment to the cluster")
def step_imp(context):
context.current_cluster_size = 4
expand(context)
| apache-2.0 | -1,375,849,752,420,686,300 | 30.143713 | 149 | 0.671025 | false |
ahri/pycurlbrowser | pycurlbrowser/rest_client.py | 1 | 3465 | # coding: utf-8
"""
REST functionality based off pycurlbrowser's Browser.
"""
try:
import simplejson as json
except ImportError:
import json
from . import Browser
class StatusInformational(Exception):
"""
Represent 1xx status codes
"""
class StatusRedirection(Exception):
"""
Represent 3xx status codes
"""
class StatusClientError(Exception):
"""
Represent 4xx status codes
"""
class StatusServerError(Exception):
"""
Represent 5xx status codes
"""
def status_factory(status):
"""Post exceptions based on HTTP status codes"""
if 100 <= status < 200:
return StatusInformational()
elif 300 <= status < 400:
return StatusRedirection()
elif 400 <= status < 500:
return StatusClientError()
elif 500 <= status < 600:
return StatusServerError()
raise ValueError("Unsupported error code: %d" % status)
class RestClient(Browser):
"""
A simple REST client based upon pycurlbrowser
"""
def __init__(self, base, *args, **kwargs):
super(RestClient, self).__init__(*args, **kwargs)
self.base = base
def go(self, obj, method, uid=None, data=None, headers=None):
url = '%(base)s/%(obj)s' % {'base': self.base,
'obj' : obj}
if uid is not None:
url += '/%s' % uid
super(RestClient, self).go(url=url,
method=method,
data=data,
headers=headers)
if self.http_code != 200:
raise status_factory(self.http_code)
return self.http_code
# CRUD
def post(self, obj, data=None, headers=None):
"""Post"""
self.go(obj, 'POST', data=data, headers=headers)
return self.src
def get(self, obj, uid=None, headers=None):
"""Get"""
self.go(obj, 'GET', uid=uid, headers=headers)
return self.src
def head(self, obj, uid=None, headers=None):
"""Head"""
# TODO: care about headers
# TODO: think about self._curl.setopt(pycurl.NOBODY, 1)
self.go(obj, 'HEAD', uid=uid, headers=headers)
def put(self, obj, uid, data=None, headers=None):
"""Put"""
self.go(obj, 'PUT', uid=uid, data=data, headers=headers)
return self.src
def delete(self, obj, uid, headers=None):
"""Delete"""
# TODO: care about headers
self.go(obj, 'DELETE', uid=uid, headers=headers)
return self.src
class RestClientJson(RestClient):
"""
A REST client that only speaks JSON
"""
def post(self, obj, data=None):
"""Post"""
res = super(RestClientJson, self).post(obj, json.dumps(data), headers={'Content-Type': 'text/json'})
if len(res) > 0:
return json.loads(res)
return None
def get(self, obj, uid=None):
"""Get"""
return json.loads(super(RestClientJson, self).get(obj, uid))
def put(self, obj, uid, data=None):
"""Put"""
res = super(RestClientJson, self).put(obj, uid, json.dumps(data), headers={'Content-Type': 'text/json'})
if len(res) > 0:
return json.loads(res)
return None
def delete(self, obj, uid):
"""Delete"""
res = super(RestClientJson, self).delete(obj, uid)
if len(res) > 0:
return json.loads(res)
return None
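# Usage sketch (illustrative only): the base URL, resource name and payload
# below are hypothetical and not part of pycurlbrowser itself.
def _rest_client_json_example():
    client = RestClientJson('http://api.example.com')
    created = client.post('widgets', data={'name': 'sprocket'})
    listing = client.get('widgets')
    return created, listing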
| agpl-3.0 | 3,467,552,450,697,954,300 | 24.858209 | 112 | 0.561328 | false |
luoguanyang/google-python-exercises | basic/wordcount.py | 1 | 2854 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
def wordcountDict(filename):
dict={}
f=open(filename,'rU')
for line in f:
lowerline=line.lower()
words=lowerline.split()
for word in words:
if not (word in dict):
dict[word]=1
else:
dict[word] +=1
f.close()
return dict
def print_words(filename):
dict= wordcountDict(filename)
keys=sorted(dict.keys())
for key in keys:
print key+' '+str(dict[key])
def print_top(filename):
dict=wordcountDict(filename)
def sortByCount(key):
return dict[key]
  keys=sorted(dict,key=sortByCount) # sorted() iterates over the dict's keys
keys.reverse()
for key in keys[:20]:
print key+' '+str(dict[key])
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
if len(sys.argv) != 3:
print 'usage: ./wordcount.py {--count | --topcount} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
print 'unknown option: ' + option
sys.exit(1)
if __name__ == '__main__':
main()
| apache-2.0 | 5,757,111,439,567,327,000 | 28.729167 | 79 | 0.6822 | false |
nzjoel1234/sprinkler | driver/input_thread.py | 1 | 3639 | import threading
SIMULATED = False
try:
import Adafruit_CharLCD as Lcd
except ImportError:
import lcd_simulator as Lcd
SIMULATED = True
DEBOUNCE_THRESHOLD = 2
SCREEN_TIMEOUT = 60
class InputThreadWrapper(threading.Thread):
def __init__(self, is_button_pressed, buttons, create_home_screen):
threading.Thread.__init__(self)
self._is_button_pressed = is_button_pressed
self._buttons = buttons
self._create_home_screen = create_home_screen
self._stop_event = threading.Event()
self._timeout_stop_event = threading.Event()
self._debounce = {}
self._button_latched = {}
self._view_model = None
self._timeout_lock = threading.RLock()
self._timeout_counter = 0
self._enabled = False
def _start_screen_timeout(self):
with self._timeout_lock:
self._timeout_counter += 1
self._timeout_stop_event.set()
self._timeout_stop_event = threading.Event()
timeout_counter = self._timeout_counter
target = lambda: self._wait_for_screen_timeout(self._timeout_stop_event, timeout_counter)
threading.Thread(target=target).start()
def _wait_for_screen_timeout(self, _stop_event, timeout_counter):
_stop_event.wait(SCREEN_TIMEOUT)
with self._timeout_lock:
if timeout_counter == self._timeout_counter:
self.set_enabled(False)
def set_enabled(self, enabled):
if not self._view_model is None:
self._view_model.set_enabled(enabled)
if not enabled:
self._timeout_stop_event.set()
self._view_model = None
self._enabled = enabled
def set_view_model(self, new_view_model=None):
if not self._view_model is None:
self._view_model.set_enabled(False)
if new_view_model is None:
new_view_model = self._create_home_screen(self.set_view_model)
self._view_model = new_view_model
new_view_model.set_enabled(self._enabled)
def on_button_press(self, button):
if button == Lcd.SELECT:
self.set_enabled(not self._enabled)
if not self._enabled:
return
if self._view_model is None:
self.set_view_model()
self._start_screen_timeout()
if button == Lcd.LEFT:
self._view_model.on_left_pressed()
elif button == Lcd.RIGHT:
self._view_model.on_right_pressed()
elif button == Lcd.UP:
self._view_model.on_up_pressed()
elif button == Lcd.DOWN:
self._view_model.on_down_pressed()
def run(self):
while not self._stop_event.is_set():
for button in self._buttons:
if not button in self._debounce:
self._debounce[button] = 0
self._button_latched[button] = False
if self._is_button_pressed(button) \
and self._debounce[button] < DEBOUNCE_THRESHOLD:
self._debounce[button] += 1
elif self._debounce[button] > 0:
self._debounce[button] -= 1
if self._debounce[button] == 0:
self._button_latched[button] = False
if self._debounce[button] == DEBOUNCE_THRESHOLD \
and not self._button_latched[button]:
self._button_latched[button] = True
self.on_button_press(button)
self._stop_event.wait(0.01)
def stop(self):
self._stop_event.set()
self._timeout_stop_event.set()
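# Wiring sketch (illustrative only): `is_pressed` and `home_screen_factory`
# are hypothetical callables supplied by the rest of the sprinkler driver.
def _input_thread_example(is_pressed, home_screen_factory):
    buttons = [Lcd.SELECT, Lcd.LEFT, Lcd.RIGHT, Lcd.UP, Lcd.DOWN]
    thread = InputThreadWrapper(is_pressed, buttons, home_screen_factory)
    thread.start()
    return thread  # the caller is expected to invoke thread.stop() on shutdown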
| mit | -8,851,845,360,741,925,000 | 35.39 | 97 | 0.573509 | false |
eladnoor/small-molecule-regulation | oldcode/meta_analysis_clustering.py | 1 | 1640 | # -*- coding: utf-8 -*-
# Cluster and compare incidence of activation and inhibition across species
import settings as S
import pandas as pd
import os
import numpy as np
import pdb
import scipy.stats as st
import matplotlib.pyplot as plt
import seaborn as sns
plt.ion()
plt.close('all')
# Minimum number of interactions required to print data
minval = 3
ki = S.read_cache('inhibiting')
act = S.read_cache('activating')
tax = S.read_cache('TaxonomicData_temp')
# Drop entries without organism
ki = ki[pd.notnull(ki['Organism'])]
act = act[pd.notnull(act['Organism'])]
# Convert LigandID to string
ki['LigandID'] = ki['LigandID'].astype(str)
act['LigandID'] = act['LigandID'].astype(str)
# Drop null values
ki = ki[pd.notnull(ki['LigandID'])]
act = act[pd.notnull(act['LigandID'])]
# We don't want duplicate measurements of the same EC:LigandID in the same organism
ki.index = [':'.join( [ki.at[row,'EC_number'],ki.at[row,'LigandID'],ki.at[row,'Organism']] ) for row in ki.index]
act.index = [':'.join([act.at[row,'EC_number'], act.at[row,'LigandID'], act.at[row,'Organism'] ]) for row in act.index]
ki = ki[~ki.index.duplicated()]
act = act[~act.index.duplicated()]
# Make tables
print('Cross tabulating...')
kitab = pd.crosstab(ki.EC_number, ki.LigandID)
acttab = pd.crosstab(act.EC_number, act.LigandID)
# Drop indices where row or column sums equal zero
kitab = kitab.loc[(kitab.sum(axis=1) > minval), (kitab.sum(axis=0) >minval)]
acttab = acttab.loc[(acttab.sum(axis=1) > minval), (acttab.sum(axis=0) >minval)]
print('Writing to file...')
kitab.to_csv('../cache/inh_crosstab.csv')
acttab.to_csv('../cache/act_crosstab.csv')
| mit | 1,391,822,448,769,291,500 | 28.836364 | 119 | 0.704268 | false |
yawd/yawd-elfinder | elfinder/widgets.py | 1 | 4686 | import json
from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms.widgets import Input
from django.utils.safestring import mark_safe
from django.utils.translation import to_locale, get_language, ugettext as _
from fields import ElfinderFile
from conf import settings as ls
class ElfinderWidget(Input):
"""
A widget that opens the elfinder file manager for selecting a file.
``attrs``
The TextInput attrs
``options``
Optional. Sets the elfinder (client) configuration options
``optionset``
The key of the ELFINDER_CONNECTOR_OPTION_SETS setting to use as connector settings
"""
input_type = 'hidden'
def __init__(self, optionset, start_path, attrs={'size':'42'}, options={}):
self.options, self.optionset, self.start_path = options, optionset, start_path
super(ElfinderWidget, self).__init__(attrs)
#locate current locale
self.current_locale = to_locale(get_language())
def _media(self):
"""
Set the widget's javascript and css
"""
js = [ls.ELFINDER_JS_URLS[x] for x in sorted(ls.ELFINDER_JS_URLS)] + [ls.ELFINDER_WIDGET_JS_URL]
screen_css = [ls.ELFINDER_CSS_URLS[x] for x in sorted(ls.ELFINDER_CSS_URLS)] + [ls.ELFINDER_WIDGET_CSS_URL]
#add language file to javascript media
if not self.current_locale.startswith('en') and self.current_locale in ls.ELFINDER_LANGUAGES:
js.append('%selfinder.%s.js' % (ls.ELFINDER_LANGUAGES_ROOT_URL, self.current_locale))
return forms.Media(css= {'screen': screen_css}, js = js)
media = property(_media)
def render(self, name, value, attrs=None):
"""
Display the widget
"""
#if self.optionset in ls.ELFINDER_CONNECTOR_OPTION_SETS and 'uploadAllow' in ls.ELFINDER_CONNECTOR_OPTION_SETS[self.optionset] and ls.ELFINDER_CONNECTOR_OPTION_SETS[self.optionset]['uploadAllow']:
# html = '<div class="elfinder_filetypes">(' + _('Allowed mime types: ') + str(ls.ELFINDER_CONNECTOR_OPTION_SETS[self.optionset]['uploadAllow']) + ')</div>'
#update the elfinder client options
self.options.update({
'url' : reverse('yawdElfinderConnectorView', args=[
self.optionset,
'default' if self.start_path is None else self.start_path
]),
'rememberLastDir' : True if not self.start_path else False,
})
if not 'rmSoundUrl' in self.options:
self.options['rmSoundUrl'] = '%selfinder/sounds/rm.wav' % settings.STATIC_URL
#update the elfinder client language
if not self.current_locale.startswith('en') and self.current_locale in ls.ELFINDER_LANGUAGES:
self.options.update({ 'lang' : self.current_locale })
if value:
if not isinstance(value, ElfinderFile):
value = ElfinderFile(hash_=value, optionset=self.optionset)
file_ = 'file : %s' % json.dumps(value.info)
else:
file_ = 'file : {}'
elfinder = 'elfinder : %s' % json.dumps(self.options)
html = ('%(super)s\n'
'<script>\n'
' (function($) {\n'
' $(document).ready( function() {\n'
' $("#%(id)s").elfinderwidget({\n'
' %(file)s,\n'
' %(elfinder)s,\n'
' keywords : { size : "%(size)s", path : "%(path)s", link : "%(link)s", modified : "%(modified)s", dimensions : "%(dimensions)s", update : "%(update)s", set : "%(set)s", clear : "%(clear)s" }'
' });\n'
' })\n'
' })(yawdelfinder.jQuery)\n'
'</script>' % {
'super' : super(ElfinderWidget, self).render(name, value, attrs),
'id' : attrs['id'],
'file' : file_,
'elfinder' : elfinder,
#these keywords are optional, since they are initialized in elfinderwidget
#we override them for localization purposes
'size' : _('Size'),
'path' : _('Path'),
'link' : _('Link'),
'modified' : _('Modified'),
'dimensions' : _('Dimensions'),
'update' : _('Update'),
'set' : _('Set'),
'clear' : _('Clear')
})
return mark_safe(html)
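# Usage sketch (illustrative only; the optionset name below is an assumption):
#
#   class DocumentForm(forms.Form):
#       attachment = forms.CharField(
#           widget=ElfinderWidget(optionset='default', start_path=None))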
| bsd-3-clause | 8,121,584,248,681,799,000 | 43.207547 | 223 | 0.541827 | false |
SalesforceFoundation/HEDAP | robot/EDA/resources/EDA.py | 1 | 27470 | import logging
import time
import warnings
from BaseObjects import BaseEDAPage
from cumulusci.robotframework.utils import selenium_retry, capture_screenshot_on_error
from robot.libraries.BuiltIn import RobotNotRunningError
from robot.utils import lower
from selenium.common.exceptions import NoSuchWindowException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.keys import Keys
from locators_51 import eda_lex_locators as locators_51
from locators_50 import eda_lex_locators as locators_50
locators_by_api_version = {
51.0: locators_51, # Spring '21
50.0: locators_50 # Winter '21
}
# will get populated in _init_locators
eda_lex_locators = {}
@selenium_retry
class EDA(BaseEDAPage):
ROBOT_LIBRARY_SCOPE = "GLOBAL"
ROBOT_LIBRARY_VERSION = 1.0
def __init__(self, debug=False):
self.debug = debug
self.current_page = None
self._session_records = []
# Turn off info logging of all http requests
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
logging.WARN
)
self._init_locators()
def _init_locators(self):
try:
client = self.cumulusci.tooling
response = client._call_salesforce(
'GET', 'https://{}/services/data'.format(client.sf_instance)
)
self.latest_api_version = float(response.json()[-1]['version'])
if self.latest_api_version not in locators_by_api_version:
warnings.warn("Could not find locator library for API %d" % self.latest_api_version)
self.latest_api_version = max(locators_by_api_version.keys())
except RobotNotRunningError:
# We aren't part of a running test, likely because we are
# generating keyword documentation. If that's the case, assume
# the latest supported version
self.latest_api_version = max(locators_by_api_version.keys())
locators = locators_by_api_version[self.latest_api_version]
eda_lex_locators.update(locators)
def populate_address(self, loc, value):
""" Populate address with Place Holder aka Mailing Street etc as a locator
and actual value of the place holder.
"""
xpath = eda_lex_locators["mailing_address"].format(loc)
field = self.selenium.get_webelement(xpath)
field.send_keys(value)
def click_record_button(self, title):
""" Pass title of the button to click the buttons on the records edit page. Most common buttons are: save and cancel.
"""
locator = eda_lex_locators["record"]["button"].format(title)
self.selenium.set_focus_to_element(locator)
button = self.selenium.get_webelement(locator)
button.click()
def click_dropdown(self, title):
locator = eda_lex_locators["record"]["list"].format(title)
self.selenium.set_focus_to_element(locator)
self.selenium.get_webelement(locator).click()
def pick_date(self, value):
"""To pick a date from the date picker"""
locator = eda_lex_locators["record"]["datepicker"].format(value)
self.selenium.set_focus_to_element(locator)
self.selenium.get_webelement(locator).click()
def click_edit_button(self, title):
locator = eda_lex_locators["record"]["edit_button"].format(title)
self.selenium.get_webelement(locator).click()
def click_run_action_button(self,text):
""" This method clicks the any action button (blue in color) present in EDA settings sub
tabs. Pass the name of the button from robot file.
"""
locator = eda_lex_locators["eda_settings"]["action_button"].format(text)
self.selenium.wait_until_page_contains_element(
locator, error=f"Run action button with locator '{locator}' is not available")
self.selenium.click_element(locator)
def click_add_new_setting_button(self,setting_type,button):
""" This method clicks the add setting/mapping button (blue in color) present in EDA settings sub
tabs in both edit and read mode. Pass the name of the setting and name of the button
from robot file.
"""
locator = eda_lex_locators["eda_settings"]["add_setting_button"].format(setting_type,button)
self.selenium.wait_until_page_contains_element(
locator, error=f"Run action button with locator '{locator}' is not available")
self.selenium.click_element(locator)
def verify_record(self, name):
""" Checks for the record in the object page and returns true if found else returns false
"""
locator = eda_lex_locators["account_list"].format(name)
self.selenium.page_should_contain_element(locator)
def header_field_value(self, title, value):
"""Validates if the specified header field has specified value"""
locator = eda_lex_locators["header_field_value"].format(title, value)
self.selenium.page_should_contain_element(locator)
def select_modal_checkbox(self, title):
""""""
locator = eda_lex_locators["modal"]["checkbox"].format(title)
self.selenium.get_webelement(locator).click()
def select_relatedlist(self, title):
"""click on the related list to open it"""
locator = eda_lex_locators["record"]["related"]["title"].format(title)
element = self.selenium.driver.find_element_by_xpath(locator)
self.selenium.driver.execute_script('arguments[0].click()', element)
def select_checkbox_in_eda_settings(self, loc_check, loc_checkbox):
""" Selects checkbox. Does nothing if checkbox is already checked """
if self._check_if_element_exists(loc_check):
return
else:
self.selenium.click_button("Edit")
self.selenium.get_webelement(loc_checkbox).click()
self.selenium.click_button("Save")
self.selenium.wait_until_element_is_visible(loc_check)
return
def _check_if_element_exists(self, xpath):
"""
Checks if the given xpath exists
this is only a helper function being called from other keywords
"""
elements = int(self.selenium.get_element_count(xpath))
return True if elements > 0 else False
def get_eda_locator(self, path, *args, **kwargs):
""" Returns a rendered locator string from the eda_lex_locators
dictionary. This can be useful if you want to use an element in
a different way than the built in keywords allow.
"""
locator = eda_lex_locators
for key in path.split('.'):
locator = locator[key]
main_loc = locator.format(*args, **kwargs)
return main_loc
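    # Illustrative note (assuming a "record.button" entry exists in the locator
    # dictionary, as used by click_record_button above): a keyword can render a
    # locator and reuse it directly, e.g.
    #   loc = self.get_eda_locator("record.button", "Save")
    #   self.selenium.click_element(loc)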
def wait_for_new_window(self, title):
""" Waits for specified window to be available
        by checking every second, up to 25 times
"""
window_found = False
for i in range(25):
i += 1
time.sleep(1)
titles = self.selenium.get_window_titles()
for j in titles:
if j == title:
window_found = True
return window_found
if window_found:
return
else:
continue
self.builtin.log("Timed out waiting for window with title " + title)
return window_found
@capture_screenshot_on_error
def wait_for_locator(self, path, *args, **kwargs):
main_loc = self.get_eda_locator(path,*args, **kwargs)
self.selenium.wait_until_element_is_visible(main_loc, timeout=60)
@capture_screenshot_on_error
def click_on_element(self,path, *args, **kwargs):
main_loc = self.get_eda_locator(path,*args, **kwargs)
self.selenium.wait_until_element_is_visible(main_loc)
self.selenium.click_element(main_loc)
def java_click_on_element(self,path, *args, **kwargs):
main_loc = self.get_eda_locator(path,*args, **kwargs)
self.selenium.wait_until_element_is_visible(main_loc)
# javascript is being used here because the usual selenium click is highly unstable for this element on MetaCI
self.selenium.driver.execute_script(
"arguments[0].click()",
self.selenium.driver.find_element_by_xpath(main_loc))
time.sleep(1)
def format_all(self, loc, value):
""" Formats the given locator with the value for all {} occurrences """
        count = loc.count('{')
        if count == 1:
            return loc.format(value)
        elif count == 2:
            return loc.format(value, value)
        elif count == 3:
            return loc.format(value, value, value)
def populate_placeholder(self, loc, value):
""" Populate placeholder element as a locator
and actual value of the place holder.
"""
xpath = eda_lex_locators["input_placeholder"].format(loc)
field = self.selenium.get_webelement(xpath)
field.send_keys(value)
field.send_keys(Keys.ARROW_DOWN + Keys.ENTER)
def edit_eda_settings_checkbox(self, checkbox_label, checkbox_toggle):
""" Updates the checkbox_label value to checkbox_toggle in the EDA settings page """
locator_checkbox_default = eda_lex_locators["eda_settings"]["checkbox_default"].format(checkbox_label)
locator_checkbox = eda_lex_locators["eda_settings"]["checkbox"].format(checkbox_label)
locator_edit = eda_lex_locators["eda_settings"]["edit"]
locator_save = eda_lex_locators["eda_settings"]["save"]
checkbox_default = self.selenium.get_element_attribute(locator_checkbox_default, "alt")
if checkbox_default == checkbox_toggle:
return
else:
self.selenium.click_element(locator_edit)
self.selenium.wait_until_page_contains_element(
locator_checkbox,
error="Checkbox not found on the page"
)
self.selenium.click_element(locator_checkbox)
self.selenium.click_element(locator_save)
locator_toast = eda_lex_locators["success_message"].format("Settings successfully saved.")
self.selenium.wait_until_page_contains_element(locator_toast)
@capture_screenshot_on_error
def verify_toast_message(self, value):
""" Verifies the toast message """
locator = eda_lex_locators["toast_message"].format(value)
self.selenium.wait_until_page_contains_element(locator)
@capture_screenshot_on_error
def close_toast_message(self):
""" Close the toast message banner """
locator = eda_lex_locators["toast_close"]
if self._check_if_element_exists(locator):
self.salesforce._jsclick(locator)
#self.selenium.capture_page_screenshot()
def get_eda_namespace_prefix(self):
""" Returns the EDA namespace value if the target org is a managed org else returns blank value """
if not hasattr(self.cumulusci, '_describe_result'):
self.cumulusci._describe_result = self.cumulusci.sf.describe()
objects = self.cumulusci._describe_result['sobjects']
level_object = [o for o in objects if o['label'] == 'Program Plan'][0]
return self._get_namespace_prefix(level_object['name'])
def _get_namespace_prefix(self, name):
"""" This is a helper function to capture the EDA namespace prefix of the target org """
parts = name.split('__')
if parts[-1] == 'c':
parts = parts[:-1]
if len(parts) > 1:
return parts[0] + '__'
else:
return ''
def close_all_tabs(self):
""" Gets the count of the tabs that are open and closes them all """
locator = eda_lex_locators["close_tab"]
count = int(self.selenium.get_element_count(locator))
for i in range(count):
self.selenium.wait_until_element_is_visible(locator)
self.selenium.get_webelement(locator).click()
def click_on_element_if_exists(self, path, *args, **kwargs):
""" Clicks on the element if it exists
by building a locator using the path and args
but the keyword will not fail in case the element doesn't exist
"""
main_loc = self.get_eda_locator(path, *args, **kwargs)
if self._check_if_element_exists(main_loc):
self.selenium.click_element(main_loc)
@capture_screenshot_on_error
def select_tab(self, title):
""" Switch between different tabs on a record page like Related, Details, News, Activity and Chatter
Pass title of the tab
"""
tab_found = False
locators = eda_lex_locators["tabs"].values()
for i in locators:
locator = i.format(title)
if self._check_if_element_exists(locator):
print(locator)
buttons = self.selenium.get_webelements(locator)
for button in buttons:
print(button)
if button.is_displayed():
print("button displayed is {}".format(button))
self.salesforce._focus(button)
button.click()
time.sleep(5)
tab_found = True
break
assert tab_found, "tab not found"
def shift_to_default_content(self):
""" Returns to main content, and out of iframe """
self.selenium.driver.switch_to.default_content()
currentFrame = self.selenium.driver.execute_script("return self.name")
self.builtin.log(
"Current frame 2: " + currentFrame
)
return
def open_custom_settings(self, title, error_message, capture_screen):
""" Performs a wait until the element shows on the page, and clicks the element """
self.selenium.wait_until_page_contains_element(
eda_lex_locators["custom_settings_title"].format(title),
timeout=60,
error=error_message
)
self.selenium.click_element(eda_lex_locators["custom_settings_title"].format(title))
if capture_screen:
self.selenium.capture_page_screenshot()
def verify_app_exists(self, app):
"""Verifies that the given app is present in the app launcher"""
locator = eda_lex_locators["app_tile"].format(app)
self.selenium.wait_until_page_contains_element(locator, timeout=60, error=f'{app} did not open in 1 min')
def verify_item_exists(self, item):
"""Verifies that the given item is present in the app launcher"""
locator = eda_lex_locators["app_item"].format(item)
self.selenium.wait_until_page_contains_element(locator, timeout=60, error=f'{item} did not open in 1 min')
def select_frame_with_value(self, value):
""" Selects the first displayed iframe on the page identified by the given value
:param value should be the 'id', 'title' or 'name' attribute of the webelement used to identify the iframe
"""
locator = eda_lex_locators['frame'].format(value, value, value)
frames = self.selenium.get_webelements(locator)
for frame in frames:
if frame.is_displayed():
for i in range(10):
try:
self.selenium.select_frame(locator)
return
except WebDriverException:
self.builtin.log("Caught NoSuchWindowException; trying again..", "WARN")
i += 1
time.sleep(0.5)
continue
raise Exception('Unable to find an iframe with a name, title or id with value "{}"'.format(value))
def go_to_eda_settings_tab(self, tab):
""" Navigates to EDA settings URL and click on the tab passed by the parameter
and then loads the page object identified by the tab name
"""
url_pattern = "{root}/lightning/n/{object}"
object_name = "{}HEDA_Settings".format(self.get_eda_namespace_prefix())
url = url_pattern.format(root=self.cumulusci.org.lightning_base_url, object=object_name)
self.selenium.go_to(url)
self.salesforce.wait_until_loading_is_complete()
self.wait_for_locator("frame", "accessibility title", "accessibility title", "accessibility title")
self.select_frame_with_value("accessibility title")
locator_tab = eda_lex_locators["eda_settings"]["tab"].format(tab)
self.selenium.wait_until_page_contains_element(locator_tab, error=f"'{tab}' tab is not available on the page")
self.salesforce._jsclick(locator_tab)
tab = tab.replace(" ", "_")
self.pageobjects.load_page_object(tab, "HEDA_Settings")
def go_to_groups_home(self):
""" Navigates to the Home view of the groups tab """
url = self.cumulusci.org.lightning_base_url
url = "{}/lightning/o/CollaborationGroup/list?filterName=Recent".format(url)
self.selenium.go_to(url)
self.salesforce.wait_until_loading_is_complete()
def go_to_custom_settings_setup(self):
""" Navigates to the Home view of the custom settings tab in set up"""
url = self.cumulusci.org.lightning_base_url
url = "{}/lightning/setup/CustomSettings/home".format(url)
self.selenium.go_to(url)
self.salesforce.wait_until_loading_is_complete()
def click_edit_on_eda_settings_page(self):
locator = eda_lex_locators["eda_settings"]["edit"]
self.selenium.wait_until_page_contains_element(locator, error="Edit button is not available on the page")
self.selenium.wait_until_element_is_visible(locator)
self.selenium.click_element(locator)
@capture_screenshot_on_error
def click_action_button_on_eda_settings_page(self, action):
""" Clicks on the action (eg: Save, Cancel) button on the EDA Settings page """
locator = eda_lex_locators["eda_settings"]["action"].format(lower(action))
self.selenium.wait_until_page_contains_element(
locator, error=f"Action button with locator '{locator}' is not available on the EDA settings page")
self.salesforce._jsclick(locator)
if action == "Save":
self.verify_toast_message("Settings successfully saved.")
self.close_toast_message()
@capture_screenshot_on_error
def update_checkbox_value(self,**kwargs):
""" This method will update the checkbox field value passed in keyword arguments
Pass the expected value to be set in the checkbox field from the tests
true - checked, false - unchecked
"""
for field,value in kwargs.items():
locator = eda_lex_locators["eda_settings_program_plans"]["checkbox_read"].format(field)
self.selenium.wait_until_page_contains_element(locator, timeout=60)
self.selenium.wait_until_element_is_visible(locator)
actual_value = self.selenium.get_element_attribute(locator, "alt")
self.builtin.log("Locator " + locator + "actual value is " + actual_value)
if not str(actual_value).lower() == str(value).lower():
self.click_action_button_on_eda_settings_page("Edit")
locator_edit = eda_lex_locators["eda_settings_program_plans"]["checkbox_edit"].format(field)
self.selenium.wait_until_page_contains_element(locator_edit,
error=f"'{locator_edit}' is not available ")
for i in range(3):
self.builtin.log("Iteration: " + str(i) + "for locator" + locator_edit)
self.selenium.click_element(locator_edit)
time.sleep(1)
actual_value = self.selenium.get_element_attribute(locator_edit, "data-qa-checkbox-state")
if actual_value == str(value).lower():
self.builtin.log("The checkbox value in edit mode is" + actual_value)
self.builtin.log("Updated locator " + locator_edit)
break
self.click_action_button_on_eda_settings_page("Save")
time.sleep(0.25) #This wait is necessary to avoid toast message inconsistencies
def update_dropdown_value(self,**kwargs):
""" This method will update the drop down field value passed in keyword arguments
Pass the expected value to be set in the drop down field from the tests
"""
for field,value in kwargs.items():
locator = eda_lex_locators["eda_settings_cc"]["dropdown_values"].format(field,value)
self.selenium.wait_until_page_contains_element(locator,
error=f"'{value}' as dropdown value in '{field}' field is not available ")
self.selenium.click_element(locator)
def verify_selected_dropdown_value(self,**kwargs):
""" This method will confirm if the value to be set in dropdown field is retained after save action
Pass the expected value to be verified from the tests using keyword arguments
"""
for field,value in kwargs.items():
locator = eda_lex_locators["eda_settings_cc"]["updated_dropdown_value"].format(field,value)
self.selenium.wait_until_element_is_visible(locator,
error= "Element is not displayed for the user")
actual_value = self.selenium.get_webelement(locator).text
if not str(value).lower() == str(actual_value).lower() :
raise Exception (f"Drop down value in '{field}' is not updated and the value is '{actual_value}'")
def verify_dropdown_field_status(self, **kwargs):
""" Verify the drop down field is disabled/enabled for the user
we have to pass the name of the field and the expected status
of the field as either enabled or disabled
"""
for field,expected_value in kwargs.items():
locator = eda_lex_locators["eda_settings"]["dropdown_field"].format(field)
self.selenium.wait_until_page_contains_element(locator, timeout=60)
self.selenium.wait_until_element_is_visible(locator,
error= f"Element '{field}' is not displayed for the user")
actual_value = self.selenium.get_webelement(locator).get_attribute(expected_value)
expected_value = bool(expected_value == "disabled")
if not str(expected_value).lower() == str(actual_value).lower() :
raise Exception (f"Drop down field {field} status is {actual_value} instead of {expected_value}")
@capture_screenshot_on_error
def verify_checkbox_value(self,**kwargs):
""" This method validates the checkbox value for the field passed in kwargs
Pass the field name and expected value to be verified from the tests using
keyword arguments. true - checked, false - unchecked
"""
self.selenium.execute_javascript("window.scrollTo(0, document.body.scrollHeight)")
self.selenium.execute_javascript("window.scrollTo(document.body.scrollHeight, 0)")
for field,expected_value in kwargs.items():
locator = eda_lex_locators["eda_settings_system"]["default_checkbox"].format(field)
self.selenium.page_should_contain_element(locator)
self.selenium.wait_until_element_is_visible(locator,
error= "Element is not displayed for the user")
actual_value = self.selenium.get_element_attribute(locator, "alt")
self.builtin.log("Actual value of " + locator + " is " + actual_value)
if not str(expected_value).lower() == str(actual_value).lower() :
raise Exception (f"Checkbox value in {field} is {actual_value} but it should be {expected_value}")
def verify_dropdown_value(self,**kwargs):
""" This method validates the dropdown value for the field passed in kwargs
Pass the field name and expected value to be verified from the tests using
keyword arguments
"""
for field,expected_value in kwargs.items():
locator = eda_lex_locators["eda_settings_system"]["default_dropdown_value"].format(field,expected_value)
self.selenium.wait_until_page_contains_element(locator, timeout=60)
self.selenium.wait_until_element_is_visible(locator,
error= "Element is not displayed for the user")
actual_value = self.selenium.get_webelement(locator).text
self.builtin.log("Actual value of " + locator + " is " + actual_value)
if not str(expected_value).lower() == str(actual_value).lower() :
raise Exception (f"Dropdown value in {field} is {actual_value} but it should be {expected_value}")
def verify_action_button_status(self, **kwargs):
""" Verify the action button is disabled/enabled for the user
we have to pass the name of the button and the expected status
of the action button as either enabled or disabled
"""
for button,expected_value in kwargs.items():
locator = eda_lex_locators["eda_settings"]["action_button"].format(button)
self.selenium.page_should_contain_element(locator)
self.selenium.wait_until_element_is_visible(locator,
error= f"Element '{button}' button is not displayed for the user")
time.sleep(1)
actual_value = self.selenium.get_webelement(locator).get_attribute("disabled")
expected_value = bool(expected_value == "disabled")
if not str(expected_value).lower() == str(actual_value).lower() :
raise Exception (f"Element {button} button status is {actual_value} instead of {expected_value}")
def verify_text_appears(self, textMessage):
""" Verify the text message is displayed
this message gets displayed when the 'Run copy' button is clicked
in both read and edit mode
"""
time.sleep(1) #No other element to wait until this page loads so using sleep
locator = eda_lex_locators["eda_settings_courses"]["text_message"].format(textMessage)
self.selenium.wait_until_element_is_enabled(locator,
error="Run copy text is not displayed")
text = self.selenium.get_webelement(locator).get_attribute("className")
self.builtin.log("The text message is " + text)
if "slds-hide" in text:
raise Exception(f"The text message {textMessage} is not displayed")
def scroll_web_page(self):
""" This method will scroll to the bottom of the page and back to the top using javascript
page scroll commands
"""
self.selenium.execute_javascript("window.scrollTo(0, document.body.scrollHeight)")
time.sleep(0.1)
self.selenium.execute_javascript("window.scrollTo(document.body.scrollHeight, 0)")
| bsd-3-clause | -1,373,121,710,963,524,900 | 48.406475 | 125 | 0.624063 | false |
A3sal0n/FalconGate | lib/objects.py | 1 | 18368 | import collections
from lib.logger import *
class HostAlertTemplate:
def __init__(self, homenet, alert):
self.homenet = homenet
self.alert = alert
self.subject = "A " + alert[6] + " alert was reported for host " + alert[7]
self.indicators = alert[8].replace('.', '[.]').split('|')
self.references = alert[11].split('|')
self.body = ''
def create_body(self):
self.body = "Falcongate has reported a " + self.alert[6] + " alert for the device below:\r\n\r\n" \
"IP address: " + self.alert[7] + "\r\n" \
"Hostname: " + str(self.homenet.hosts[self.alert[7]].hostname) + "\r\n" \
"MAC address: " + str(self.homenet.hosts[self.alert[7]].mac) + "\r\n" \
"MAC vendor: " + str(self.homenet.hosts[self.alert[7]].vendor) + "\r\n" \
"Operating system family: " + "\r\n".join(self.homenet.hosts[self.alert[7]].os_family) + "\r\n" \
"Device family: " + str("\r\n".join(self.homenet.hosts[self.alert[7]].device_family)) + "\r\n\r\n" \
"Description: " + self.alert[10] + "\r\n\r\n" \
"The following indicators were detected:\r\n" + str("\r\n".join(self.indicators)) + "\r\n\r\n" \
"References:\r\n" + str("\r\n".join(self.references)) + "\r\n\r\n" \
"This is the first time this incident is reported.\r\n" \
"We recommend to investigate this issue as soon as possible."
class AccountBreachAlertTemplate:
def __init__(self, alert):
self.alert = alert
self.subject = "A " + alert[6] + " alert was reported for account " + alert[7]
self.indicators = alert[8].split('|')
self.references = alert[11].split('|')
self.body = ''
def create_body(self):
self.body = "Falcongate has reported a " + self.alert[6] + " alert:\r\n\r\n" \
"Account at risk: " + self.alert[7] + "\r\n\r\n" \
"Description: " + self.alert[10] + "\r\n\r\n" \
"The following indicators were detected:\r\n" + str("\r\n".join(self.indicators)) + "\r\n\r\n" \
"References:\r\n" + str("\r\n".join(self.references)) + "\r\n\r\n" \
"This is the first time this incident is reported.\r\n" \
"We recommend to change immediately the password for this account to prevent further misuse by" \
" malicious hackers."
class DefaultCredsAlertTemplate:
def __init__(self, homenet, alert):
self.homenet = homenet
self.alert = alert
self.subject = "An account with default vendor credentials was found on host " + alert[7]
self.indicators = alert[8].replace('.', '[.]').split('|')
self.references = alert[11].split('|')
self.body = ''
def create_body(self):
self.body = "Falcongate has reported a " + self.alert[6] + " alert for the device below:\r\n\r\n" \
"IP address: " + self.alert[7] + "\r\n" \
"Hostname: " + str(self.homenet.hosts[self.alert[7]].hostname) + "\r\n" \
"MAC address: " + str(self.homenet.hosts[self.alert[7]].mac) + "\r\n" \
"MAC vendor: " + str(self.homenet.hosts[self.alert[7]].vendor) + "\r\n" \
"Operating system family: " + "\r\n".join(self.homenet.hosts[self.alert[7]].os_family) + "\r\n" \
"Device family: " + str("\r\n".join(self.homenet.hosts[self.alert[7]].device_family)) + "\r\n\r\n" \
"Description: " + self.alert[10] + "\r\n\r\n" \
"The following indicators were detected:\r\n" + str("\r\n".join(self.indicators)) + "\r\n\r\n" \
"References:\r\n" + str("\r\n".join(self.references)) + "\r\n\r\n" \
"We recommend you to fix this issue as soon as possible."
class DNSRequest:
def __init__(self):
self.ts = None
self.lseen = None
self.query = None
self.sld = None
self.tld = None
self.cip = None
self.sip = None
self.qtype = None
self.qresult = None
self.bad = False
self.counter = 0
class HTTPObject:
def __init__(self):
self.ts = None
self.lseen = None
self.src_ip = None
self.dst_ip = None
self.dest_port = None
self.host = None
# {'url': ['method', 'status_code', 'user_agent', 'referrer', 'response_body_len', 'proxied', 'mime_type']}
self.urls = {}
class Conn:
def __init__(self):
self.ts = None
self.lseen = None
self.src_ip = None
self.dst_ip = None
self.dst_port = None
self.proto = None
self.service = None
self.direction = None
self.duration = 0
self.client_bytes = 0
self.server_bytes = 0
self.client_packets = 0
self.server_packets = 0
self.src_country_code = None
self.src_country_name = None
self.dst_country_code = None
self.dst_country_name = None
self.counter = 0
class PortScan:
def __init__(self):
self.ts = None
self.lseen = None
self.src_ip = None
self.dst_ip = None
self.duration = None
class Host:
def __init__(self):
self.ts = None
self.lseen = None
self.mac = None
self.ip = None
self.hostname = None
self.vendor = None
self.os_family = []
self.device_family = []
self.dga_domains = []
self.spammed_domains = []
self.user_agents = []
self.dns = {}
self.conns = {}
self.files = {}
self.scans = {}
self.alerts = []
self.interesting_urls = []
self.tcp_ports = []
self.udp_ports = []
self.vuln_accounts = []
class Network:
def __init__(self):
self.pid = None
self.executable = None
self.args = []
self.hosts = {}
self.mac_history = {}
self.interface = None
self.mac = None
self.ip = None
self.gateway = None
self.netmask = None
self.net_cidr = None
self.bad_ips = {'Tor': [], 'Malware': [], 'Botnet': [], 'Hacking': [], 'Phishing': [], 'Ransomware': [],
'Ads': [], 'User': []}
self.bad_domains = {'Tor': [], 'Malware': [], 'Botnet': [], 'Hacking': [], 'Phishing': [], 'Ransomware': [],
'Ads': [], 'Crypto-miners': [], 'User': []}
self.user_blacklist = []
self.user_whitelist = []
self.user_domain_blacklist = []
self.user_domain_whitelist = []
self.target_mime_types = ["application/x-7z-compressed", "application/x-ace-compressed", "application/x-shockwave-flash",
"application/pdf", "application/vnd.android.package-archive", "application/octet-stream",
"application/x-bzip", "application/x-bzip2", "application/x-debian-package", "application/java-archive",
" application/javascript", "application/x-msdownload", "application/x-ms-application", "application/vnd.ms-excel",
"application/vnd.ms-excel.addin.macroenabled.12", "application/vnd.ms-excel.sheet.binary.macroenabled.12",
"application/vnd.ms-excel.template.macroenabled.12", "application/vnd.ms-excel.sheet.macroenabled.12",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"application/vnd.openxmlformats-officedocument.wordprocessingml.template", "application/vnd.ms-powerpoint.slide.macroenabled.12",
"application/vnd.ms-powerpoint.presentation.macroenabled.12", "application/vnd.ms-powerpoint.slideshow.macroenabled.12",
"application/vnd.ms-powerpoint.template.macroenabled.12", "application/msword", "application/vnd.ms-word.document.macroenabled.12",
"application/vnd.ms-word.template.macroenabled.12", "application/x-rar-compressed", "application/x-tar", "application/zip", "application/x-dosexec",
"application/x-ms-installer", "application/x-elf", "application/x-sh", "text/x-perl", "text/x-python", "image/x-icon", "application/x-executable"]
self.tld_whitelist = ['local', 'test', 'localhost', 'example', 'invalid', 'arpa']
# Malicious TLDs
# https://www.tripwire.com/state-of-security/security-data-protection/cyber-security/most-suspicious-tlds-revealed-by-blue-coat-systems/
# https://www.spamhaus.org/statistics/tlds/
self.tld_blacklist = ['zip', 'review', 'country', 'kim', 'cricket', 'science', 'work', 'party', 'gq', 'link',
'gdn', 'stream', 'download', 'top', 'us', 'study', 'click', 'biz']
self.vt_api_key = None
self.dst_emails = None
self.email_watchlist = []
self.fg_intel_creds = None
self.fg_intel_ip = None
self.fg_intel_domains = None
self.vt_api_domain_url = None
self.vt_api_ip_url = None
self.vt_api_file_url = None
self.hibp_api_url = None
self.mailer_mode = None
self.mailer_address = None
self.mailer_pwd = None
self.allow_tor = None
self.last_alert_id = 0
self.blacklist_sources_ip = {}
self.blacklist_sources_domain = {}
class Report:
def __init__(self, alert):
self.alert = alert
self.alert_name = None
self.description = None
self.src_mac = None
self.src_ip = None
self.vendor = None
self.vt_reports = []
class Indicator:
def __init__(self):
self.DGA = None
self.domain = []
self.dst_ip = []
class File:
def __init__(self):
self.ts = None
self.fuid = None
self.lseen = None
self.tx_hosts = None
self.rx_hosts = None
self.conn_id = None
self.mime_type = None
self.md5 = None
self.sha1 = None
self.size = None
self.vt_flag = False
self.vt_positives = 0
self.vt_report = None
class DefaultCredentials:
def __init__(self):
self.service = ''
self.port = ''
self.user = ''
self.password = ''
class Country:
def __init__(self, code, name):
self.code = code
self.name = name
self.is_risky = self.is_risky(code)
self.hourly_stats = {}
@staticmethod
def is_risky(ccode):
risk_countries = ["CN", "US", "TR", "BR", "RU", "VN", "JP", "IN", "TW", "RO", "HU"]
if ccode in risk_countries:
return True
else:
return False
def get_stats(self, stime, etime):
sout = {"bytes_sent": 0, "bytes_received": 0, "pqt_sent": 0, "pqt_received": 0, "nconn": 0}
skeys = sorted(self.hourly_stats)
try:
for k in skeys:
if stime <= k <= etime:
sout["bytes_sent"] += self.hourly_stats[k].data_sent
sout["bytes_received"] += self.hourly_stats[k].data_received
sout["pqt_sent"] += self.hourly_stats[k].pqt_sent
sout["pqt_received"] += self.hourly_stats[k].pqt_received
sout["nconn"] += self.hourly_stats[k].nconn
except Exception as e:
log.debug('FG-ERROR: ' + str(e.__doc__) + " - " + str(e))
return sout
class HourStats:
def __init__(self):
self.data_sent = 0
self.data_received = 0
self.pqt_sent = 0
self.pqt_received = 0
self.nconn = 0
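# Minimal usage sketch (illustrative only; this helper is not called anywhere in
# the module). It shows how Country.get_stats() sums the HourStats buckets whose
# epoch keys fall between stime and etime. The timestamps below are hypothetical.
def _example_country_stats():
    cn = Country("DE", "GERMANY")
    bucket = HourStats()
    bucket.data_sent = 1024        # bytes sent during this hourly bucket
    bucket.data_received = 2048    # bytes received during this hourly bucket
    bucket.nconn = 3               # connections observed in this bucket
    cn.hourly_stats[1500000000] = bucket
    # Sums every bucket keyed between the two epoch timestamps (inclusive)
    return cn.get_stats(1499990000, 1500010000)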
# Other useful stuff
CC = {
"AF": "AFGHANISTAN",
"AX": "ALAND ISLANDS",
"AL": "ALBANIA",
"DZ": "ALGERIA",
"AS": "AMERICAN SAMOA",
"AD": "ANDORRA",
"AO": "ANGOLA",
"AI": "ANGUILLA",
"AQ": "ANTARCTICA",
"AG": "ANTIGUA AND BARBUDA",
"AR": "ARGENTINA",
"AM": "ARMENIA",
"AW": "ARUBA",
"AU": "AUSTRALIA",
"AT": "AUSTRIA",
"AZ": "AZERBAIJAN",
"BS": "BAHAMAS",
"BH": "BAHRAIN",
"BD": "BANGLADESH",
"BB": "BARBADOS",
"BY": "BELARUS",
"BE": "BELGIUM",
"BZ": "BELIZE",
"BJ": "BENIN",
"BM": "BERMUDA",
"BT": "BHUTAN",
"BO": "BOLIVIA, PLURINATIONAL STATE OF",
"BQ": "BONAIRE, SINT EUSTATIUS AND SABA",
"BA": "BOSNIA AND HERZEGOVINA",
"BW": "BOTSWANA",
"BV": "BOUVET ISLAND",
"BR": "BRAZIL",
"IO": "BRITISH INDIAN OCEAN TERRITORY",
"BN": "BRUNEI DARUSSALAM",
"BG": "BULGARIA",
"BF": "BURKINA FASO",
"BI": "BURUNDI",
"KH": "CAMBODIA",
"CM": "CAMEROON",
"CA": "CANADA",
"CV": "CAPE VERDE",
"KY": "CAYMAN ISLANDS",
"CF": "CENTRAL AFRICAN REPUBLIC",
"TD": "CHAD",
"CL": "CHILE",
"CN": "CHINA",
"CX": "CHRISTMAS ISLAND",
"CC": "COCOS (KEELING) ISLANDS",
"CO": "COLOMBIA",
"KM": "COMOROS",
"CG": "CONGO",
"CD": "CONGO, THE DEMOCRATIC REPUBLIC OF THE",
"CK": "COOK ISLANDS",
"CR": "COSTA RICA",
"CI": "COTE D'IVOIRE",
"HR": "CROATIA",
"CU": "CUBA",
"CW": "CURACAO",
"CY": "CYPRUS",
"CZ": "CZECH REPUBLIC",
"DK": "DENMARK",
"DJ": "DJIBOUTI",
"DM": "DOMINICA",
"DO": "DOMINICAN REPUBLIC",
"EC": "ECUADOR",
"EG": "EGYPT",
"SV": "EL SALVADOR",
"GQ": "EQUATORIAL GUINEA",
"ER": "ERITREA",
"EE": "ESTONIA",
"EU": "EUROPE",
"ET": "ETHIOPIA",
"FK": "FALKLAND ISLANDS (MALVINAS)",
"FO": "FAROE ISLANDS",
"FJ": "FIJI",
"FI": "FINLAND",
"FR": "FRANCE",
"GF": "FRENCH GUIANA",
"PF": "FRENCH POLYNESIA",
"TF": "FRENCH SOUTHERN TERRITORIES",
"GA": "GABON",
"GM": "GAMBIA",
"GE": "GEORGIA",
"DE": "GERMANY",
"GH": "GHANA",
"GI": "GIBRALTAR",
"GR": "GREECE",
"GL": "GREENLAND",
"GD": "GRENADA",
"GP": "GUADELOUPE",
"GU": "GUAM",
"GT": "GUATEMALA",
"GG": "GUERNSEY",
"GN": "GUINEA",
"GW": "GUINEA-BISSAU",
"GY": "GUYANA",
"HT": "HAITI",
"HM": "HEARD ISLAND AND MCDONALD ISLANDS",
"VA": "HOLY SEE (VATICAN CITY STATE)",
"HN": "HONDURAS",
"HK": "HONG KONG",
"HU": "HUNGARY",
"IS": "ICELAND",
"IN": "INDIA",
"ID": "INDONESIA",
"IR": "IRAN, ISLAMIC REPUBLIC OF",
"IQ": "IRAQ",
"IE": "IRELAND",
"IM": "ISLE OF MAN",
"IL": "ISRAEL",
"IT": "ITALY",
"JM": "JAMAICA",
"JP": "JAPAN",
"JE": "JERSEY",
"JO": "JORDAN",
"KZ": "KAZAKHSTAN",
"KE": "KENYA",
"KI": "KIRIBATI",
"KP": "KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF",
"KR": "KOREA, REPUBLIC OF",
"KW": "KUWAIT",
"KG": "KYRGYZSTAN",
"LA": "LAO PEOPLE'S DEMOCRATIC REPUBLIC",
"LV": "LATVIA",
"LB": "LEBANON",
"LS": "LESOTHO",
"LR": "LIBERIA",
"LY": "LIBYA",
"LI": "LIECHTENSTEIN",
"LT": "LITHUANIA",
"LU": "LUXEMBOURG",
"MO": "MACAO",
"MK": "MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF",
"MG": "MADAGASCAR",
"MW": "MALAWI",
"MY": "MALAYSIA",
"MV": "MALDIVES",
"ML": "MALI",
"MT": "MALTA",
"MH": "MARSHALL ISLANDS",
"MQ": "MARTINIQUE",
"MR": "MAURITANIA",
"MU": "MAURITIUS",
"YT": "MAYOTTE",
"MX": "MEXICO",
"FM": "MICRONESIA, FEDERATED STATES OF",
"MD": "MOLDOVA, REPUBLIC OF",
"MC": "MONACO",
"MN": "MONGOLIA",
"ME": "MONTENEGRO",
"MS": "MONTSERRAT",
"MA": "MOROCCO",
"MZ": "MOZAMBIQUE",
"MM": "MYANMAR",
"NA": "NAMIBIA",
"NR": "NAURU",
"NP": "NEPAL",
"NL": "NETHERLANDS",
"NC": "NEW CALEDONIA",
"NZ": "NEW ZEALAND",
"NI": "NICARAGUA",
"NE": "NIGER",
"NG": "NIGERIA",
"NU": "NIUE",
"NF": "NORFOLK ISLAND",
"MP": "NORTHERN MARIANA ISLANDS",
"NO": "NORWAY",
"OM": "OMAN",
"PK": "PAKISTAN",
"PW": "PALAU",
"PS": "PALESTINE, STATE OF",
"PA": "PANAMA",
"PG": "PAPUA NEW GUINEA",
"PY": "PARAGUAY",
"PE": "PERU",
"PH": "PHILIPPINES",
"PN": "PITCAIRN",
"PL": "POLAND",
"PT": "PORTUGAL",
"PR": "PUERTO RICO",
"QA": "QATAR",
"RE": "REUNION",
"RO": "ROMANIA",
"RU": "RUSSIAN FEDERATION",
"RW": "RWANDA",
"BL": "SAINT BARTHELEMY",
"SH": "SAINT HELENA, ASCENSION AND TRISTAN DA CUNHA",
"KN": "SAINT KITTS AND NEVIS",
"LC": "SAINT LUCIA",
"MF": "SAINT MARTIN (FRENCH PART)",
"PM": "SAINT PIERRE AND MIQUELON",
"VC": "SAINT VINCENT AND THE GRENADINES",
"WS": "SAMOA",
"SM": "SAN MARINO",
"ST": "SAO TOME AND PRINCIPE",
"SA": "SAUDI ARABIA",
"SN": "SENEGAL",
"RS": "SERBIA",
"SC": "SEYCHELLES",
"SL": "SIERRA LEONE",
"SG": "SINGAPORE",
"SX": "SINT MAARTEN (DUTCH PART)",
"SK": "SLOVAKIA",
"SI": "SLOVENIA",
"SB": "SOLOMON ISLANDS",
"SO": "SOMALIA",
"ZA": "SOUTH AFRICA",
"GS": "SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS",
"SS": "SOUTH SUDAN",
"ES": "SPAIN",
"LK": "SRI LANKA",
"SD": "SUDAN",
"SR": "SURINAME",
"SJ": "SVALBARD AND JAN MAYEN",
"SZ": "SWAZILAND",
"SE": "SWEDEN",
"CH": "SWITZERLAND",
"SY": "SYRIAN ARAB REPUBLIC",
"TW": "TAIWAN, PROVINCE OF CHINA",
"TJ": "TAJIKISTAN",
"TZ": "TANZANIA, UNITED REPUBLIC OF",
"TH": "THAILAND",
"TL": "TIMOR-LESTE",
"TG": "TOGO",
"TK": "TOKELAU",
"TO": "TONGA",
"TT": "TRINIDAD AND TOBAGO",
"TN": "TUNISIA",
"TR": "TURKEY",
"TM": "TURKMENISTAN",
"TC": "TURKS AND CAICOS ISLANDS",
"TV": "TUVALU",
"UG": "UGANDA",
"UA": "UKRAINE",
"AE": "UNITED ARAB EMIRATES",
"GB": "UNITED KINGDOM",
"US": "UNITED STATES",
"UM": "UNITED STATES MINOR OUTLYING ISLANDS",
"UY": "URUGUAY",
"UZ": "UZBEKISTAN",
"VU": "VANUATU",
"VE": "VENEZUELA, BOLIVARIAN REPUBLIC OF",
"VN": "VIET NAM",
"VG": "VIRGIN ISLANDS, BRITISH",
"VI": "VIRGIN ISLANDS, U.S.",
"WF": "WALLIS AND FUTUNA",
"EH": "WESTERN SAHARA",
"YE": "YEMEN",
"ZM": "ZAMBIA",
"ZW": "ZIMBABWE",
} | gpl-3.0 | -3,719,637,293,147,984,000 | 32.642857 | 182 | 0.520579 | false |
raymondanthony/youtube-dl | youtube_dl/extractor/ustream.py | 1 | 3720 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
)
class UstreamIE(InfoExtractor):
_VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<videoID>\d+)'
IE_NAME = 'ustream'
_TEST = {
'url': 'http://www.ustream.tv/recorded/20274954',
'md5': '088f151799e8f572f84eb62f17d73e5c',
'info_dict': {
'id': '20274954',
'ext': 'flv',
'uploader': 'Young Americans for Liberty',
'title': 'Young Americans for Liberty February 7, 2012 2:28 AM',
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('videoID')
# some sites use this embed format (see: http://github.com/rg3/youtube-dl/issues/2990)
if m.group('type') == 'embed/recorded':
video_id = m.group('videoID')
desktop_url = 'http://www.ustream.tv/recorded/' + video_id
return self.url_result(desktop_url, 'Ustream')
if m.group('type') == 'embed':
video_id = m.group('videoID')
webpage = self._download_webpage(url, video_id)
desktop_video_id = self._html_search_regex(
r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id')
desktop_url = 'http://www.ustream.tv/recorded/' + desktop_video_id
return self.url_result(desktop_url, 'Ustream')
video_url = 'http://tcdn.ustream.tv/video/%s' % video_id
webpage = self._download_webpage(url, video_id)
self.report_extraction(video_id)
video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
webpage, 'title')
uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
webpage, 'uploader', fatal=False, flags=re.DOTALL)
thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'url': video_url,
'ext': 'flv',
'title': video_title,
'uploader': uploader,
'thumbnail': thumbnail,
}
class UstreamChannelIE(InfoExtractor):
_VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)'
IE_NAME = 'ustream:channel'
_TEST = {
'url': 'http://www.ustream.tv/channel/channeljapan',
'info_dict': {
'id': '10874166',
},
'playlist_mincount': 17,
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
display_id = m.group('slug')
webpage = self._download_webpage(url, display_id)
channel_id = self._html_search_meta('ustream:channel_id', webpage)
BASE = 'http://www.ustream.tv'
next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
video_ids = []
while next_url:
reply = self._download_json(
compat_urlparse.urljoin(BASE, next_url), display_id,
note='Downloading video information (next: %d)' % (len(video_ids) + 1))
video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data']))
next_url = reply['nextUrl']
entries = [
self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream')
for vid in video_ids]
return {
'_type': 'playlist',
'id': channel_id,
'display_id': display_id,
'entries': entries,
}
| unlicense | 8,401,983,062,399,923,000 | 35.831683 | 102 | 0.53414 | false |
oscarlorentzon/repstruct | tests/test_descriptor.py | 1 | 4060 | import unittest
import numpy as np
from repstruct.features.descriptor import normalize_by_division, classify_euclidean, normalize, classify_cosine
class TestDescriptor(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testNormalize(self):
v = [1, 1]
X = np.array([v])
result = normalize(X)
norm = np.sqrt(np.sum(np.multiply(result, result), axis=1))
self.assertLess(abs(1.0 - norm), 0.0000001, 'The norm is not one for the normalized array.')
def testNormalizeMultipleVectors(self):
v = [1, 1]
X = np.array([v, v, v])
result = normalize(X)
norm = np.sqrt(np.sum(np.multiply(result, result), axis=1))
self.assertLess(abs(1.0 - norm[0]), 0.0000001, 'The norm is not one for the normalized array.')
self.assertLess(abs(1.0 - norm[1]), 0.0000001, 'The norm is not one for the normalized array.')
self.assertLess(abs(1.0 - norm[2]), 0.0000001, 'The norm is not one for the normalized array.')
def testNormalizeByDivision(self):
l = [1, 2]
v = np.array(l)
n = np.array(l)
result = normalize_by_division(v, n)
self.assertLess(abs(1.0 - np.linalg.norm(result)), 0.0000001, 'The norm is not one for the normalized array.')
self.assertEquals(result[0], result[1], 'The vector items should be equal after normalization.')
def testClassifyEuclideanOneVector(self):
X = normalize(np.array([[1, 1]]))
C = normalize(np.array([[1, 1], [0, 1]]))
result = classify_euclidean(X, C)
self.assertEqual(2, result.shape[0])
self.assertEqual(1, result[0])
self.assertEqual(0, result[1])
def testClassifyEuclideanMultipleVectors(self):
X = normalize(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
C = normalize(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
result = classify_euclidean(X, C)
self.assertEqual(3, result.shape[0])
self.assertEqual(3, np.sum(result))
self.assertEqual(1, result[0])
self.assertEqual(1, result[1])
self.assertEqual(1, result[2])
def testClassifyEuclideanMultipleVectorsSameCenter(self):
X = normalize(np.array([[1, 0, 0], [1, 0, 0], [1, 0, 0]]))
C = normalize(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
result = classify_euclidean(X, C)
self.assertEqual(3, result.shape[0])
self.assertEqual(3, np.sum(result))
self.assertEqual(3, result[0])
self.assertEqual(0, result[1])
self.assertEqual(0, result[2])
def testClassifyCosineOneVector(self):
X = normalize(np.array([[1, 1]]))
C = normalize(np.array([[1, 1], [0, 1]]))
result = classify_cosine(X, C)
self.assertEqual(2, result.shape[0])
self.assertEqual(1, result[0])
self.assertEqual(0, result[1])
def testClassifyCosineMultipleVectors(self):
X = normalize(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
C = normalize(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
result = classify_cosine(X, C)
self.assertEqual(3, result.shape[0])
self.assertEqual(3, np.sum(result))
self.assertEqual(1, result[0])
self.assertEqual(1, result[1])
self.assertEqual(1, result[2])
def testClassifyCosineMultipleVectorsSameCenter(self):
X = normalize(np.array([[1, 0, 0], [1, 0, 0], [1, 0, 0]]))
C = normalize(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
result = classify_cosine(X, C)
self.assertEqual(3, result.shape[0])
self.assertEqual(3, np.sum(result))
self.assertEqual(3, result[0])
self.assertEqual(0, result[1])
self.assertEqual(0, result[2])
if __name__ == '__main__':
unittest.main() | bsd-3-clause | 1,542,118,717,912,962,300 | 33.709402 | 118 | 0.553941 | false |
valdt/Wumpus | server/serverHandler.py | 1 | 1417 | import pickle, socket, time, threading
class ServerHandler:
def __init__(self,host,port):
self.activePlayers = []
        self.serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Starting arguments for the socket are general defaults, which I took
        self.serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # from the official Python documentation.
self.serverSocket.bind((host, port)) #Lock'n'Load ... bind*
        self.serverSocket.listen(10) # Listening for new connections
    def pulse(self, payload, clientsocket): # Send a payload to the client and wait for a reply (tests the connection).
        try:
            defaultError = ["error","Reply took too long and TTL expired."]
clientsocket.send(pickle.dumps(payload, -1))
ttl = 0
while True:
ttl += 1
data = clientsocket.recv(2048)
if data and data != "":
return pickle.loads(data)
elif ttl > 10:
return defaultError
except:
defaultError = ["error","Function failed"]
return defaultError
    def getPlayerNames(self): # Collect all active players into a dict keyed by player name; used by filters around the program.
playerNames = {}
for player in self.activePlayers:
playerNames[player.name] = player
return playerNames
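# Rough usage sketch (an assumption, not part of the original server): bind the
# handler, accept one client and check the link with pulse(). The host, port and
# payload below are hypothetical.
if __name__ == '__main__':
    handler = ServerHandler('0.0.0.0', 12345)
    client_socket, _address = handler.serverSocket.accept()
    reply = handler.pulse(["ping"], client_socket)
    print(reply)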
| lgpl-3.0 | 3,241,649,935,972,491,000 | 47.862069 | 151 | 0.62103 | false |
mcrav/pyxtal | xtal/xtal.py | 1 | 1634 | import numpy as np
def get_metric_matrix(a, b, c, alpha, beta, gamma):
'''
Create metric matrix as numpy array from unit cell parameters.
Return metric matrix.
'''
return np.array([[a**2, a*b*np.cos(gamma), a*c*np.cos(beta)],
[b*a*np.cos(gamma), b**2, b*c*np.cos(alpha)],
[c*a*np.cos(beta), c*b*np.cos(alpha), c**2]])
def get_bond_distance(atom1_coords, atom2_coords, a, b, c, alpha, beta, gamma):
'''
Get distance between 2 atomic positions. Return distance.
'''
delta1 = a*(atom1_coords[0] - atom2_coords[0])
delta2 = b*(atom1_coords[1] - atom2_coords[1])
delta3 = c*(atom1_coords[2] - atom2_coords[2])
return (np.sqrt(delta1**2 + delta2**2 + delta3**2 +
(2*delta1*delta2*np.cos(gamma)) +
(2*delta1*delta3*np.cos(beta)) +
(2*delta2*delta3*np.cos(alpha))))
def get_bond_angle(atom1_coords, atom2_coords, atom3_coords,
a, b, c, alpha, beta, gamma):
'''
Get angle between 3 atomic positions. Return angle.
'''
r = get_bond_distance(atom1_coords, atom2_coords,
a, b, c, alpha, beta, gamma)
s = get_bond_distance(atom2_coords, atom3_coords,
a, b, c, alpha, beta, gamma)
X1 = np.array(np.array(atom2_coords) - np.array(atom1_coords))
X2 = np.array(np.array(atom2_coords) - np.array(atom3_coords))
    metric_matrix = get_metric_matrix(a, b, c, alpha, beta, gamma)
    cosphi = (np.dot(np.dot(np.transpose(X1), metric_matrix), X2)) / (r*s)
angle = np.arccos(cosphi)
degAngle = np.degrees(angle)
return degAngle
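# Worked example (illustrative, not part of the original module): a hypothetical
# cubic cell with a = b = c = 4.0. Note that the cell angles are expected in
# radians, since np.cos() is applied to them directly.
if __name__ == '__main__':
    cell = (4.0, 4.0, 4.0, np.pi / 2, np.pi / 2, np.pi / 2)
    atom1 = (0.0, 0.0, 0.0)
    atom2 = (0.5, 0.0, 0.0)
    atom3 = (0.5, 0.5, 0.0)
    print(get_bond_distance(atom1, atom2, *cell))      # 2.0 (half the cell edge)
    print(get_bond_angle(atom1, atom2, atom3, *cell))  # 90.0 degrees at atom2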
| mit | 2,571,340,122,225,649,000 | 39.85 | 79 | 0.586903 | false |
DeepGnosis/keras | tests/keras/backend/test_backends.py | 1 | 28072 | import sys
import pytest
from numpy.testing import assert_allclose
import numpy as np
from keras.backend import theano_backend as KTH
from keras.backend import tensorflow_backend as KTF
from keras.utils.np_utils import convert_kernel
def check_single_tensor_operation(function_name, input_shape, **kwargs):
val = np.random.random(input_shape) - 0.5
xth = KTH.variable(val)
xtf = KTF.variable(val)
zth = KTH.eval(getattr(KTH, function_name)(xth, **kwargs))
ztf = KTF.eval(getattr(KTF, function_name)(xtf, **kwargs))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
def check_two_tensor_operation(function_name, x_input_shape,
y_input_shape, **kwargs):
xval = np.random.random(x_input_shape) - 0.5
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
yval = np.random.random(y_input_shape) - 0.5
yth = KTH.variable(yval)
ytf = KTF.variable(yval)
zth = KTH.eval(getattr(KTH, function_name)(xth, yth, **kwargs))
ztf = KTF.eval(getattr(KTF, function_name)(xtf, ytf, **kwargs))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
def check_composed_tensor_operations(first_function_name, first_function_args,
second_function_name, second_function_args,
input_shape):
''' Creates a random tensor t0 with shape input_shape and compute
t1 = first_function_name(t0, **first_function_args)
t2 = second_function_name(t1, **second_function_args)
with both Theano and TensorFlow backends and ensures the answers match.
'''
val = np.random.random(input_shape) - 0.5
xth = KTH.variable(val)
xtf = KTF.variable(val)
yth = getattr(KTH, first_function_name)(xth, **first_function_args)
ytf = getattr(KTF, first_function_name)(xtf, **first_function_args)
zth = KTH.eval(getattr(KTH, second_function_name)(yth, **second_function_args))
ztf = KTF.eval(getattr(KTF, second_function_name)(ytf, **second_function_args))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
class TestBackend(object):
def test_linear_operations(self):
check_two_tensor_operation('dot', (4, 2), (2, 4))
check_two_tensor_operation('dot', (4, 2), (5, 2, 3))
check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 5, 3),
axes=(2, 2))
check_single_tensor_operation('transpose', (4, 2))
check_single_tensor_operation('reverse', (4, 3, 2), axes=1)
check_single_tensor_operation('reverse', (4, 3, 2), axes=(1, 2))
def test_shape_operations(self):
# concatenate
xval = np.random.random((4, 3))
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
yval = np.random.random((4, 2))
yth = KTH.variable(yval)
ytf = KTF.variable(yval)
zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
check_single_tensor_operation('permute_dimensions', (4, 2, 3),
pattern=(2, 0, 1))
check_single_tensor_operation('repeat', (4, 1), n=3)
check_single_tensor_operation('flatten', (4, 1))
check_single_tensor_operation('expand_dims', (4, 3), dim=-1)
check_single_tensor_operation('expand_dims', (4, 3, 2), dim=1)
check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
check_single_tensor_operation('squeeze', (4, 1, 1), axis=1)
check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
'squeeze', {'axis': 2},
(4, 3, 1, 1))
def test_repeat_elements(self):
reps = 3
for ndims in [1, 2, 3]:
shape = np.arange(2, 2 + ndims)
arr = np.arange(np.prod(shape)).reshape(shape)
arr_th = KTH.variable(arr)
arr_tf = KTF.variable(arr)
for rep_axis in range(ndims):
np_rep = np.repeat(arr, reps, axis=rep_axis)
th_rep = KTH.eval(
KTH.repeat_elements(arr_th, reps, axis=rep_axis))
tf_rep = KTF.eval(
KTF.repeat_elements(arr_tf, reps, axis=rep_axis))
assert th_rep.shape == np_rep.shape
assert tf_rep.shape == np_rep.shape
assert_allclose(np_rep, th_rep, atol=1e-05)
assert_allclose(np_rep, tf_rep, atol=1e-05)
def test_tile(self):
shape = (3, 4)
arr = np.arange(np.prod(shape)).reshape(shape)
arr_th = KTH.variable(arr)
arr_tf = KTF.variable(arr)
n = (2, 1)
th_rep = KTH.eval(KTH.tile(arr_th, n))
tf_rep = KTF.eval(KTF.tile(arr_tf, n))
assert_allclose(tf_rep, th_rep, atol=1e-05)
def test_value_manipulation(self):
val = np.random.random((4, 2))
xth = KTH.variable(val)
xtf = KTF.variable(val)
# get_value
valth = KTH.get_value(xth)
valtf = KTF.get_value(xtf)
assert valtf.shape == valth.shape
assert_allclose(valth, valtf, atol=1e-05)
# set_value
val = np.random.random((4, 2))
KTH.set_value(xth, val)
KTF.set_value(xtf, val)
valth = KTH.get_value(xth)
valtf = KTF.get_value(xtf)
assert valtf.shape == valth.shape
assert_allclose(valth, valtf, atol=1e-05)
# count_params
assert KTH.count_params(xth) == KTF.count_params(xtf)
# print_tensor
check_single_tensor_operation('print_tensor', ())
check_single_tensor_operation('print_tensor', (2,))
check_single_tensor_operation('print_tensor', (4, 3))
check_single_tensor_operation('print_tensor', (1, 2, 3))
val = np.random.random((3, 2))
xth = KTH.variable(val)
xtf = KTF.variable(val)
assert KTH.get_variable_shape(xth) == KTF.get_variable_shape(xtf)
def test_elementwise_operations(self):
check_single_tensor_operation('max', (4, 2))
check_single_tensor_operation('max', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('min', (4, 2))
check_single_tensor_operation('min', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('min', (4, 2, 3), axis=[1, -1])
check_single_tensor_operation('mean', (4, 2))
check_single_tensor_operation('mean', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('mean', (4, 2, 3), axis=-1, keepdims=True)
check_single_tensor_operation('mean', (4, 2, 3), axis=[1, -1])
check_single_tensor_operation('std', (4, 2))
check_single_tensor_operation('std', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('std', (4, 2, 3), axis=[1, -1])
check_single_tensor_operation('prod', (4, 2))
check_single_tensor_operation('prod', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('prod', (4, 2, 3), axis=[1, -1])
# does not work yet, wait for bool <-> int casting in TF (coming soon)
# check_single_tensor_operation('any', (4, 2))
# check_single_tensor_operation('any', (4, 2), axis=1, keepdims=True)
#
# check_single_tensor_operation('any', (4, 2))
# check_single_tensor_operation('any', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('argmax', (4, 2))
check_single_tensor_operation('argmax', (4, 2), axis=1)
check_single_tensor_operation('argmin', (4, 2))
check_single_tensor_operation('argmin', (4, 2), axis=1)
check_single_tensor_operation('square', (4, 2))
check_single_tensor_operation('abs', (4, 2))
check_single_tensor_operation('sqrt', (4, 2))
check_single_tensor_operation('exp', (4, 2))
check_single_tensor_operation('log', (4, 2))
check_single_tensor_operation('round', (4, 2))
check_single_tensor_operation('sign', (4, 2))
check_single_tensor_operation('pow', (4, 2), a=3)
check_single_tensor_operation('clip', (4, 2), min_value=0.4,
max_value=0.6)
# two-tensor ops
check_two_tensor_operation('equal', (4, 2), (4, 2))
check_two_tensor_operation('not_equal', (4, 2), (4, 2))
check_two_tensor_operation('greater', (4, 2), (4, 2))
check_two_tensor_operation('greater_equal', (4, 2), (4, 2))
check_two_tensor_operation('lesser', (4, 2), (4, 2))
check_two_tensor_operation('lesser_equal', (4, 2), (4, 2))
check_two_tensor_operation('maximum', (4, 2), (4, 2))
check_two_tensor_operation('minimum', (4, 2), (4, 2))
def test_gradient(self):
val = np.random.random((4, 2))
xth = KTH.variable(val)
xtf = KTF.variable(val)
expth = xth * KTH.exp(xth)
exptf = xtf * KTF.exp(xtf)
lossth = KTH.sum(expth)
losstf = KTF.sum(exptf)
zero_lossth = KTH.stop_gradient(lossth)
zero_losstf = KTF.stop_gradient(losstf)
gradth = KTH.gradients(lossth, [expth])
gradtf = KTF.gradients(losstf, [exptf])
zero_gradth = KTH.gradients(lossth + zero_lossth, [expth])
zero_gradtf = KTF.gradients(losstf + zero_losstf, [exptf])
zth = KTH.eval(gradth[0])
ztf = KTF.eval(gradtf[0])
zero_zth = KTH.eval(zero_gradth[0])
zero_ztf = KTF.eval(zero_gradtf[0])
assert zth.shape == ztf.shape
assert zero_zth.shape == zero_ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
assert_allclose(zero_zth, zero_ztf, atol=1e-05)
assert_allclose(zero_zth, zth, atol=1e-05)
assert_allclose(zero_ztf, ztf, atol=1e-05)
def test_function(self):
val = np.random.random((4, 2))
input_val = np.random.random((4, 2))
xth = KTH.variable(val)
xtf = KTF.variable(val)
yth = KTH.placeholder(ndim=2)
ytf = KTF.placeholder(ndim=2)
exp_th = KTH.square(xth) + yth
exp_tf = KTF.square(xtf) + ytf
update_th = xth * 2
update_tf = xtf * 2
fth = KTH.function([yth], [exp_th], updates=[(xth, update_th)])
ftf = KTF.function([ytf], [exp_tf], updates=[(xtf, update_tf)])
function_outputs_th = fth([input_val])[0]
function_outputs_tf = ftf([input_val])[0]
assert function_outputs_th.shape == function_outputs_tf.shape
assert_allclose(function_outputs_th, function_outputs_tf, atol=1e-05)
new_val_th = KTH.get_value(xth)
new_val_tf = KTF.get_value(xtf)
assert new_val_th.shape == new_val_tf.shape
assert_allclose(new_val_th, new_val_tf, atol=1e-05)
def test_rnn(self):
# implement a simple RNN
input_dim = 8
output_dim = 4
timesteps = 5
input_val = np.random.random((32, timesteps, input_dim))
init_state_val = np.random.random((32, output_dim))
W_i_val = np.random.random((input_dim, output_dim))
W_o_val = np.random.random((output_dim, output_dim))
def rnn_step_fn(input_dim, output_dim, K):
W_i = K.variable(W_i_val)
W_o = K.variable(W_o_val)
def step_function(x, states):
assert len(states) == 1
prev_output = states[0]
output = K.dot(x, W_i) + K.dot(prev_output, W_o)
return output, [output]
return step_function
# test default setup
th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
th_inputs = KTH.variable(input_val)
th_initial_states = [KTH.variable(init_state_val)]
last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=False,
mask=None)
th_last_output = KTH.eval(last_output)
th_outputs = KTH.eval(outputs)
assert len(new_states) == 1
th_state = KTH.eval(new_states[0])
tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
tf_inputs = KTF.variable(input_val)
tf_initial_states = [KTF.variable(init_state_val)]
last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn, tf_inputs,
tf_initial_states,
go_backwards=False,
mask=None)
tf_last_output = KTF.eval(last_output)
tf_outputs = KTF.eval(outputs)
assert len(new_states) == 1
tf_state = KTF.eval(new_states[0])
assert_allclose(tf_last_output, th_last_output, atol=1e-04)
assert_allclose(tf_outputs, th_outputs, atol=1e-04)
assert_allclose(tf_state, th_state, atol=1e-04)
# test unroll
unrolled_last_output, unrolled_outputs, unrolled_new_states = KTH.rnn(
th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=False,
mask=None,
unroll=True,
input_length=timesteps)
unrolled_th_last_output = KTH.eval(unrolled_last_output)
unrolled_th_outputs = KTH.eval(unrolled_outputs)
assert len(unrolled_new_states) == 1
unrolled_th_state = KTH.eval(unrolled_new_states[0])
assert_allclose(th_last_output, unrolled_th_last_output, atol=1e-04)
assert_allclose(th_outputs, unrolled_th_outputs, atol=1e-04)
assert_allclose(th_state, unrolled_th_state, atol=1e-04)
# test go_backwards
th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
th_inputs = KTH.variable(input_val)
th_initial_states = [KTH.variable(init_state_val)]
last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=True,
mask=None)
th_last_output = KTH.eval(last_output)
th_outputs = KTH.eval(outputs)
assert len(new_states) == 1
th_state = KTH.eval(new_states[0])
tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
tf_inputs = KTF.variable(input_val)
tf_initial_states = [KTF.variable(init_state_val)]
last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn, tf_inputs,
tf_initial_states,
go_backwards=True,
mask=None)
tf_last_output = KTF.eval(last_output)
tf_outputs = KTF.eval(outputs)
assert len(new_states) == 1
tf_state = KTF.eval(new_states[0])
assert_allclose(tf_last_output, th_last_output, atol=1e-04)
assert_allclose(tf_outputs, th_outputs, atol=1e-04)
assert_allclose(tf_state, th_state, atol=1e-04)
# test unroll with backwards = True
bwd_last_output, bwd_outputs, bwd_new_states = KTH.rnn(
th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=True,
mask=None)
bwd_th_last_output = KTH.eval(bwd_last_output)
bwd_th_outputs = KTH.eval(bwd_outputs)
assert len(bwd_new_states) == 1
bwd_th_state = KTH.eval(bwd_new_states[0])
bwd_unrolled_last_output, bwd_unrolled_outputs, bwd_unrolled_new_states = KTH.rnn(
th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=True,
mask=None,
unroll=True,
input_length=timesteps)
bwd_unrolled_th_last_output = KTH.eval(bwd_unrolled_last_output)
bwd_unrolled_th_outputs = KTH.eval(bwd_unrolled_outputs)
assert len(bwd_unrolled_new_states) == 1
bwd_unrolled_th_state = KTH.eval(bwd_unrolled_new_states[0])
assert_allclose(bwd_th_last_output, bwd_unrolled_th_last_output, atol=1e-04)
assert_allclose(bwd_th_outputs, bwd_unrolled_th_outputs, atol=1e-04)
assert_allclose(bwd_th_state, bwd_unrolled_th_state, atol=1e-04)
# test unroll with masking
np_mask = np.random.randint(2, size=(32, timesteps))
th_mask = KTH.variable(np_mask)
masked_last_output, masked_outputs, masked_new_states = KTH.rnn(
th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=False,
mask=th_mask)
masked_th_last_output = KTH.eval(masked_last_output)
masked_th_outputs = KTH.eval(masked_outputs)
assert len(masked_new_states) == 1
masked_th_state = KTH.eval(masked_new_states[0])
unrolled_masked_last_output, unrolled_masked_outputs, unrolled_masked_new_states = KTH.rnn(
th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=False,
mask=th_mask,
unroll=True,
input_length=timesteps)
unrolled_masked_th_last_output = KTH.eval(unrolled_masked_last_output)
unrolled_masked_th_outputs = KTH.eval(unrolled_masked_outputs)
assert len(unrolled_masked_new_states) == 1
unrolled_masked_th_state = KTH.eval(unrolled_masked_new_states[0])
assert_allclose(unrolled_masked_th_last_output, masked_th_last_output, atol=1e-04)
assert_allclose(unrolled_masked_th_outputs, masked_th_outputs, atol=1e-04)
assert_allclose(unrolled_masked_th_state, masked_th_state, atol=1e-04)
def test_switch(self):
val = np.random.random()
xth = KTH.variable(val)
xth = KTH.switch(xth >= 0.5, xth * 0.1, xth * 0.2)
xtf = KTF.variable(val)
xtf = KTF.switch(xtf >= 0.5, xtf * 0.1, xtf * 0.2)
zth = KTH.eval(xth)
ztf = KTF.eval(xtf)
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
def test_nn_operations(self):
check_single_tensor_operation('relu', (4, 2), alpha=0.1, max_value=0.5)
check_single_tensor_operation('softmax', (4, 10))
check_single_tensor_operation('softplus', (4, 10))
check_single_tensor_operation('sigmoid', (4, 2))
check_single_tensor_operation('hard_sigmoid', (4, 2))
check_single_tensor_operation('tanh', (4, 2))
# dropout
val = np.random.random((100, 100))
xth = KTH.variable(val)
xtf = KTF.variable(val)
zth = KTH.eval(KTH.dropout(xth, level=0.2))
ztf = KTF.eval(KTF.dropout(xtf, level=0.2))
assert zth.shape == ztf.shape
# dropout patterns are different, only check mean
assert np.abs(zth.mean() - ztf.mean()) < 0.05
check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=True)
check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=True)
check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=False)
check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=False)
check_single_tensor_operation('l2_normalize', (4, 3), axis=-1)
check_single_tensor_operation('l2_normalize', (4, 3), axis=1)
def test_conv2d(self):
# TH kernel shape: (depth, input_depth, rows, cols)
# TF kernel shape: (rows, cols, input_depth, depth)
for input_shape in [(2, 3, 4, 5), (2, 3, 5, 6)]:
for kernel_shape in [(4, 3, 2, 2), (4, 3, 3, 4)]:
xval = np.random.random(input_shape)
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
kernel_val = np.random.random(kernel_shape) - 0.5
kernel_th = KTH.variable(convert_kernel(kernel_val))
kernel_tf = KTF.variable(kernel_val)
zth = KTH.eval(KTH.conv2d(xth, kernel_th))
ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
input_shape = (1, 6, 5, 3)
kernel_shape = (3, 3, 3, 2)
xval = np.random.random(input_shape)
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
kernel_val = np.random.random(kernel_shape) - 0.5
kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
kernel_tf = KTF.variable(kernel_val)
zth = KTH.eval(KTH.conv2d(xth, kernel_th, dim_ordering='tf'))
ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, dim_ordering='tf'))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
def test_conv3d(self):
# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
# TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)
# TH kernel shape: (depth, input_depth, x, y, z)
# TF kernel shape: (x, y, z, input_depth, depth)
# test in dim_ordering = th
for input_shape in [(2, 3, 4, 5, 4), (2, 3, 5, 4, 6)]:
for kernel_shape in [(4, 3, 2, 2, 2), (4, 3, 3, 2, 4)]:
xval = np.random.random(input_shape)
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
kernel_val = np.random.random(kernel_shape) - 0.5
kernel_th = KTH.variable(convert_kernel(kernel_val))
kernel_tf = KTF.variable(kernel_val)
zth = KTH.eval(KTH.conv3d(xth, kernel_th))
ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
# test in dim_ordering = tf
input_shape = (1, 2, 2, 2, 1)
kernel_shape = (2, 2, 2, 1, 1)
xval = np.random.random(input_shape)
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
kernel_val = np.random.random(kernel_shape) - 0.5
kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
kernel_tf = KTF.variable(kernel_val)
zth = KTH.eval(KTH.conv3d(xth, kernel_th, dim_ordering='tf'))
ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, dim_ordering='tf'))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
def test_pool2d(self):
check_single_tensor_operation('pool2d', (5, 3, 10, 12), pool_size=(2, 2),
strides=(1, 1), border_mode='valid')
check_single_tensor_operation('pool2d', (5, 3, 9, 11), pool_size=(2, 2),
strides=(1, 1), border_mode='valid')
check_single_tensor_operation('pool2d', (5, 3, 9, 11), pool_size=(2, 3),
strides=(1, 1), border_mode='valid')
def test_pool3d(self):
check_single_tensor_operation('pool3d', (5, 3, 10, 12, 5), pool_size=(2, 2, 2),
strides=(1, 1, 1), border_mode='valid')
check_single_tensor_operation('pool3d', (5, 3, 9, 11, 5), pool_size=(2, 2, 2),
strides=(1, 1, 1), border_mode='valid')
check_single_tensor_operation('pool3d', (5, 3, 9, 11, 5), pool_size=(2, 3, 2),
strides=(1, 1, 1), border_mode='valid')
def test_random_normal(self):
mean = 0.
std = 1.
rand = KTF.eval(KTF.random_normal((1000, 1000), mean=mean, std=std))
assert(rand.shape == (1000, 1000))
assert(np.abs(np.mean(rand) - mean) < 0.01)
assert(np.abs(np.std(rand) - std) < 0.01)
rand = KTH.eval(KTH.random_normal((1000, 1000), mean=mean, std=std))
assert(rand.shape == (1000, 1000))
assert(np.abs(np.mean(rand) - mean) < 0.01)
assert(np.abs(np.std(rand) - std) < 0.01)
def test_random_uniform(self):
min = -1.
max = 1.
rand = KTF.eval(KTF.random_uniform((1000, 1000), min, max))
assert(rand.shape == (1000, 1000))
assert(np.abs(np.mean(rand)) < 0.01)
assert(np.max(rand) <= max)
assert(np.min(rand) >= min)
rand = KTH.eval(KTH.random_uniform((1000, 1000), min, max))
assert(rand.shape == (1000, 1000))
assert(np.abs(np.mean(rand)) < 0.01)
assert(np.max(rand) <= max)
assert(np.min(rand) >= min)
def test_random_binomial(self):
p = 0.5
rand = KTF.eval(KTF.random_binomial((1000, 1000), p))
assert(rand.shape == (1000, 1000))
assert(np.abs(np.mean(rand) - p) < 0.01)
assert(np.max(rand) == 1)
assert(np.min(rand) == 0)
rand = KTH.eval(KTH.random_binomial((1000, 1000), p))
assert(rand.shape == (1000, 1000))
assert(np.abs(np.mean(rand) - p) < 0.01)
assert(np.max(rand) == 1)
assert(np.min(rand) == 0)
def test_ctc(self):
# simplified version of TensorFlow's test
label_lens = np.expand_dims(np.asarray([5, 4]), 1)
input_lens = np.expand_dims(np.asarray([5, 5]), 1) # number of timesteps
# the Theano and Tensorflow CTC code use different methods to ensure
# numerical stability. The Theano code subtracts out the max
# before the final log, so the results are different but scale
# identically and still train properly
loss_log_probs_tf = [3.34211, 5.42262]
loss_log_probs_th = [1.73308, 3.81351]
# dimensions are batch x time x categories
labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]])
inputs = np.asarray(
[[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]],
dtype=np.float32)
labels_tf = KTF.variable(labels, dtype="int32")
inputs_tf = KTF.variable(inputs, dtype="float32")
input_lens_tf = KTF.variable(input_lens, dtype="int32")
label_lens_tf = KTF.variable(label_lens, dtype="int32")
res = KTF.eval(KTF.ctc_batch_cost(labels_tf, inputs_tf, input_lens_tf, label_lens_tf))
assert_allclose(res[:, 0], loss_log_probs_tf, atol=1e-05)
labels_th = KTH.variable(labels, dtype="int32")
inputs_th = KTH.variable(inputs, dtype="float32")
input_lens_th = KTH.variable(input_lens, dtype="int32")
label_lens_th = KTH.variable(label_lens, dtype="int32")
res = KTH.eval(KTH.ctc_batch_cost(labels_th, inputs_th, input_lens_th, label_lens_th))
assert_allclose(res[0, :], loss_log_probs_th, atol=1e-05)
def test_one_hot(self):
input_length = 10
nb_classes = 20
batch_size = 30
indices = np.random.randint(0, nb_classes, size=(batch_size, input_length))
oh = np.eye(nb_classes)[indices]
for K in [KTH, KTF]:
koh = K.eval(K.one_hot(K.variable(indices, dtype='int32'), nb_classes))
assert np.all(koh == oh)
if __name__ == '__main__':
pytest.main([__file__])
| mit | -5,885,894,848,769,436,000 | 40.588148 | 99 | 0.564655 | false |
pmghalvorsen/gramps_branch | gramps/gen/filters/__init__.py | 1 | 1513 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filtering framework for GRAMPS.
"""
#SystemFilters = None
CustomFilters = None
from ..const import CUSTOM_FILTERS
from ._filterlist import FilterList
from ._genericfilter import GenericFilter, GenericFilterFactory
from ._paramfilter import ParamFilter
from ._searchfilter import SearchFilter, ExactSearchFilter
#def reload_system_filters():
#global SystemFilters
#SystemFilters = FilterList(SYSTEM_FILTERS)
#SystemFilters.load()
def reload_custom_filters():
global CustomFilters
CustomFilters = FilterList(CUSTOM_FILTERS)
CustomFilters.load()
#if not SystemFilters:
#reload_system_filters()
if not CustomFilters:
reload_custom_filters()
| gpl-2.0 | -593,644,692,701,781,900 | 30.520833 | 79 | 0.759418 | false |
nischu7/paramiko | paramiko/client.py | 1 | 21630 | # Copyright (C) 2006-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
L{SSHClient}.
"""
from binascii import hexlify
import getpass
import os
import socket
import warnings
from paramiko.agent import Agent
from paramiko.common import *
from paramiko.config import SSH_PORT
from paramiko.dsskey import DSSKey
from paramiko.hostkeys import HostKeys
from paramiko.resource import ResourceManager
from paramiko.rsakey import RSAKey
from paramiko.ssh_exception import SSHException, BadHostKeyException
from paramiko.transport import Transport
from paramiko.util import retry_on_signal
class MissingHostKeyPolicy (object):
"""
Interface for defining the policy that L{SSHClient} should use when the
SSH server's hostname is not in either the system host keys or the
application's keys. Pre-made classes implement policies for automatically
adding the key to the application's L{HostKeys} object (L{AutoAddPolicy}),
and for automatically rejecting the key (L{RejectPolicy}).
This function may be used to ask the user to verify the key, for example.
"""
def missing_host_key(self, client, hostname, key):
"""
Called when an L{SSHClient} receives a server key for a server that
isn't in either the system or local L{HostKeys} object. To accept
the key, simply return. To reject, raised an exception (which will
be passed to the calling application).
"""
pass
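# A hedged illustration of a custom policy, along the lines suggested by the
# docstring above (interactively confirming unknown keys). Not part of paramiko;
# the prompt text and use of input() are assumptions:
#
#     class PromptPolicy(MissingHostKeyPolicy):
#         def missing_host_key(self, client, hostname, key):
#             answer = input('Accept unknown host key for %s? [y/N] ' % hostname)
#             if answer.strip().lower() != 'y':
#                 raise SSHException('Host key for %s rejected by user' % hostname)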
class AutoAddPolicy (MissingHostKeyPolicy):
"""
Policy for automatically adding the hostname and new host key to the
local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
"""
def missing_host_key(self, client, hostname, key):
client._host_keys.add(hostname, key.get_name(), key)
if client._host_keys_filename is not None:
client.save_host_keys(client._host_keys_filename)
client._log(DEBUG, 'Adding %s host key for %s: %s' %
(key.get_name(), hostname, hexlify(key.get_fingerprint())))
class RejectPolicy (MissingHostKeyPolicy):
"""
Policy for automatically rejecting the unknown hostname & key. This is
used by L{SSHClient}.
"""
def missing_host_key(self, client, hostname, key):
client._log(DEBUG, 'Rejecting %s host key for %s: %s' %
(key.get_name(), hostname, hexlify(key.get_fingerprint())))
raise SSHException('Server %r not found in known_hosts' % hostname)
class WarningPolicy (MissingHostKeyPolicy):
"""
Policy for logging a python-style warning for an unknown host key, but
accepting it. This is used by L{SSHClient}.
"""
def missing_host_key(self, client, hostname, key):
warnings.warn('Unknown %s host key for %s: %s' %
(key.get_name(), hostname, hexlify(key.get_fingerprint())))
class SSHClient (object):
"""
A high-level representation of a session with an SSH server. This class
wraps L{Transport}, L{Channel}, and L{SFTPClient} to take care of most
aspects of authenticating and opening channels. A typical use case is::
client = SSHClient()
client.load_system_host_keys()
client.connect('ssh.example.com')
stdin, stdout, stderr = client.exec_command('ls -l')
You may pass in explicit overrides for authentication and server host key
checking. The default mechanism is to try to use local key files or an
SSH agent (if one is running).
@since: 1.6
"""
def __init__(self):
"""
Create a new SSHClient.
"""
self._system_host_keys = HostKeys()
self._host_keys = HostKeys()
self._host_keys_filename = None
self._log_channel = None
self._policy = RejectPolicy()
self._transport = None
self._agent = None
def load_system_host_keys(self, filename=None):
"""
Load host keys from a system (read-only) file. Host keys read with
this method will not be saved back by L{save_host_keys}.
This method can be called multiple times. Each new set of host keys
will be merged with the existing set (new replacing old if there are
conflicts).
If C{filename} is left as C{None}, an attempt will be made to read
keys from the user's local "known hosts" file, as used by OpenSSH,
and no exception will be raised if the file can't be read. This is
probably only useful on posix.
@param filename: the filename to read, or C{None}
@type filename: str
@raise IOError: if a filename was provided and the file could not be
read
"""
if filename is None:
# try the user's .ssh key file, and mask exceptions
filename = os.path.expanduser('~/.ssh/known_hosts')
try:
self._system_host_keys.load(filename)
except IOError:
pass
return
self._system_host_keys.load(filename)
def load_host_keys(self, filename):
"""
Load host keys from a local host-key file. Host keys read with this
method will be checked I{after} keys loaded via L{load_system_host_keys},
but will be saved back by L{save_host_keys} (so they can be modified).
The missing host key policy L{AutoAddPolicy} adds keys to this set and
saves them, when connecting to a previously-unknown server.
This method can be called multiple times. Each new set of host keys
will be merged with the existing set (new replacing old if there are
conflicts). When automatically saving, the last hostname is used.
@param filename: the filename to read
@type filename: str
@raise IOError: if the filename could not be read
"""
self._host_keys_filename = filename
self._host_keys.load(filename)
def save_host_keys(self, filename):
"""
Save the host keys back to a file. Only the host keys loaded with
L{load_host_keys} (plus any added directly) will be saved -- not any
host keys loaded with L{load_system_host_keys}.
@param filename: the filename to save to
@type filename: str
@raise IOError: if the file could not be written
"""
        # update local host keys from file (in case other SSH clients
        # have written to the known_hosts file meanwhile).
if self.known_hosts is not None:
self.load_host_keys(self.known_hosts)
f = open(filename, 'w')
for hostname, keys in self._host_keys.items():
for keytype, key in keys.items():
f.write('%s %s %s\n' % (hostname, keytype, key.get_base64()))
f.close()
def get_host_keys(self):
"""
Get the local L{HostKeys} object. This can be used to examine the
local host keys or change them.
@return: the local host keys
@rtype: L{HostKeys}
"""
return self._host_keys
def set_log_channel(self, name):
"""
Set the channel for logging. The default is C{"paramiko.transport"}
but it can be set to anything you want.
@param name: new channel name for logging
@type name: str
"""
self._log_channel = name
def set_missing_host_key_policy(self, policy):
"""
Set the policy to use when connecting to a server that doesn't have a
host key in either the system or local L{HostKeys} objects. The
default policy is to reject all unknown servers (using L{RejectPolicy}).
You may substitute L{AutoAddPolicy} or write your own policy class.
@param policy: the policy to use when receiving a host key from a
previously-unknown server
@type policy: L{MissingHostKeyPolicy}
"""
self._policy = policy
def connect(self, hostname, port=SSH_PORT, username=None, password=None, pkey=None,
key_filename=None, timeout=None, allow_agent=True, look_for_keys=True,
compress=False, sock=None):
"""
Connect to an SSH server and authenticate to it. The server's host key
is checked against the system host keys (see L{load_system_host_keys})
and any local host keys (L{load_host_keys}). If the server's hostname
is not found in either set of host keys, the missing host key policy
is used (see L{set_missing_host_key_policy}). The default policy is
to reject the key and raise an L{SSHException}.
Authentication is attempted in the following order of priority:
- The C{pkey} or C{key_filename} passed in (if any)
- Any key we can find through an SSH agent
- Any "id_rsa" or "id_dsa" key discoverable in C{~/.ssh/}
- Plain username/password auth, if a password was given
If a private key requires a password to unlock it, and a password is
passed in, that password will be used to attempt to unlock the key.
@param hostname: the server to connect to
@type hostname: str
@param port: the server port to connect to
@type port: int
@param username: the username to authenticate as (defaults to the
current local username)
@type username: str
@param password: a password to use for authentication or for unlocking
a private key
@type password: str
@param pkey: an optional private key to use for authentication
@type pkey: L{PKey}
@param key_filename: the filename, or list of filenames, of optional
private key(s) to try for authentication
@type key_filename: str or list(str)
@param timeout: an optional timeout (in seconds) for the TCP connect
@type timeout: float
@param allow_agent: set to False to disable connecting to the SSH agent
@type allow_agent: bool
@param look_for_keys: set to False to disable searching for discoverable
private key files in C{~/.ssh/}
@type look_for_keys: bool
@param compress: set to True to turn on compression
@type compress: bool
@param sock: an open socket or socket-like object (such as a
L{Channel}) to use for communication to the target host
@type sock: socket
@raise BadHostKeyException: if the server's host key could not be
verified
@raise AuthenticationException: if authentication failed
@raise SSHException: if there was any other error connecting or
establishing an SSH session
@raise socket.error: if a socket error occurred while connecting
"""
if not sock:
for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
if socktype == socket.SOCK_STREAM:
af = family
addr = sockaddr
break
else:
# some OS like AIX don't indicate SOCK_STREAM support, so just guess. :(
af, _, _, _, addr = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
sock = socket.socket(af, socket.SOCK_STREAM)
if timeout is not None:
try:
sock.settimeout(timeout)
except:
pass
retry_on_signal(lambda: sock.connect(addr))
t = self._transport = Transport(sock)
t.use_compression(compress=compress)
if self._log_channel is not None:
t.set_log_channel(self._log_channel)
t.start_client()
ResourceManager.register(self, t)
server_key = t.get_remote_server_key()
keytype = server_key.get_name()
if port == SSH_PORT:
server_hostkey_name = hostname
else:
server_hostkey_name = "[%s]:%d" % (hostname, port)
our_server_key = self._system_host_keys.get(server_hostkey_name, {}).get(keytype, None)
if our_server_key is None:
our_server_key = self._host_keys.get(server_hostkey_name, {}).get(keytype, None)
if our_server_key is None:
# will raise exception if the key is rejected; let that fall out
self._policy.missing_host_key(self, server_hostkey_name, server_key)
# if the callback returns, assume the key is ok
our_server_key = server_key
if server_key != our_server_key:
raise BadHostKeyException(hostname, server_key, our_server_key)
if username is None:
username = getpass.getuser()
if key_filename is None:
key_filenames = []
elif type(key_filename) == str:
key_filenames = [ key_filename ]
else:
key_filenames = key_filename
self._auth(username, password, pkey, key_filenames, allow_agent, look_for_keys)
def close(self):
"""
Close this SSHClient and its underlying L{Transport}.
"""
if self._transport is None:
return
self._transport.close()
self._transport = None
if self._agent != None:
self._agent.close()
self._agent = None
def exec_command(self, command, bufsize=-1, timeout=None, get_pty=False):
"""
Execute a command on the SSH server. A new L{Channel} is opened and
the requested command is executed. The command's input and output
streams are returned as python C{file}-like objects representing
stdin, stdout, and stderr.
@param command: the command to execute
@type command: str
@param bufsize: interpreted the same way as by the built-in C{file()} function in python
@type bufsize: int
@param timeout: set command's channel timeout. See L{Channel.settimeout}.settimeout
@type timeout: int
@return: the stdin, stdout, and stderr of the executing command
@rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile})
@raise SSHException: if the server fails to execute the command
"""
chan = self._transport.open_session()
        if get_pty:
chan.get_pty()
chan.settimeout(timeout)
chan.exec_command(command)
stdin = chan.makefile('wb', bufsize)
stdout = chan.makefile('rb', bufsize)
stderr = chan.makefile_stderr('rb', bufsize)
return stdin, stdout, stderr
def invoke_shell(self, term=b'vt100', width=80, height=24, width_pixels=0,
height_pixels=0):
"""
Start an interactive shell session on the SSH server. A new L{Channel}
is opened and connected to a pseudo-terminal using the requested
terminal type and size.
@param term: the terminal type to emulate (for example, C{"vt100"})
@type term: str
@param width: the width (in characters) of the terminal window
@type width: int
@param height: the height (in characters) of the terminal window
@type height: int
@param width_pixels: the width (in pixels) of the terminal window
@type width_pixels: int
@param height_pixels: the height (in pixels) of the terminal window
@type height_pixels: int
@return: a new channel connected to the remote shell
@rtype: L{Channel}
@raise SSHException: if the server fails to invoke a shell
"""
chan = self._transport.open_session()
chan.get_pty(term, width, height, width_pixels, height_pixels)
chan.invoke_shell()
return chan
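    # A hedged usage sketch for invoke_shell(), complementing the exec_command
    # example in the class docstring. The hostname and terminal size below are
    # hypothetical, and the Channel.send/recv calls are shown without error handling:
    #
    #     client = SSHClient()
    #     client.load_system_host_keys()
    #     client.connect('ssh.example.com')
    #     chan = client.invoke_shell(term=b'xterm', width=120, height=40)
    #     chan.send('ls -l\n')
    #     output = chan.recv(4096)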
def open_sftp(self):
"""
Open an SFTP session on the SSH server.
@return: a new SFTP session object
@rtype: L{SFTPClient}
"""
return self._transport.open_sftp_client()
def get_transport(self):
"""
Return the underlying L{Transport} object for this SSH connection.
This can be used to perform lower-level tasks, like opening specific
kinds of channels.
@return: the Transport for this connection
@rtype: L{Transport}
"""
return self._transport
def _auth(self, username, password, pkey, key_filenames, allow_agent, look_for_keys):
"""
Try, in order:
- The key passed in, if one was passed in.
- Any key we can find through an SSH agent (if allowed).
- Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (if allowed).
- Plain username/password auth, if a password was given.
(The password might be needed to unlock a private key, or for
two-factor authentication [for which it is required].)
"""
saved_exception = None
two_factor = False
allowed_types = []
if pkey is not None:
try:
self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint()))
allowed_types = self._transport.auth_publickey(username, pkey)
two_factor = (allowed_types == [b'password'])
if not two_factor:
return
except SSHException as e:
saved_exception = e
if not two_factor:
for key_filename in key_filenames:
for pkey_class in (RSAKey, DSSKey):
try:
key = pkey_class.from_private_key_file(key_filename, password)
self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename))
self._transport.auth_publickey(username, key)
two_factor = (allowed_types == [b'password'])
if not two_factor:
return
break
except SSHException as e:
saved_exception = e
if not two_factor and allow_agent:
if self._agent == None:
self._agent = Agent()
for key in self._agent.get_keys():
try:
self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint()))
# for 2-factor auth a successfully auth'd key will result in ['password']
allowed_types = self._transport.auth_publickey(username, key)
two_factor = (allowed_types == [b'password'])
if not two_factor:
return
break
except SSHException as e:
saved_exception = e
if not two_factor:
keyfiles = []
rsa_key = os.path.expanduser('~/.ssh/id_rsa')
dsa_key = os.path.expanduser('~/.ssh/id_dsa')
if os.path.isfile(rsa_key):
keyfiles.append((RSAKey, rsa_key))
if os.path.isfile(dsa_key):
keyfiles.append((DSSKey, dsa_key))
# look in ~/ssh/ for windows users:
rsa_key = os.path.expanduser('~/ssh/id_rsa')
dsa_key = os.path.expanduser('~/ssh/id_dsa')
if os.path.isfile(rsa_key):
keyfiles.append((RSAKey, rsa_key))
if os.path.isfile(dsa_key):
keyfiles.append((DSSKey, dsa_key))
if not look_for_keys:
keyfiles = []
for pkey_class, filename in keyfiles:
try:
key = pkey_class.from_private_key_file(filename, password)
self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename))
# for 2-factor auth a successfully auth'd key will result in ['password']
allowed_types = self._transport.auth_publickey(username, key)
two_factor = (allowed_types == [b'password'])
if not two_factor:
return
break
except SSHException as e:
saved_exception = e
except IOError as e:
saved_exception = e
if password is not None:
try:
self._transport.auth_password(username, password)
return
except SSHException as e:
saved_exception = e
elif two_factor:
raise SSHException('Two-factor authentication requires a password')
# if we got an auth-failed exception earlier, re-raise it
if saved_exception is not None:
raise saved_exception
raise SSHException('No authentication methods available')
def _log(self, level, msg):
self._transport._log(level, msg)
| lgpl-2.1 | 8,268,515,702,681,514,000 | 39.204461 | 139 | 0.605594 | false |
MWers/docker-docset | bin/add-dash-anchors.py | 1 | 1248 | #!/usr/bin/env python
"""
Add Dash docs anchor tags to html source.
"""
import argparse
import os
import re
import sys
import urllib
parser = argparse.ArgumentParser()
parser.add_argument('filename',
help=('The file to add dash doc anchors to.'))
parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true')
args = parser.parse_args()
if not os.path.isfile(args.filename):
    print 'Error: File %s does not exist' % args.filename
    sys.exit(1)
html = open(args.filename).read()
# Use regex to add dash docs anchors
def dashrepl(match):
(hopen, id, name, hclose) = match.group(1, 2, 3, 4)
dashname = name
dashname = re.sub('<.*?>', '', dashname)
dashname = re.sub('[^a-zA-Z0-9\.\(\)\?\',:; ]', '-', dashname)
dashname = urllib.quote(dashname)
dash = ('<a name="//apple_ref/cpp/Section/%s" class="dashAnchor"></a>' %
(dashname))
header = '<h%s id="%s">%s</h%s>' % (hopen, id, name, hclose)
return "%s\n%s" % (dash, header)
html = re.sub('<h([1-2]) id="(.*?)">(.*?)</h([1-2])>', dashrepl, html)
with open(args.filename, 'w') as f:
f.write(html)
if args.verbose:
print 'Added dash docs anchors to %s' % args.filename
| mit | 5,308,139,273,396,154,000 | 25 | 76 | 0.584135 | false |
sprucedev/DockCI-Agent | dockci/models/job_meta/stages_prepare_docker.py | 1 | 24964 | """ Docker-based preparation for the main job stages """
import json
import tarfile
from collections import defaultdict
import docker
import docker.errors
import py.error # pylint:disable=import-error
import py.path # pylint:disable=import-error
from dockci.exceptions import (AlreadyBuiltError,
DockerAPIError,
StageFailedError,
)
from dockci.models.base import ServiceBase
from dockci.models.blob import FilesystemBlob
from dockci.models.job_meta.stages import JobStageBase
from dockci.server import CONFIG
from dockci.util import (built_docker_image_id,
bytes_str,
docker_ensure_image,
IOFauxDockerLog,
normalize_stream_lines,
path_contained,
)
class InlineProjectStage(JobStageBase):
""" Stage to run project containers inline in another project job """
def get_services(self):
""" Get the services associated with the projects in this stage """
raise NotImplementedError(
"You must override the 'get_services' method"
)
def id_for_service(self, slug): # pylint:disable=no-self-use
""" Get the event series ID for a given service's slug """
return slug
def runnable(self, handle):
"""
Resolve project containers, and pass control to ``runnable_inline``
"""
all_okay = True
faux_log = IOFauxDockerLog(handle)
for service in self.get_services():
# pylint:disable=no-member
defaults = {'id': self.id_for_service(service.app_name)}
with faux_log.more_defaults(**defaults):
defaults = {'status': "Finding service %s" % service.display}
with faux_log.more_defaults(**defaults):
faux_log.update()
service_project = service.project
service_job = service.job
if service_project is None:
faux_log.update(progress="No project found")
elif service_job is None:
faux_log.update(
error="No successful, versioned job for %s" % (
service_project.name
),
)
all_okay = False
continue
else:
service.tag = service_job.tag
defaults = {'status': "Pulling container image %s" % (
service.display,
)}
with faux_log.more_defaults(**defaults):
faux_log.update()
auth_registry = service.auth_registry
if auth_registry is not None:
ensure_kwargs = dict(
insecure_registry=service.auth_registry.insecure,
)
else:
ensure_kwargs = {}
try:
image_id = docker_ensure_image(
self.job.docker_client,
service,
handle=handle,
**ensure_kwargs
)
except docker.errors.APIError as ex:
faux_log.update(error=ex.explanation.decode())
all_okay = False
continue
if image_id is None:
faux_log.update(error="Not found")
all_okay = False
continue
faux_log.update(progress="Done")
all_okay &= self.runnable_inline(
service,
image_id,
handle,
faux_log,
)
return 0 if all_okay else 1
def runnable_inline(self, service, image_id, handle, faux_log):
""" Executed for each service """
raise NotImplementedError(
"You must override the 'runnable_inline' method"
)
class PushPrepStage(JobStageBase):
""" Ensure versioned tags haven't already been built """
slug = 'docker_push_prep'
def set_old_image_ids(self, handle):
"""
Set the ``_old_image_ids`` attribute on the job so that cleanup knows
to remove other images that this job replaces
"""
possible_tags_set = {
self.job.service.clone_and_update(tag=tag).image
for tag in self.job.possible_tags_set
}
tags_set = {
self.job.service.clone_and_update(tag=tag).image
for tag in self.job.tags_set
}
# This should work when docker/docker#18181 is fixed
# self.job.docker_client.images(name=self.job.docker_image_name)
# pylint:disable=protected-access
for image in self.job.docker_client.images():
matched_tags = {
tag for tag in possible_tags_set
if tag in image['RepoTags']
}
if matched_tags:
handle.write((
"Matched tags; will replace image '%s':\n" % image['Id']
).encode())
for tag in matched_tags:
handle.write((" %s\n" % tag).encode())
handle.flush()
repo_tags_set = set(image['RepoTags'])
# All this job's possible tags that are on the image
# Later, we clean up by removing the tags that our new image
# will be tagged with
to_cleanup = repo_tags_set & possible_tags_set
self.job._old_image_ids.extend(list(to_cleanup))
# If we're removing all the tags, delete the image too
if repo_tags_set.issubset(possible_tags_set):
self.job._old_image_ids.append(image['Id'])
handle.write(
" No tags remain; deleting the image too\n".encode())
else:
handle.write(
" Tags remain; won't delete the image\n".encode())
for tag in repo_tags_set - possible_tags_set:
handle.write((" %s\n" % tag).encode())
handle.flush()
# Don't immediately delete our own tags
for tag in tags_set:
try:
while True: # Remove tags until ValueError
self.job._old_image_ids.remove(tag)
except ValueError:
pass
def check_existing_job(self, handle):
""" Check the tag to see if there's a job already built """
handle.write("Checking for previous job... ".encode())
handle.flush()
for tag in self.job.tag_tags_set:
job = self.job.project.latest_job(
passed=True,
tag=tag,
)
if job is not None:
raise AlreadyBuiltError(
'Version %s of %s already built' % (
self.job.tag,
self.job.project.slug,
)
)
handle.write("OKAY!\n".encode())
handle.flush()
def runnable(self, handle):
if self.job.tag_push_candidate:
self.check_existing_job(handle)
self.set_old_image_ids(handle)
class ProvisionStage(InlineProjectStage):
"""
Provision the services that are required for this job
"""
slug = 'docker_provision'
def get_services(self):
return [
ServiceBase.from_image(conf['name'],
name=conf.get('alias', None),
meta={'config': conf},
)
for conf in self.job.job_config.services
]
def runnable_inline(self, service, image_id, handle, faux_log):
defaults = {'status': "Starting service %s" % service.display}
with faux_log.more_defaults(**defaults):
faux_log.update()
service_config = service.meta['config']
service_kwargs = {
key: value for key, value in service_config.items()
if key in ('command', 'environment')
}
try:
container = self.job.docker_client.create_container(
image=image_id,
**service_kwargs
)
self.job.docker_client.start(container['Id'])
# Store the provisioning info
# pylint:disable=protected-access
self.job._provisioned_containers.append({
'service': service,
'config': service_config,
'id': container['Id'],
})
faux_log.update(progress="Done")
except docker.errors.APIError as ex:
faux_log.update(error=ex.explanation.decode())
return False
return True
class UtilStage(InlineProjectStage):
""" Create, and run a utility stage container """
def __init__(self, job, workdir, slug_suffix, config):
super(UtilStage, self).__init__(job)
self.workdir = workdir
self.slug = "utility_%s" % slug_suffix
self.config = config
def get_services(self):
return [
ServiceBase.from_image(self.config['name'],
meta={'config': self.config},
)
]
def add_files(self, base_image_id, faux_log):
"""
Add files in the util config to a temporary image that will be used for
running the util
Args:
base_image_id (str): Image ID to use in the Dockerfile FROM
faux_log: The faux docker log object
Returns:
str: New image ID with files added
bool: False if failure
"""
input_files = self.config['input']
success = True
if not input_files:
faux_log.update(progress="Skipped")
return base_image_id
# Create the temp Dockerfile
tmp_file = py.path.local.mkdtemp(self.workdir).join("Dockerfile")
with tmp_file.open('w') as h_dockerfile:
h_dockerfile.write('FROM %s\n' % base_image_id)
for file_line in input_files:
h_dockerfile.write('ADD %s %s\n' % (
file_line['from'],
file_line['to'],
))
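        # As a rough illustration (hypothetical values): for a base image id
        # of 'abc123' and input of [{'from': 'data.csv', 'to': '/data/'}],
        # the temporary Dockerfile written above would contain:
        #
        #     FROM abc123
        #     ADD data.csv /data/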
# Run the build
rel_workdir = self.workdir.bestrelpath(tmp_file)
output = self.job.docker_client.build(
path=self.workdir.strpath,
dockerfile=rel_workdir,
nocache=True,
rm=True,
forcerm=True,
stream=True,
)
# Watch for errors
for line in normalize_stream_lines(output):
if len(line) == 0:
continue
_, line_str = bytes_str(line)
data = json.loads(line_str)
if 'errorDetail' in data:
faux_log.update(**data)
success = False
self.job.docker_client.close()
if success:
image_id = built_docker_image_id(data)
if image_id is None:
faux_log.update(status="Couldn't determine new image ID",
progress="Failed")
return False
faux_log.update(progress="Done")
return image_id
else:
faux_log.update(progress="Failed")
return False
def run_util(self, image_id, handle, faux_log):
"""
Run the temp util image with the config command, and output the stream
to the given file handle
Args:
image_id (str): New util image to run, with files added
handle: File-like object to stream the Docker output to
faux_log: The faux docker log object
Returns:
tuple(str, bool): Container ID, and success/fail
"""
service_kwargs = {
key: value for key, value in self.config.items()
if key in ('command', 'environment')
}
container = {}
try:
container = self.job.docker_client.create_container(
image=image_id,
**service_kwargs
)
stream = self.job.docker_client.attach(
container['Id'],
stream=True,
)
self.job.docker_client.start(container['Id'])
except docker.errors.APIError as ex:
faux_log.update(error=ex.explanation.decode())
return container.get('Id', None), False
for line in stream:
if isinstance(line, bytes):
handle.write(line)
else:
handle.write(line.encode())
handle.flush()
return container['Id'], True
def retrieve_files(self, container_id, faux_log):
"""
Retrieve the files in the job config from the utility container
Args:
container_id (str): ID of a container to copy files from. Most likely
the completed utility container
faux_log: The faux docker log object
Returns:
bool: True when all files retrieved as expected, False otherwise
"""
output_files = self.config['output']
success = True
if not output_files:
faux_log.update(id="output", progress="Skipped")
for output_idx, output_set in enumerate(output_files):
if isinstance(output_set, dict):
try:
remote_spath = output_set['from']
except KeyError:
defaults = {
'id': "output.%s" % output_idx,
'progress': "Failed",
}
with faux_log.more_defaults(**defaults):
faux_log.update(status="Reading configuration")
faux_log.update(error="No required 'from' parameter")
success = False
continue
local_spath = output_set.get('to', '.')
else:
local_spath = '.'
remote_spath = output_set
defaults = {
'id': "output.%s" % local_spath,
'status': "Copying from '%s'" % remote_spath,
}
with faux_log.more_defaults(**defaults):
faux_log.update()
local_path = self.workdir.join(local_spath)
if not path_contained(self.workdir, local_path):
faux_log.update(
error="Path not contained within the working "
"directory",
progress="Failed",
)
success = False
continue
response = self.job.docker_client.copy(
container_id, remote_spath
)
intermediate = tarfile.open(name='output.tar',
mode='r|',
fileobj=response)
intermediate.extractall(local_path.strpath)
faux_log.update(progress="Done")
return success
def cleanup(self,
base_image_id,
image_id,
container_id,
faux_log,
):
"""
        Clean up after the util stage is done processing. Removes the container
        and temp image. Doesn't remove the image if it hasn't changed from the
        base image.
Args:
base_image_id (str): Original ID of the utility base image
image_id (str): ID of the image used by the utility run
container_id (str): ID of the container the utility run created
faux_log: The faux docker log object
Returns:
bool: Whether the cleanup was successful or not
"""
def cleanup_container():
""" Remove the container """
self.job.docker_client.remove_container(container_id)
return True
def cleanup_image():
""" Remove the image, unless it's base """
if image_id is None:
return False
min_len = min(len(base_image_id), len(image_id))
if base_image_id[:min_len] == image_id[:min_len]:
return False
self.job.docker_client.remove_image(image_id)
return True
success = True
cleanups = (
('container', cleanup_container, container_id),
('image', cleanup_image, image_id),
)
for obj_name, func, obj_id in cleanups:
defaults = {
'id': "cleanup.%s" % obj_id,
'status': "Cleaning up %s" % obj_name
}
with faux_log.more_defaults(**defaults):
faux_log.update()
try:
done = func()
faux_log.update(
progress="Done" if done else "Skipped"
)
except docker.errors.APIError as ex:
faux_log.update(error=ex.explanation.decode())
success = False
return success
def generate_data(self, service, base_image_id, handle, faux_log):
"""
Adds files, runs the container, retrieves output, and cleans up
"""
defaults = {
'id': "input",
'status': "Adding files",
}
with faux_log.more_defaults(**defaults):
faux_log.update()
image_id = self.add_files(base_image_id, faux_log)
if image_id is False:
return False
container_id = None
success = True
try:
defaults = {'status': "Starting utility %s" % service.display}
with faux_log.more_defaults(**defaults):
faux_log.update()
container_id, success = self.run_util(
image_id, handle, faux_log,
)
if success:
with faux_log.more_defaults(id="cleanup"):
faux_log.update(status="Collecting status")
exit_code = self.job.docker_client.inspect_container(
container_id
)['State']['ExitCode']
if exit_code != 0:
faux_log.update(
id="exit",
error="Exit code was %d" % exit_code
)
success = False
if success:
defaults = {'status': "Getting files"}
with faux_log.more_defaults(**defaults):
faux_log.update()
success = success & self.retrieve_files(
container_id, faux_log,
)
except Exception:
self.cleanup(base_image_id,
image_id,
container_id,
faux_log,
)
raise
else:
success = success & self.cleanup(base_image_id,
image_id,
container_id,
faux_log,
)
return success
def runnable_inline(self, service, base_image_id, handle, faux_log):
"""
Inline runner for utility projects
Args:
service (dockci.models.base.ServiceBase): Service that this stage
uses the image from
base_image_id (str): Image ID of the utility base
handle: Stream handle for raw output
faux_log: The faux docker log object
Returns:
bool: True on all success, False on at least 1 failure
"""
input_files = self.config['input']
output_files = self.config['output']
blob_store = None
if input_files:
blob_store = FilesystemBlob.from_files(
CONFIG.blob_path,
self.workdir,
[
self.workdir.join(input_data['from'])
for input_data
in input_files
],
meta={'image': service.image},
)
for output_data in output_files:
blob_store.add_data(output_data['to'])
if blob_store and blob_store.exists:
blob_store.extract()
return True
else:
ret = self.generate_data(service, base_image_id, handle, faux_log)
if ret and blob_store:
blob_store.write()
return ret
@classmethod
def slug_suffixes(cls, utility_names):
""" See ``slug_suffixes_gen`` """
return list(cls.slug_suffixes_gen(utility_names))
@classmethod
def slug_suffixes_gen(cls, utility_names):
"""
Generate utility names into unique slug suffixes by adding a counter to
the end, if there are duplicates
"""
totals = defaultdict(int)
for name in utility_names:
totals[name] += 1
counters = defaultdict(int)
for name in utility_names:
if totals[name] > 1:
counters[name] += 1
yield '%s_%d' % (name, counters[name])
else:
yield name
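        # For example (illustrative input), ['lint', 'test', 'lint'] yields
        # 'lint_1', 'test', 'lint_2': only names that appear more than once
        # get a numeric suffix; unique names keep their plain slug.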
class DockerLoginStage(JobStageBase):
""" Find, and login to registries that have auth config """
slug = 'docker_login'
def __init__(self, job, workdir):
super(DockerLoginStage, self).__init__(job)
self.workdir = workdir
def login_registry(self, handle, username, password, email, base_name):
""" Handle login to the given registry model """
err = None
try:
response = self.job.docker_client.login(
username=username,
password=password,
email=email,
registry=base_name,
)
handle.write(('%s\n' % response['Status']).encode())
handle.flush()
except KeyError:
err = "Unknown response: %s" % response
except docker.errors.APIError as ex:
err = str(DockerAPIError(
self.job.docker_client, ex,
))
if err:
handle.write(('FAILED: %s\n' % err).encode())
handle.flush()
raise StageFailedError(
message=err,
handled=True,
)
def handle_registry(self, handle, base_name, registry):
""" Handle login if necessary """
auth_registry = (
registry is not None and (
registry.username is not None or
registry.password is not None or
registry.email is not None
)
)
if auth_registry:
handle.write(("Logging into '%s' registry: " % (
registry.display_name,
)).encode())
handle.flush()
self.login_registry(
handle,
registry.username,
registry.password,
registry.email,
base_name,
)
else:
display_name = registry.display_name if registry else base_name
handle.write(("Unauthenticated for '%s' registry\n" % (
display_name,
)).encode())
handle.flush()
def runnable(self, handle):
""" Load the Dockerfile, scan for FROM line, login """
registries = {}
# pylint:disable=protected-access
for stage in self.job._stage_objects.values():
if hasattr(stage, 'get_services'):
for service in stage.get_services():
registry_value = registries.setdefault(
service.base_registry, None,
)
registry = service.auth_registry
if registry_value is None and registry is not None:
registries[service.base_registry] = registry
for base_name, registry in registries.items():
self.handle_registry(handle, base_name, registry)
return 0
| isc | -4,595,545,965,522,747,000 | 32.735135 | 79 | 0.491267 | false |
sontek/bulby | setup.py | 1 | 1741 | '''
Setup configuration
'''
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
from pip.req import parse_requirements
from pip.download import PipSession
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements(
'requirements/install.txt', session=PipSession()
)
reqs = [str(ir.req) for ir in install_reqs]
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(here, 'CHANGES.rst'), encoding='utf-8') as f:
long_description = '%s\n\n%s' % (long_description, f.read())
setup(
name='bulby',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.1.dev0',
description='Manages the phillips hue lightbulbs',
long_description=long_description,
url='https://github.com/sontek/bulby.git',
author='John Anderson',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
keywords='etl extract transform load',
packages=find_packages(exclude=['docs', 'tests*']),
install_requires=reqs,
setup_requires=['setuptools-git'],
entry_points={
'paste.app_factory': [
'main=liberator:main',
],
},
)
| mit | -981,037,826,163,586,700 | 30.089286 | 78 | 0.677771 | false |
ray-project/ray | python/ray/util/collective/tests/distributed_gpu_tests/test_distributed_allreduce.py | 1 | 5765 | """Test the collective allreduice API on a distributed Ray cluster."""
import pytest
import ray
from ray.util.collective.types import ReduceOp
import cupy as cp
import torch
from ray.util.collective.tests.util import create_collective_workers
@pytest.mark.parametrize("group_name", ["default", "test", "123?34!"])
@pytest.mark.parametrize("world_size", [2, 3, 4])
def test_allreduce_different_name(ray_start_distributed_2_nodes_4_gpus,
group_name, world_size):
actors, _ = create_collective_workers(
num_workers=world_size, group_name=group_name)
results = ray.get([a.do_allreduce.remote(group_name) for a in actors])
assert (results[0] == cp.ones((10, ), dtype=cp.float32) * world_size).all()
assert (results[1] == cp.ones((10, ), dtype=cp.float32) * world_size).all()
@pytest.mark.parametrize("array_size", [2, 2**5, 2**10, 2**15, 2**20])
def test_allreduce_different_array_size(ray_start_distributed_2_nodes_4_gpus,
array_size):
world_size = 4
actors, _ = create_collective_workers(world_size)
ray.wait([
a.set_buffer.remote(cp.ones(array_size, dtype=cp.float32))
for a in actors
])
results = ray.get([a.do_allreduce.remote() for a in actors])
assert (results[0] == cp.ones(
(array_size, ), dtype=cp.float32) * world_size).all()
assert (results[1] == cp.ones(
(array_size, ), dtype=cp.float32) * world_size).all()
def test_allreduce_destroy(ray_start_distributed_2_nodes_4_gpus,
backend="nccl",
group_name="default"):
world_size = 4
actors, _ = create_collective_workers(world_size)
results = ray.get([a.do_allreduce.remote() for a in actors])
assert (results[0] == cp.ones((10, ), dtype=cp.float32) * world_size).all()
assert (results[1] == cp.ones((10, ), dtype=cp.float32) * world_size).all()
# destroy the group and try do work, should fail
ray.get([a.destroy_group.remote() for a in actors])
with pytest.raises(RuntimeError):
results = ray.get([a.do_allreduce.remote() for a in actors])
# reinit the same group and all reduce
ray.get([
actor.init_group.remote(world_size, i, backend, group_name)
for i, actor in enumerate(actors)
])
results = ray.get([a.do_allreduce.remote() for a in actors])
assert (results[0] == cp.ones(
(10, ), dtype=cp.float32) * world_size * world_size).all()
assert (results[1] == cp.ones(
(10, ), dtype=cp.float32) * world_size * world_size).all()
def test_allreduce_multiple_group(ray_start_distributed_2_nodes_4_gpus,
backend="nccl",
num_groups=5):
world_size = 4
actors, _ = create_collective_workers(world_size)
for group_name in range(1, num_groups):
ray.get([
actor.init_group.remote(world_size, i, backend, str(group_name))
for i, actor in enumerate(actors)
])
for i in range(num_groups):
group_name = "default" if i == 0 else str(i)
results = ray.get([a.do_allreduce.remote(group_name) for a in actors])
assert (results[0] == cp.ones(
(10, ), dtype=cp.float32) * (world_size**(i + 1))).all()
def test_allreduce_different_op(ray_start_distributed_2_nodes_4_gpus):
world_size = 4
actors, _ = create_collective_workers(world_size)
# check product
ray.wait([
a.set_buffer.remote(cp.ones(10, dtype=cp.float32) * (i + 2))
for i, a in enumerate(actors)
])
results = ray.get(
[a.do_allreduce.remote(op=ReduceOp.PRODUCT) for a in actors])
assert (results[0] == cp.ones((10, ), dtype=cp.float32) * 120).all()
assert (results[1] == cp.ones((10, ), dtype=cp.float32) * 120).all()
# check min
ray.wait([
a.set_buffer.remote(cp.ones(10, dtype=cp.float32) * (i + 2))
for i, a in enumerate(actors)
])
results = ray.get([a.do_allreduce.remote(op=ReduceOp.MIN) for a in actors])
assert (results[0] == cp.ones((10, ), dtype=cp.float32) * 2).all()
assert (results[1] == cp.ones((10, ), dtype=cp.float32) * 2).all()
# check max
ray.wait([
a.set_buffer.remote(cp.ones(10, dtype=cp.float32) * (i + 2))
for i, a in enumerate(actors)
])
results = ray.get([a.do_allreduce.remote(op=ReduceOp.MAX) for a in actors])
assert (results[0] == cp.ones((10, ), dtype=cp.float32) * 5).all()
assert (results[1] == cp.ones((10, ), dtype=cp.float32) * 5).all()
@pytest.mark.parametrize("dtype",
[cp.uint8, cp.float16, cp.float32, cp.float64])
def test_allreduce_different_dtype(ray_start_distributed_2_nodes_4_gpus,
dtype):
world_size = 4
actors, _ = create_collective_workers(world_size)
ray.wait([a.set_buffer.remote(cp.ones(10, dtype=dtype)) for a in actors])
results = ray.get([a.do_allreduce.remote() for a in actors])
assert (results[0] == cp.ones((10, ), dtype=dtype) * world_size).all()
assert (results[1] == cp.ones((10, ), dtype=dtype) * world_size).all()
def test_allreduce_torch_cupy(ray_start_distributed_2_nodes_4_gpus):
# import torch
world_size = 4
actors, _ = create_collective_workers(world_size)
ray.wait([actors[1].set_buffer.remote(torch.ones(10, ).cuda())])
results = ray.get([a.do_allreduce.remote() for a in actors])
assert (results[0] == cp.ones((10, )) * world_size).all()
ray.wait([actors[0].set_buffer.remote(torch.ones(10, ))])
ray.wait([actors[1].set_buffer.remote(cp.ones(10, ))])
with pytest.raises(RuntimeError):
results = ray.get([a.do_allreduce.remote() for a in actors])
| apache-2.0 | -6,473,080,763,864,086,000 | 40.47482 | 79 | 0.608846 | false |
stvstnfrd/edx-platform | common/djangoapps/util/tests/test_course.py | 1 | 4815 | """
Tests for course utils.
"""
import ddt
import mock
from django.conf import settings
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from common.djangoapps.util.course import get_link_for_about_page
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
class TestCourseSharingLinks(ModuleStoreTestCase):
"""
Tests for course sharing links.
"""
def setUp(self):
super(TestCourseSharingLinks, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
# create test mongo course
self.course = CourseFactory.create(
org='test_org',
number='test_number',
run='test_run',
default_store=ModuleStoreEnum.Type.split,
social_sharing_url='test_social_sharing_url',
)
# load this course into course overview and set it's marketing url
self.course_overview = CourseOverview.get_from_id(self.course.id)
self.course_overview.marketing_url = 'test_marketing_url'
self.course_overview.save()
def get_course_sharing_link(self, enable_social_sharing, enable_mktg_site, use_overview=True):
"""
Get course sharing link.
Arguments:
enable_social_sharing(Boolean): To indicate whether social sharing is enabled.
enable_mktg_site(Boolean): A feature flag to decide activation of marketing site.
Keyword Arguments:
use_overview: indicates whether course overview or course descriptor should get
past to get_link_for_about_page.
Returns course sharing url.
"""
mock_settings = {
'FEATURES': {
'ENABLE_MKTG_SITE': enable_mktg_site
},
'SOCIAL_SHARING_SETTINGS': {
'CUSTOM_COURSE_URLS': enable_social_sharing
},
}
with mock.patch.multiple('django.conf.settings', **mock_settings):
course_sharing_link = get_link_for_about_page(
self.course_overview if use_overview else self.course
)
return course_sharing_link
@ddt.data(
(True, True, 'test_social_sharing_url'),
(False, True, 'test_marketing_url'),
(True, False, 'test_social_sharing_url'),
(False, False, '{}/courses/course-v1:test_org+test_number+test_run/about'.format(settings.LMS_ROOT_URL)),
)
@ddt.unpack
def test_sharing_link_with_settings(self, enable_social_sharing, enable_mktg_site, expected_course_sharing_link):
"""
Verify the method gives correct course sharing url on settings manipulations.
"""
actual_course_sharing_link = self.get_course_sharing_link(
enable_social_sharing=enable_social_sharing,
enable_mktg_site=enable_mktg_site,
)
assert actual_course_sharing_link == expected_course_sharing_link
@ddt.data(
(['social_sharing_url'], 'test_marketing_url'),
(['marketing_url'], 'test_social_sharing_url'),
(
['social_sharing_url', 'marketing_url'],
'{}/courses/course-v1:test_org+test_number+test_run/about'.format(settings.LMS_ROOT_URL)
),
)
@ddt.unpack
def test_sharing_link_with_course_overview_attrs(self, overview_attrs, expected_course_sharing_link):
"""
Verify the method gives correct course sharing url when:
1. Neither marketing url nor social sharing url is set.
2. Either marketing url or social sharing url is set.
"""
for overview_attr in overview_attrs:
setattr(self.course_overview, overview_attr, None)
self.course_overview.save()
actual_course_sharing_link = self.get_course_sharing_link(
enable_social_sharing=True,
enable_mktg_site=True,
)
assert actual_course_sharing_link == expected_course_sharing_link
@ddt.data(
(True, 'test_social_sharing_url'),
(
False,
'{}/courses/course-v1:test_org+test_number+test_run/about'.format(settings.LMS_ROOT_URL)
),
)
@ddt.unpack
def test_sharing_link_with_course_descriptor(self, enable_social_sharing, expected_course_sharing_link):
"""
Verify the method gives correct course sharing url on passing
course descriptor as a parameter.
"""
actual_course_sharing_link = self.get_course_sharing_link(
enable_social_sharing=enable_social_sharing,
enable_mktg_site=True,
use_overview=False,
)
assert actual_course_sharing_link == expected_course_sharing_link
| agpl-3.0 | -9,002,767,033,994,760,000 | 36.325581 | 117 | 0.635722 | false |
emmanuelle/scikits.image | skimage/io/_io.py | 2 | 5788 | __all__ = ['Image', 'imread', 'imread_collection', 'imsave', 'imshow', 'show',
'push', 'pop']
from skimage.io._plugins import call as call_plugin
from skimage.color import rgb2grey
import numpy as np
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# Shared image stack
_image_stack = []
class Image(np.ndarray):
"""Class representing Image data.
These objects have tags for image metadata and IPython display protocol
methods for image display.
"""
tags = {'filename': '',
'EXIF': {},
'info': {}}
def __new__(cls, arr, **kwargs):
"""Set the image data and tags according to given parameters.
Input:
------
arr : ndarray
Image data.
kwargs : Image tags as keywords
Specified in the form ``tag0=value``, ``tag1=value``.
"""
x = np.asarray(arr).view(cls)
for tag, value in Image.tags.items():
setattr(x, tag, kwargs.get(tag, getattr(arr, tag, value)))
return x
def _repr_png_(self):
return self._repr_image_format('png')
def _repr_jpeg_(self):
return self._repr_image_format('jpeg')
def _repr_image_format(self, format_str):
str_buffer = StringIO.StringIO()
imsave(str_buffer, self, format_str=format_str)
return_str = str_buffer.getvalue()
str_buffer.close()
return return_str
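# Minimal illustration of the tag handling above (hypothetical file name):
#
#     img = Image(np.zeros((5, 5)), filename='blank.png')
#     img.filename    # -> 'blank.png'
#     img.EXIF        # -> {} (the default tag value)
#
# Tags not passed as keywords fall back to the same attribute on the source
# array, or to the defaults in ``Image.tags``.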
def push(img):
"""Push an image onto the shared image stack.
Parameters
----------
img : ndarray
Image to push.
"""
if not isinstance(img, np.ndarray):
raise ValueError("Can only push ndarrays to the image stack.")
_image_stack.append(img)
def pop():
"""Pop an image from the shared image stack.
Returns
-------
img : ndarray
Image popped from the stack.
"""
return _image_stack.pop()
def imread(fname, as_grey=False, plugin=None, flatten=None,
**plugin_args):
"""Load an image from file.
Parameters
----------
fname : string
Image file name, e.g. ``test.jpg``.
as_grey : bool
If True, convert color images to grey-scale (32-bit floats).
Images that are already in grey-scale format are not converted.
plugin : str
Name of plugin to use (Python Imaging Library by default).
Other Parameters
----------------
flatten : bool
Backward compatible keyword, superseded by `as_grey`.
Returns
-------
img_array : ndarray
The different colour bands/channels are stored in the
third dimension, such that a grey-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4.
Other parameters
----------------
plugin_args : keywords
Passed to the given plugin.
"""
# Backward compatibility
if flatten is not None:
as_grey = flatten
img = call_plugin('imread', fname, plugin=plugin, **plugin_args)
if as_grey and getattr(img, 'ndim', 0) >= 3:
img = rgb2grey(img)
return Image(img)
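# Example (hypothetical file name):
#
#     img = imread('photo.jpg', as_grey=True)
#     img.ndim   # -> 2; colour images are collapsed to grey-scale floats
#
# The result is an ``Image`` (an ndarray subclass), so the metadata tags
# defined above are available alongside the pixel data.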
def imread_collection(load_pattern, conserve_memory=True,
plugin=None, **plugin_args):
"""
Load a collection of images.
Parameters
----------
load_pattern : str or list
List of objects to load. These are usually filenames, but may
vary depending on the currently active plugin. See the docstring
for ``ImageCollection`` for the default behaviour of this parameter.
conserve_memory : bool, optional
If True, never keep more than one in memory at a specific
time. Otherwise, images will be cached once they are loaded.
Returns
-------
ic : ImageCollection
Collection of images.
Other parameters
----------------
plugin_args : keywords
Passed to the given plugin.
"""
return call_plugin('imread_collection', load_pattern, conserve_memory,
plugin=plugin, **plugin_args)
def imsave(fname, arr, plugin=None, **plugin_args):
"""Save an image to file.
Parameters
----------
fname : str
Target filename.
arr : ndarray of shape (M,N) or (M,N,3) or (M,N,4)
Image data.
plugin : str
Name of plugin to use. By default, the different plugins are
tried (starting with the Python Imaging Library) until a suitable
candidate is found.
Other parameters
----------------
plugin_args : keywords
Passed to the given plugin.
"""
return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)
def imshow(arr, plugin=None, **plugin_args):
"""Display an image.
Parameters
----------
arr : ndarray or str
Image data or name of image file.
plugin : str
Name of plugin to use. By default, the different plugins are
tried (starting with the Python Imaging Library) until a suitable
candidate is found.
Other parameters
----------------
plugin_args : keywords
Passed to the given plugin.
"""
if isinstance(arr, basestring):
arr = call_plugin('imread', arr, plugin=plugin)
return call_plugin('imshow', arr, plugin=plugin, **plugin_args)
def show():
'''Display pending images.
Launch the event loop of the current gui plugin, and display all
pending images, queued via `imshow`. This is required when using
`imshow` from non-interactive scripts.
A call to `show` will block execution of code until all windows
have been closed.
Examples
--------
>>> import skimage.io as io
>>> for i in range(4):
... io.imshow(np.random.random((50, 50)))
>>> io.show()
'''
return call_plugin('_app_show')
| bsd-3-clause | 2,946,779,465,856,008,000 | 24.724444 | 78 | 0.596579 | false |
zjuchenyuan/BioWeb | Lib/Bio/KDTree/KDTree.py | 1 | 8423 | # Copyright 2004 by Thomas Hamelryck.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""KD tree data structure for searching N-dimensional vectors.
The KD tree data structure can be used for all kinds of searches that
involve N-dimensional vectors, e.g. neighbor searches (find all points
within a radius of a given point) or finding all point pairs in a set
that are within a certain radius of each other. See "Computational Geometry:
Algorithms and Applications" (Mark de Berg, Marc van Kreveld, Mark Overmars,
Otfried Schwarzkopf). Author: Thomas Hamelryck.
"""
from __future__ import print_function
from numpy import sum, sqrt, array
from numpy import random
from Bio.KDTree import _CKDTree
def _dist(p, q):
diff = p - q
return sqrt(sum(diff * diff))
def _neighbor_test(nr_points, dim, bucket_size, radius):
"""Test all fixed radius neighbor search.
Test all fixed radius neighbor search using the
KD tree C module.
Arguments:
- nr_points: number of points used in test
- dim: dimension of coords
- bucket_size: nr of points per tree node
- radius: radius of search (typically 0.05 or so)
Returns true if the test passes.
"""
# KD tree search
kdt = _CKDTree.KDTree(dim, bucket_size)
coords = random.random((nr_points, dim))
kdt.set_data(coords)
neighbors = kdt.neighbor_search(radius)
r = [neighbor.radius for neighbor in neighbors]
if r is None:
l1 = 0
else:
l1 = len(r)
# now do a slow search to compare results
neighbors = kdt.neighbor_simple_search(radius)
r = [neighbor.radius for neighbor in neighbors]
if r is None:
l2 = 0
else:
l2 = len(r)
if l1 == l2:
# print("Passed.")
return True
else:
print("Not passed: %i != %i." % (l1, l2))
return False
def _test(nr_points, dim, bucket_size, radius):
"""Test neighbor search.
Test neighbor search using the KD tree C module.
Arguments:
- nr_points: number of points used in test
- dim: dimension of coords
- bucket_size: nr of points per tree node
- radius: radius of search (typically 0.05 or so)
Returns true if the test passes.
"""
# kd tree search
kdt = _CKDTree.KDTree(dim, bucket_size)
coords = random.random((nr_points, dim))
center = coords[0]
kdt.set_data(coords)
kdt.search_center_radius(center, radius)
r = kdt.get_indices()
if r is None:
l1 = 0
else:
l1 = len(r)
l2 = 0
# now do a manual search to compare results
for i in range(0, nr_points):
p = coords[i]
if _dist(p, center) <= radius:
l2 = l2 + 1
if l1 == l2:
# print("Passed.")
return True
else:
print("Not passed: %i != %i." % (l1, l2))
return False
class KDTree(object):
"""KD tree implementation (C++, SWIG python wrapper)
The KD tree data structure can be used for all kinds of searches that
involve N-dimensional vectors, e.g. neighbor searches (find all points
within a radius of a given point) or finding all point pairs in a set
that are within a certain radius of each other.
Reference:
Computational Geometry: Algorithms and Applications
Second Edition
Mark de Berg, Marc van Kreveld, Mark Overmars, Otfried Schwarzkopf
published by Springer-Verlag
2nd rev. ed. 2000.
ISBN: 3-540-65620-0
The KD tree data structure is described in chapter 5, pg. 99.
The following article made clear to me that the nodes should
contain more than one point (this leads to dramatic speed
improvements for the "all fixed radius neighbor search", see
below):
JL Bentley, "Kd trees for semidynamic point sets," in Sixth Annual ACM
Symposium on Computational Geometry, vol. 91. San Francisco, 1990
This KD implementation also performs a "all fixed radius neighbor search",
i.e. it can find all point pairs in a set that are within a certain radius
of each other. As far as I know the algorithm has not been published.
"""
def __init__(self, dim, bucket_size=1):
self.dim = dim
self.kdt = _CKDTree.KDTree(dim, bucket_size)
self.built = 0
# Set data
def set_coords(self, coords):
"""Add the coordinates of the points.
Arguments:
- coords: two dimensional NumPy array. E.g. if the points
have dimensionality D and there are N points, the coords
array should be NxD dimensional.
"""
if coords.min() <= -1e6 or coords.max() >= 1e6:
raise Exception("Points should lie between -1e6 and 1e6")
if len(coords.shape) != 2 or coords.shape[1] != self.dim:
raise Exception("Expected a Nx%i NumPy array" % self.dim)
self.kdt.set_data(coords)
self.built = 1
# Fixed radius search for a point
def search(self, center, radius):
"""Search all points within radius of center.
Arguments:
- center: one dimensional NumPy array. E.g. if the points have
dimensionality D, the center array should be D dimensional.
- radius: float>0
"""
if not self.built:
raise Exception("No point set specified")
if center.shape != (self.dim,):
raise Exception("Expected a %i-dimensional NumPy array"
% self.dim)
self.kdt.search_center_radius(center, radius)
def get_radii(self):
"""Return radii.
Return the list of distances from center after
a neighbor search.
"""
a = self.kdt.get_radii()
if a is None:
return []
return a
def get_indices(self):
"""Return the list of indices.
Return the list of indices after a neighbor search.
The indices refer to the original coords NumPy array. The
coordinates with these indices were within radius of center.
For an index pair, the first index<second index.
"""
a = self.kdt.get_indices()
if a is None:
return []
return a
# Fixed radius search for all points
def all_search(self, radius):
"""All fixed neighbor search.
Search all point pairs that are within radius.
Arguments:
- radius: float (>0)
"""
if not self.built:
raise Exception("No point set specified")
self.neighbors = self.kdt.neighbor_search(radius)
def all_get_indices(self):
"""Return All Fixed Neighbor Search results.
Return a Nx2 dim NumPy array containing
the indices of the point pairs, where N
is the number of neighbor pairs.
"""
a = array([[neighbor.index1, neighbor.index2] for neighbor in self.neighbors])
return a
def all_get_radii(self):
"""Return All Fixed Neighbor Search results.
Return an N-dim array containing the distances
of all the point pairs, where N is the number
of neighbor pairs..
"""
return [neighbor.radius for neighbor in self.neighbors]
if __name__ == "__main__":
nr_points = 100000
dim = 3
bucket_size = 10
query_radius = 10
coords = 200 * random.random((nr_points, dim))
kdtree = KDTree(dim, bucket_size)
# enter coords
kdtree.set_coords(coords)
# Find all point pairs within radius
kdtree.all_search(query_radius)
# get indices & radii of points
# indices is a list of tuples. Each tuple contains the
# two indices of a point pair within query_radius of
# each other.
indices = kdtree.all_get_indices()
radii = kdtree.all_get_radii()
print("Found %i point pairs within radius %f." % (len(indices), query_radius))
# Do 10 individual queries
for i in range(0, 10):
# pick a random center
center = random.random(dim)
# search neighbors
kdtree.search(center, query_radius)
# get indices & radii of points
indices = kdtree.get_indices()
radii = kdtree.get_radii()
x, y, z = center
print("Found %i points in radius %f around center (%.2f, %.2f, %.2f)." % (len(indices), query_radius, x, y, z))
| mit | 3,298,446,371,348,305,400 | 29.407942 | 119 | 0.628161 | false |
whcacademy/imageDownloader | googleImageDownload.py | 1 | 7660 | import requests
import os
import re
import time
from selenium import webdriver
import multiprocessing
import sys
from socket import error as SocketError
import errno
import argparse
import imghdr
import uuid
import csv
import codecs
import platform
import downloader
# define default chrome download path
global default_download_path
default_download_path = os.path.join(os.getcwd(), 'download_urls')
if not os.path.exists(default_download_path):
os.mkdir(default_download_path)
global isWindows
if re.search('windows', platform.platform(), re.IGNORECASE):
isWindows = True
else:
isWindows = False
# use selenium to get the list of URLs
def openBrowserRecursively(total, idName, browser):
try:
for i in range(total):
iterator = i * 100
url = r"https://www.google.com/search?q={word}&newwindow=1&biw=300&bih=629&tbm=isch&ijn={times}&start={start}"
try:
browser.get(url.format(word= idName, start=iterator,times = i))
except SocketError as e:
if e.errno != errno.ECONNRESET:
                    raise  # re-raise anything that isn't a connection reset
pass
time.sleep(1.5) # 1.5 seconds is the tuned time for HKU service not to be monitored and closed
except:
if isWindows:
os.system("taskkill /im chrome.exe /F")
        else:
os.system("kill " + str(os.getpid()))
openBrowserRecursively(total, idName, browser)
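# For illustration, a query of "red+panda" makes the first page request look
# roughly like this (parameters exactly as in the template above):
#   https://www.google.com/search?q=red+panda&newwindow=1&biw=300&bih=629&tbm=isch&ijn=0&start=0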
# basic session setup
def setupSession():
session = requests.Session()
session.header = { 'User-Agent': "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0","Accept-Encoding": "gzip, deflate, sdch"}
return session
class GoogleDownloader():
def __init__(self, nameList, root, size, process, browser):
assert browser != None, "drive cannot be None!"
self.process = process
self.browser = browser
self.nameList = nameList
self.size = size
self.root = root
# main crawling start
def run(self):
        for name in self.nameList:
            self.oneID(name)
def oneID(self, name):
wordSearch = ''
subcategory = name.split(' ')
name = name.replace(' ', '_')
wordSearch = subcategory[0]
if len(subcategory[1:]) >= 1:
for pt in subcategory[1:]:
wordSearch += "+" + pt
print (wordSearch.encode('utf-8'))
total = int(self.size / 100)
openBrowserRecursively(total, wordSearch, self.browser)
# after trigger getting the file list, then the file will be
# download but name with f.txt
global default_download_path
filepath = default_download_path
try:
for i in range(total):
iterator = i * 100
filename = os.path.join("results", name +".txt")
newName = name + '_' + str(i) +'.txt'
# here is the hardcode part
# one may change to his or her own default downloading folder
if i == 0:
if "f.txt" in os.listdir(filepath):
print ("change name to be " , newName.encode('utf-8'))
os.rename(os.path.join(filepath,'f.txt'), os.path.join(filepath,newName))
else:
fileSpecial = "f (%d).txt" % i
if fileSpecial in os.listdir(filepath):
print ("change name to be " , newName.encode('utf-8'))
os.rename(os.path.join(filepath,fileSpecial), os.path.join(filepath,newName))
else:
print ("fail to find the file")
except:
print("something bad happen, maybe encountering some repeated names")
os.remove(os.path.join(filepath, 'f.txt'))
return
# after rename and locate the url list, then we conduct the final crawling part
indexList = [i for i in range(1, 101)]
try:
folderName = self.makeFolder(name)
for i in range(total):
newName = name + '_' + str(i) +'.txt'
with codecs.open(os.path.join(filepath,newName),'r', encoding="utf-8") as myfile:
file1 = myfile.read()
results = re.findall(r'"ou":"(.+?)"',file1)
self.process.map(_download,
zip(results, [folderName] * len(results), indexList[:len(results)]))
fileList = os.listdir(folderName)
self.dump_imInfo(folderName, sorted(fileList, key=lambda x: int(x.split('.')[0])), results)
except IOError:
print ("can not find the file called:" , str(newName).encode('utf-8') , "and it may be caused by the bad connection or bad file got from server")
def makeFolder(self, fileName):
try:
if not os.path.exists(os.path.join(self.root, fileName)):
os.mkdir(os.path.join(self.root, fileName))
else:
print('duplicated root name')
except OSError as e:
if e.errno != 17:
raise
else:
pass
return os.path.join(self.root, fileName)
def dump_imInfo(self, folderName, fileList, results):
try:
with open(os.path.join(folderName, 'imInfo.csv'), 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(['img_name', 'uuid', 'url'])
for file in fileList:
index = int(file.split('.')[0])
writer.writerow([index,str(uuid.uuid4().hex),str(results[index-1])])
except:
print('error happens when writing imageInfo, maybe caused by duplicated name')
# function to get one image specified with one url
def _download(args):
url, folderName, index = args
session = setupSession()
try:
# time out is another parameter tuned
# fit for the network about 10Mb
image = session.get(url, timeout = 5)
imageName = str(index)
with open(os.path.join(folderName, imageName),'wb') as fout:
fout.write(image.content)
fileExtension = imghdr.what(os.path.join(folderName, imageName))
if fileExtension is None:
os.remove(os.path.join(folderName, imageName))
else:
newName = imageName + '.' + str(fileExtension)
os.rename(os.path.join(folderName, imageName), os.path.join(folderName, newName))
except Exception as e:
print ("failed to download one pages with url of " + str(url))
# basic funciton to get id list
def readFile(filename):
_list=[]
with codecs.open (filename, 'r', encoding='utf-8') as fin:
line = fin.readline()
while line:
_list.append(str(line).rstrip())
line = fin.readline()
return _list
def arg_parse():
parser = argparse.ArgumentParser(description='Argument Parser for google image downloader')
parser.add_argument('--root', help='output file root',
default='results', type=str)
parser.add_argument('--filename', help='the name of the file which constain the id',
default='testlist.txt', type=str)
parser.add_argument('--size', help='number of image per id',
default=100, type=int)
parser.add_argument('--process', help='number of process in parallel',
default=100, type=int)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = arg_parse()
start = time.time()
assert args.filename != None, "Name list cannot be None!"
# get all id as type of list of str
nameList = list(set(readFile(args.filename)))
# init processPool and browser driver
processPool = multiprocessing.Pool(args.process)
# init chrome driver with customized default download path
chromeOptions = webdriver.ChromeOptions()
preference = {'download.default_directory' : default_download_path,
'download.prompt_for_download': False}
chromeOptions.add_experimental_option("prefs",preference)
if isWindows:
chromedriver = os.path.join(os.getcwd(),'chromedriver.exe')
else:
chromedriver = os.path.join(os.getcwd(),'chromedriver')
browser = webdriver.Chrome(executable_path=chromedriver, chrome_options=chromeOptions)
# check if the output folder exists or not
if not os.path.exists(args.root):
os.mkdir(args.root)
# construct the downloader instance
gdownloader = GoogleDownloader(nameList = nameList, root = args.root, size = args.size,
process = processPool, browser = browser)
gdownloader.run()
# finish running
end = time.time()
browser.close()
print ('task end, time consumed:', end - start, 'seconds')
| mit | -4,718,019,722,629,552,000 | 32.160173 | 154 | 0.692298 | false |
yashwardhan7/PiCapture | PiCapture.py | 1 | 8249 | #!/usr/bin/python
import os
import sys
import time
import glob
import shutil
import argparse
import datetime
import threading
import subprocess
logOnConsole = False
def log(str):
global logOnConsole
if logOnConsole:
print str
def initializeDir(dirname):
if not os.path.isdir(dirname):
os.makedirs(dirname)
log('Created directory: {0}'.format(dirname))
def renameCapturedFiles(dirname, filePrefix, fileExtension):
capturedFiles = glob.glob('{0}/{1}*{2}'.format(dirname, filePrefix, fileExtension))
for file in capturedFiles:
newFilename = datetime.datetime.fromtimestamp(os.path.getctime(file)).strftime(
'{0}/%H%M%S{1}'.format(dirname, os.path.splitext(file)[1]))
os.rename(file, newFilename)
log('renamed {0} -> {1}'.format(file, newFilename))
def cmpImages(img1, img2):
if not os.path.isfile(img1):
return False
if not os.path.isfile(img2):
return False
# first check if the two images are different in size by a threshold
sz1 = os.stat(img1).st_size
sz2 = os.stat(img2).st_size
s1 = max(sz1,sz2)
s2 = max(1, min(sz1,sz2))
perc = ((s1/s2) - 1) * 100
if perc > 20:
return False
# next check the result of perceptual diff
try:
cmd = 'perceptualdiff -downsample 3 -colorfactor 0 {0} {1}'.format(img1, img2)
subprocess.check_output(cmd.split(), shell=False)
return True
except subprocess.CalledProcessError:
return False
except OSError:
print 'Error running perceptualdiff. Run apt-get install perceptualdiff.'
return False
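# In short (hypothetical file names): cmpImages('img00001.jpeg', 'img00002.jpeg')
# returns True only when the two frames are close in file size and
# perceptualdiff reports no visible difference, i.e. they are near duplicates
# that the timelapse can safely drop.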
def freeDiskSpace(dir):
for i in range(10): # retry few times
st = os.statvfs('/')
bavail = st.f_frsize * st.f_bavail # available disk space in bytes
if bavail < (1024*1024*512): # if available disk space is less than a threshold, free some more
canDelete = [os.path.join(dir, o) for o in sorted(os.listdir(dir)) if os.path.isdir(os.path.join(dir, o))]
if len(canDelete) <= 1:
break
log('freeing disk-space by deleting: {0}'.format(canDelete[0]))
shutil.rmtree(canDelete[0])
else:
break
def killProc(proc):
if proc:
proc.terminate()
def encodeTimelapseVideo(dir, fps):
# create symbolic link for *.jpg
# this is to workaround avconv issue with handling input file list
images = sorted(glob.glob('{0}/*.jpg'.format(dir)))
i=0
for img in images:
slnk = '{0}/img{1:0>6}.jpg'.format(dir, i)
log('symlink {0} --> {1}'.format(img, slnk))
try:
os.symlink(os.path.abspath(img), os.path.abspath(slnk))
except OSError:
pass
i+=1
# run avconv
cmd = 'avconv -r {0} -i {1}/img%06d.jpg -vcodec libx264 -crf 26 -g 15 -vf scale=576:352 -y {1}/vid.mp4'.format(fps, dir)
try:
log('Encoding video {0}'.format(dir))
subprocess.check_call(cmd.split(), shell=False)
except subprocess.CalledProcessError:
print 'Encoding failed.'
except OSError:
print 'Error running avconv. Run apt-get install libav-tools.'
# remove symlinks
slnks=glob.glob('{0}/img*.jpg'.format(dir))
for slnk in slnks:
log('remove symlink {0}'.format(slnk))
try:
os.remove(slnk)
except OSError:
pass
runBGThread=False
def bgThread(timeLapse, dir, imgPrefix, imgExt):
global runBGThread
log('Starting bgThread {0}'.format(dir))
while runBGThread:
try:
renameCapturedFiles(dir, imgPrefix, imgExt)
# process (erase similar images) recently captured images (.jpeg)
images = sorted(glob.glob('{0}/*{1}'.format(dir, imgExt)))
cImages = len(images)
if cImages <= 1:
time.sleep(timeLapse*4)
# if no more images were captured even after sleeping, exit this thread
if len(sorted(glob.glob('{0}/*{1}'.format(dir, imgExt)))) == cImages:
break
continue
prevImg = None
for img in images:
if not runBGThread:
renameCapturedFiles(dir, imgPrefix, imgExt)
break
if prevImg:
if cmpImages(prevImg, img):
# img is similar to prevImg, delete prevImg
os.remove(prevImg)
log('deleting dup: {0}'.format(prevImg))
else:
# prevImg is different than img, keep it and
# rename to .jpg so we dont process it again in next outer loop cycle
os.rename(prevImg, '{0}.jpg'.format(os.path.splitext(prevImg)[0]))
prevImg = img
except Exception, ex:
print "Exception in bgThread: {0} - {1}".format(type(ex).__name__, ex)
encodeTimelapseVideo(dir, 7)
log('Ending bgThread {0}'.format(dir))
# end bgThread
noirOptimization = '-ex night -drc high'
flipImage = '-hf -vf'
def captureImages(storageRoot, timeLapse=15):
global runBGThread
threadObj = None
bgThreadDir = None
filePrefix = 'img'
fileExt = '.jpeg'
while True:
try:
freeDiskSpace(storageRoot) # free disk space before starting capture
dt = datetime.datetime.now()
timeLeft = 86400 - (dt.hour*3600 + dt.minute*60 + dt.second)
runDuration = 600 # 10 min
if timeLeft < runDuration:
runDuration = timeLeft
# capture atleast 1 shot in a run
if timeLapse > runDuration:
timeLapse = runDuration
# start a run
currentDirname = '{0}/{1}'.format(storageRoot, dt.date().strftime('%Y%m%d'))
initializeDir(currentDirname)
cmdline = 'raspistill -w 1280 -h 960 --thumb none --exif none -n -q 50 -tl {0} -t {1} -o {2}'.format(
timeLapse*1000, runDuration*1000, '{0}/{1}%05d{2}'.format(currentDirname, filePrefix, fileExt))
proc = subprocess.Popen(cmdline.split() + noirOptimization.split(), shell=False)
log('Capturing images (pid={0}) to {1}'.format(proc.pid, currentDirname))
if (currentDirname != bgThreadDir) or (threadObj is None) or (not threadObj.isAlive()):
# if we are capturing in a different directory than bgThreadDir, start a new thread
# this thread will auto-exit when there are no new images being captured for currentDirname
runBGThread = True
bgThreadDir = currentDirname
threadObj = threading.Thread(target=bgThread, args=[timeLapse, bgThreadDir, filePrefix, fileExt])
threadObj.start()
time.sleep(runDuration)
killProc(proc)
except KeyboardInterrupt:
killProc(proc)
runBGThread = False # signal all bgthreads to exit
print 'waiting for background worker threads to exit'
return
def captureVideo(storageRoot, captureSpeed, videoStabilization):
filePrefix = 'vid'
fileExt = '.h264'
while True:
try:
freeDiskSpace(storageRoot) # free disk space before starting capture
dt = datetime.datetime.now()
runDuration = 86400 - (dt.hour*3600 + dt.minute*60 + dt.second)
# start a run
currentDirname = '{0}/{1}'.format(storageRoot, dt.date().strftime('%Y%m%d'))
initializeDir(currentDirname)
filename = '{0}/{1}00{2}'.format(currentDirname, filePrefix, fileExt)
cmdline = 'raspivid -w 800 -h 600 -qp 25 -fps {0} -t {1} -o {2}'.format(
30/captureSpeed, runDuration*1000, filename)
if videoStabilization:
cmdline += ' -vs'
proc = subprocess.Popen(cmdline.split() + noirOptimization.split(), shell=False)
log('Capturing video (pid={0}) to {1} @ {2}x'.format(proc.pid, filename, captureSpeed))
time.sleep(runDuration)
killProc(proc)
renameCapturedFiles(currentDirname, filePrefix, fileExt)
except KeyboardInterrupt:
killProc(proc)
renameCapturedFiles(currentDirname, filePrefix, fileExt)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(
    description='Raspberry Pi timelapse/video capture helper. Requires perceptualdiff, which is used to clean up duplicate captures in a timelapse.'
)
parser.add_argument('-d', metavar='directory', default='./cam', help='Directory where captured files are stored. Default: ./cam')
parser.add_argument('-l', action='store_true', default=False, help='Log information on console')
parser.add_argument('-t', metavar='seconds', type=int, help='Start timelapse capture with given duration in seconds')
parser.add_argument('-v', action='store_true', help='Start video capture')
parser.add_argument('-vf', metavar='speed_factor', default=2, type=int, help='Changes captured video speed by given factor. Default: 2')
parser.add_argument('-vs', action='store_true', default=False, help='Turn on video stabilization')
args = parser.parse_args()
logOnConsole = args.l
storageRoot = args.d
if args.v:
captureVideo(storageRoot, args.vf, args.vs)
elif args.t:
captureImages(storageRoot, args.t)
else:
parser.print_help()
| mit | 1,738,208,288,995,955,200 | 35.662222 | 142 | 0.697903 | false |
RuthAngus/kalesalad | code/kalesalad.py | 1 | 6367 | # Uses acf method to measure rotation periods for downloaded everest light
# curves.
import numpy as np
import matplotlib.pyplot as plt
import pyfits
from Kepler_ACF import corr_run
import os
from simple_acf import simple_acf
import sys
from multiprocessing import Pool
import pandas as pd
import glob
import astropy.stats as sps
import rotation as ro
import datetime
plotpar = {'axes.labelsize': 20,
'text.fontsize': 20,
'legend.fontsize': 20,
'xtick.labelsize': 20,
'ytick.labelsize': 20,
'text.usetex': True}
plt.rcParams.update(plotpar)
def sigma_clip(y, nsigma=3, npoints=100):
"""
    Sigma clipping for light curves.

    The light curve is split into consecutive chunks of npoints samples and
    points more than nsigma standard deviations away from each chunk's median
    are removed.
"""
new_y = []
x = np.linspace(0, 100, len(y))
for i in range(int(len(y)/npoints)):
# section = y[i:i + npoints]
section = y[i*npoints:(i + 1)*npoints]
med, std = np.median(section), np.std(section)
mask = (med - nsigma*std < section) * (section < med + nsigma*std)
new_y.append(section[mask])
last_bit = y[(i+1)*npoints:]
med, std = np.median(last_bit), np.std(last_bit)
mask = (med - nsigma*std < last_bit) * (last_bit < med + nsigma*std)
new_y.append(last_bit[mask])
filtered_y = np.array([i for j in new_y for i in j])
return filtered_y
def process_data(file, c):
"""
    Read the light curve from the fits file and sigma clip it.
    file (str): path to the everest fits light curve.
    c (str): campaign number, e.g. "1".
"""
with pyfits.open(file) as hdulist:
time, flux = hdulist[1].data["TIME"], hdulist[1].data["FLUX"]
# out = hdulist[1].data["OUTLIER"]
m = np.isfinite(time) * np.isfinite(flux) #* (out < 1)
x, med = time[m], np.median(flux[m])
y = flux[m]/med - 1 # median normalise
yerr = np.ones_like(y) * 1e-5
if c == "1":
cut = 100
x, y, yerr = x[cut:], y[cut:], yerr[cut:]
# Sigma clip
filtered_y = sigma_clip(y)
m = np.nonzero(np.in1d(y, filtered_y))[0]
return x[m], y[m], yerr[m]
def run_acf(c, epic, clobber=False, plot=True):
"""
Run the ACF on a light curve in the specified campaign.
FOR PARALLEL RUNS.
    c (str): campaign number, e.g. "1".
    epic (str): EPIC id of the target in campaign c.
"""
#period, acf_smooth, lags, rvar, peaks, dips, leftdips, rightdips, \
#bigpeaks = simple_acf(x, y)
v = "2.0"
filen = "hlsp_everest_k2_llc_{0}-c{1}_kepler_v{2}_lc.fits"\
.format(epic, c.zfill(2), v)
file = "data/c{0}/{1}".format(c.zfill(2), filen)
# Load time and flux
if not os.path.exists(file):
print(file, "file not found")
return None
try:
x, y, yerr = process_data(file, c=c)
except (IOError, ValueError):
print("Bad file", file)
return None
# compute the acf
period, acf_smooth, lags, rvar, peaks = simple_acf(x, y)
# make a plot
if plot:
plt.clf()
plt.subplot(2, 1, 1)
plt.plot(x-x[0], y, "k.")
plt.xlim(0, max(lags))
plt.xlabel("$\mathrm{Time~(days)}$")
plt.ylabel("$\mathrm{Normalised~flux}$")
plt.subplot(2, 1, 2)
plt.plot(lags, acf_smooth, "k")
plt.xlabel("$\mathrm{lags~(days)}$")
plt.ylabel("$\mathrm{ACF}$")
plt.axvline(period, color="m")
plt.xlim(min(lags), max(lags))
plt.subplots_adjust(left=.16, bottom=.12, hspace=.4)
plt.savefig("acfs/{}_acf".format(epic))
# Measure LS period
star = ro.prot(kepid=epic, x=x, y=y, yerr=yerr)
pgram_period = star.pgram_ps(filter_period=10, plot=True, cutoff=30,
clobber=clobber)
return epic, period
def run_kalesalad(c, N, clobber=False):
"""
Measure all rotation periods in a campaign - non parallel (for tests).
"""
todays_date = datetime.date.today()
results_file = "c{0}_periods_{1}.txt".format(c, todays_date)
assert not os.path.exists(results_file), "Old data file found, delete " \
"before proceeding"
with open(results_file, "a") as f:
f.write("{0} {1} {2} {3}\n".format("epic_id", "ACF_period",
"pgram_period",
"pgram_period_err"))
# df = pd.read_csv("c{}_targets.txt".format(c.zfill(2)), dtype=str)
df = pd.read_csv("tgas_epic_dwarfs.csv")
epic_ids = df.epic_number[df.k2_campaign_str=="{}".format(int(c))]
acf_periods, pgram_periods, pgram_period_errs, epics = [np.zeros(N) for i
in range(4)]
for i, epic in enumerate(epic_ids[:N]):
v = "2.0"
filen = "hlsp_everest_k2_llc_{0}-c{1}_kepler_v{2}_lc.fits"\
.format(epic, c.zfill(2), v)
file = "data/c{0}/{1}".format(c.zfill(2), filen)
# Load time and flux
if os.path.exists(file):
try:
x, y, yerr = process_data(file, c=c)
except (IOError, ValueError):
print("Bad file", file)
return None
# Measure ACF period
_, acf_period = run_acf(c, epic, clobber=clobber, plot=True)
# Measure LS period
star = ro.prot(kepid=epic, x=x, y=y, yerr=yerr)
pgram_period = star.pgram_ps(plot=True)
with open(results_file, "a") as f:
f.write("{0} {1} {2} {3}\n".format(epic, acf_period,
pgram_period[0],
pgram_period[1]))
else:
print(file, "file not found")
if __name__ == "__main__":
from functools import partial
c = str(sys.argv[1])
# open("c{0}_periods.txt".format(c), "w")
run_kalesalad(c, 196, clobber=True)
# df = pd.read_csv("c{}_targets.txt".format(c.zfill(2)), dtype=str)
# fns = df["epid"].values
# f = partial(run_acf, c)
# pool = Pool()
# for val in pool.map(f, fns):
# if val is None:
# continue
# epic, acf_period, epic_period = val
# # append data to file
# with open("c{0}_periods.txt".format(c), "a") as f:
# f.write("{0} {1} \n".format(epic, period))
| mit | 9,121,794,386,829,699,000 | 31.484694 | 77 | 0.54594 | false |
Ecam-Eurobot-2017/main | code/raspberrypi/range_sensors.py | 1 | 2358 | from i2c import I2C
from enum import IntEnum
class Command(IntEnum):
MeasureOne = 1
MeasureAll = 2
Count = 3
class RangeSensor(I2C):
"""
This class is an abstraction around the I2C communication with
the range-sensor module.
Details of the "protocol" used:
The Raspberry Pi sends a byte to the module containing a command
    and, optionally, a sensor number. Each is coded on 4 bits,
    making 8 bits in total. The null byte, 0x00, is used to indicate errors.
This means that we have 15 possible commands and 15 possible sensors.
We only use 3 different commands:
1. MeasureOne (get_range): 0001 xxxx
This command requests the last measure of the sensor number xxxx
        Sensor indices begin at 1. If the sensor does not exist, the module
        will return a null byte. If the sensor does exist, two bytes will be
        returned, making up the 16-bit value together.
2. MeasureAll (get_ranges): 0010 0000
This command requests the last measures of all the available sensors.
The response to this request is a sequence of 2*n bytes where n is the
number of available sensors.
3. Count (get_number_of_sensors): 0011 0000
This command requests the number of available sensors.
The response is only one byte as there are only 15 possible sensors.
"""
def __init__(self, address):
"""Constructor takes the adress of the I2C module"""
super(RangeSensor, self).__init__(address)
self.n = self.get_number_of_sensors()
def get_range(self, sensor):
"""Requests the last measurement of a specific sensor"""
cmd = I2C.pack8(Command.MeasureOne, sensor)
self.send(cmd)
r = self.receive(2)
return I2C.pack16(r[1], r[0])
def get_ranges(self):
"""Requests the last measurements of all sensors"""
cmd = I2C.pack8(Command.MeasureAll, 0)
self.send(cmd)
data = self.receive(2 * self.n)
ranges = list()
for i in range(self.n):
            j = i * 2
            ranges.append(I2C.pack16(data[j + 1], data[j]))
return ranges
def get_number_of_sensors(self):
"""Requests the number of available sensors"""
cmd = I2C.pack8(Command.Count, 0)
self.send(cmd)
return self.receive()
| mit | -9,218,176,433,495,981,000 | 32.685714 | 79 | 0.651824 | false |
PedroMDuarte/thesis-hubbard-lda_evap | lda.py | 1 | 78053 |
import logging
# create logger
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import rc
rc('font',**{'family':'serif'})
rc('text', usetex=True)
from vec3 import vec3, cross
import scipy.constants as C
"""
This file provides a way of calculating trap profiles in the local density
approximation. It needs to have a way of calculating:
* local band structure
* local tunneling rate, t
* local onsite interactions, U
From these three quantities it can go ahead and use the solutions to the
homogeneous Fermi-Hubbard (FH) model to calculate the LDA.
In the homogeneous FH problem the chemical potential and the zero of
energy are always specified with respect to some point in the local band
structure. This point depends on how the Hamiltonian is written down:
A. Traditional hamiltonian.
i, j : lattice sites
<i,j> : nearest neighbors
s : spin
su : spin-up
sd : spin-down
Kinetic energy = -t \sum_{s} \sum_{<i,j>} a_{i,s}^{\dagger} a_{j,s}
Onsite energy = U \sum_{i} n_{i,su} n_{i,sd}
Using the traditional hamiltonian, half-filling occurs at a chemical
potential mu = U/2.
The zero of energy in the traditional hamiltonian is exactly midway through
the lowest band of the U=0 hamiltonian.
B. Half-filling hamiltonian
Kinetic energy = -t \sum_{s} \sum_{<i,j>} a_{i,s}^{\dagger} a_{j,s}
Onsite energy = U \sum_{i} ( n_{i,su} - 1/2 )( n_{i,sd} - 1/2 )
Using the half-filling hamiltonian, half-filling occurs at a chemical
potential mu = 0, a convenient value.
The zero of energy in the half-filling hamiltonian is shifted by U/2
with respect to the zero in the traditional hamiltonian.
....
Considerations for LDA
....
When doing the local density approximation (LDA) we will essentially have a
homogeneous FH model that is shifted in energy by the enveloping potential of
the trap and by the local band structure. In the LDA the zero of energy is
defined as the energy of an atom at a point where there are no external
potentials. A global chemical potential will be defined with respect to the
LDA zero of energy.
To calculate the local thermodynamic quantities, such as density, entropy,
double occupancy, etc. we will use theoretical results for a homogeneous FH
model. The local chemical potential will be determined based on the local
value of the enveloping potential and the local band structure (which can be
obtained from the local lattice depth).
"""
import udipole
import scubic
from mpl_toolkits.mplot3d import axes3d
from scipy import integrate
from scipy import optimize
from scipy.interpolate import interp1d
# Load up the HTSE solutions
from htse import htse_dens, htse_doub, htse_entr, htse_cmpr
from nlce import nlce_dens, nlce_entr, nlce_spi, nlce_cmpr
import qmc, qmc_spi
def get_dens( T, t, mu, U, select='htse', ignoreLowT=False, verbose=True):
""" This function packages all three methods for obtaining
the thermodynamic quantities: htse, nlce, qmc"""
if select == 'htse':
return htse_dens( T, t, mu, U, ignoreLowT=ignoreLowT, verbose=verbose)
elif select == 'nlce':
return nlce_dens( T, t, mu, U, ignoreLowT=ignoreLowT, verbose=verbose)
def get_entr( T, t, mu, U, select='htse', ignoreLowT=False, verbose=True):
""" This function packages all three methods for obtaining
the thermodynamic quantities: htse, nlce, qmc"""
if select == 'htse':
return htse_entr( T, t, mu, U, ignoreLowT=ignoreLowT, verbose=verbose)
elif select == 'nlce':
return nlce_entr( T, t, mu, U, ignoreLowT=ignoreLowT, verbose=verbose)
def get_spi( T, t, mu, U, select='htse', ignoreLowT=False, verbose=True):
""" This function packages all three methods for obtaining
the thermodynamic quantities: htse, nlce, qmc"""
if select == 'htse':
return np.ones_like( t )
elif select == 'nlce':
return nlce_spi( T, t, mu, U, ignoreLowT=ignoreLowT, verbose=verbose)
def get_doub( T, t, mu, U, select='htse', ignoreLowT=False, verbose=True):
""" This function packages all three methods for obtaining
the thermodynamic quantities: htse, nlce, qmc"""
if select == 'htse':
return htse_doub( T, t, mu, U, ignoreLowT=ignoreLowT, verbose=verbose)
else:
        raise ValueError("doublons not defined")
def get_cmpr( T, t, mu, U, select='htse', ignoreLowT=False, verbose=True):
""" This function packages all three methods for obtaining
the thermodynamic quantities: htse, nlce, qmc"""
if select == 'htse':
return htse_cmpr( T, t, mu, U, ignoreLowT=ignoreLowT, verbose=verbose)
elif select == 'nlce':
return nlce_cmpr( T, t, mu, U, ignoreLowT=ignoreLowT, verbose=verbose)
#...............
# LDA CLASS
#...............
class lda:
"""
This class provides the machinery to do the lda. It provides a way to
determine the global chemical potential for a given number or for a half
filled sample.
"""
def __init__( self, **kwargs ):
self.verbose = kwargs.get('verbose', False)
# Flag to ignore errors related to the slope of the density profile
# or the slope of the band bottom
self.ignoreSlopeErrors = kwargs.get( 'ignoreSlopeErrors',False)
# Flag to ignore errors related to the global chemical potential
# spilling into the beams
self.ignoreMuThreshold = kwargs.get('ignoreMuThreshold', False )
# Flag to ignore errors related to low temperatures beyond the reach
# of the htse
self.ignoreLowT = kwargs.get('ignoreLowT',False)
# Flag to ignore errors related to a non-vanishing density
# distribution within the extents
self.ignoreExtents = kwargs.get('ignoreExtents',False)
# The potential needs to offer a way of calculating the local band
# band structure via provided functions. The following functions
# and variables must exist:
#
# To calculate lda:
# - pot.l
# - pot.bandStructure( X,Y,Z )
#
# To make plots
# - pot.unitlabel
# - pot.Bottom( X,Y,Z )
# - pot.LatticeMod( X,Y,Z )
# - pot.Info()
# - pot.EffAlpha()
# - pot.firstExcited( X,Y,Z )
# - pot.S0( X,Y,Z )
self.pot = kwargs.pop( 'potential', None)
if self.pot is None:
raise ValueError(\
'A potential needs to be defined to carry out the LDA')
# The potential also contains the lattice wavelength, which defines
# the lattice spacing
self.a = self.pot.l / 2.
# Initialize temperature. Temperature is specified in units of
# Er. For a 7 Er lattice t = 0.04 Er
self.T = kwargs.get('Temperature', 0.40 )
# Initialize interactions.
self.a_s = kwargs.get('a_s',300.)
# Initialize extents
self.extents = kwargs.pop('extents', 40.)
# Initialize the type of Hubbard solution
# type can be: 'htse', 'nlce', 'qmc'
self.select = kwargs.get('select','htse')
# Make a cut line along 111 to calculate integrals of the
# thermodynamic quantities
# set the number of points to use in the cut
if self.select == 'htse':
NPOINTS = 320
else:
NPOINTS = 80
OVERRIDE_NPOINTS = kwargs.pop('override_npoints', None)
if OVERRIDE_NPOINTS is not None:
NPOINTS = OVERRIDE_NPOINTS
direc111 = (np.arctan(np.sqrt(2)), np.pi/4)
unit = vec3(); th = direc111[0]; ph = direc111[1]
unit.set_spherical( 1., th, ph);
t111, self.X111, self.Y111, self.Z111, lims = \
udipole.linecut_points( direc=direc111, extents=self.extents,\
npoints=NPOINTS)
# Below we get the signed distance from the origin
self.r111 = self.X111*unit[0] + self.Y111*unit[1] + self.Z111*unit[2]
# Obtain band structure and interactions along the 111 direction
bandbot_111, bandtop_111, \
self.Ezero_111, self.tunneling_111, self.onsite_t_111 = \
self.pot.bandStructure( self.X111, self.Y111, self.Z111)
# The onsite interactions are scaled up by the scattering length
self.onsite_t_111 = self.a_s * self.onsite_t_111
self.onsite_111 = self.onsite_t_111 * self.tunneling_111
        # Lowest value of E0 is obtained
self.LowestE0 = np.amin( bandbot_111 )
self.Ezero0_111 = self.Ezero_111.min()
#---------------------
# CHECK FOR NO BUMP IN BAND BOTTOM
#---------------------
# Calculate first derivative of the band bottom at small radii, to
# assess whether or not the potential is a valid potential
        # (no bump in the center due to compensation)
positive_r = np.logical_and( self.r111 > 0. , self.r111 < 10. )
# absolute energy of the lowest band, elb
elb = bandbot_111[ positive_r ]
elb_slope = np.diff( elb ) < -1e-4
n_elb_slope = np.sum( elb_slope )
if n_elb_slope > 0:
msg = "ERROR: Bottom of the band has a negative slope"
if self.verbose:
print msg
print elb
print np.diff(elb)
print elb_slope
if not self.ignoreSlopeErrors:
raise ValueError(msg)
else:
if self.verbose:
print "OK: Bottom of the band has positive slope up to "\
+ "r111 = 10 um"
#------------------------------
# SET GLOBAL CHEMICAL POTENTIAL
#------------------------------
# Initialize global chemical potential and atom number
# globalMu can be given directly or can be specified via the
# number of atoms. If the Natoms is specified we calculate
# the required gMu using this function:
muHalfMott = self.onsite_111.max()/2.
if 'globalMu' in kwargs.keys():
# globalMu is given in Er, and is measured from the value
# of Ezero at the center of the potential
# When using it in the phase diagram it has to be changed to
# units of the tunneling
self.globalMu = kwargs.get('globalMu', 0.15)
if self.globalMu == 'halfMott':
self.globalMu = muHalfMott \
+ kwargs.get('halfMottPlus',0.)
else :
self.Number = kwargs.get('Natoms', 3e5)
fN = lambda x : self.getNumber( muHalfMott + x,self.T, \
verbose=False)- self.Number
if self.verbose :
print "Searching for globalMu => N=%.0f, "% self.Number,
muBrent = kwargs.get('muBrent', (-0.2, 0.3)) # Maybe the default
# muBrent range should
# be U dependent
muBrentShift = kwargs.get('muBrentShift', 0. )
muBrent = ( muBrent[0] + muBrentShift * muHalfMott, \
muBrent[1] + muBrentShift * muHalfMott )
try:
muBrentOpt, brentResults = \
optimize.brentq(fN, muBrent[0], muBrent[1], \
xtol=2e-3, rtol=1e-2, full_output=True)
#print "fN(muBrentOpt) = ", fN(muBrentOpt)
self.globalMu = muHalfMott + muBrentOpt
except Exception as e:
errstr = 'f(a) and f(b) must have different signs'
if errstr in e.message:
print "Natoms = {:.4g}".format(self.Number)
print "mu0 = %.2f --> Nlda = %.2g" % \
(muBrent[0], fN(muBrent[0]) + self.Number )
print "mu1 = %.2f --> Nlda = %.2g" % \
(muBrent[1], fN(muBrent[1]) + self.Number )
raise
if self.verbose:
print "gMu=%.3f, " % brentResults.root,
print "n_iter=%d, " % brentResults.iterations,
print "n eval=%d, " % brentResults.function_calls,
print "converge?=", brentResults.converged
#---------------------
# EVAPORATION ENERGIES
#---------------------
# Calculate energies to estimate eta parameter for evaporation
self.globalMuZ0 = self.Ezero0_111 + self.globalMu
# Make a cut line along 100 to calculate the threshold for evaporation
direc100 = (np.pi/2, 0.)
t100, self.X100, self.Y100, self.Z100, lims = \
udipole.linecut_points( direc=direc100, extents = 1200.)
# Obtain band structure along the 100 direction
bandbot_100, bandtop_100, self.Ezero_100, self.tunneling_100 = \
self.pot.bandStructure( self.X100, self.Y100, self.Z100, \
getonsite=False)
self.Ezero0_100 = self.Ezero_100.min()
# evapTH0_100 accounts for situations in which there is a local barrier
# as you move along 100 to the edge
self.evapTH0_100 = bandbot_100.max()
# Once past the local barrier we calculate the bandbot energy along
# a beam
self.beamBOT_100 = bandbot_100[-1]
if self.verbose:
#This obtains the value of g0, careful when using anisotropic params
scubic.get_max_comp( self.pot, 650., self.T, verbose=False)
#------------------------------------------------
# CONTROL THE CHEMICAL POTENTIAL SO THAT IT STAYS
# BELOW THE THRESHOLD FOR EVAPORATION
#------------------------------------------------
# For a valid scenario we need
# self.globalMuZ0 < self.beamBOT_100
# self.globalMuZ0 < self.evapTH0_100
# Otherwise the density distribution will spill out into the beams
# and the assumption of spherical symmetry won't be valid.
if self.globalMuZ0 + self.T*1.2 > self.evapTH0_100:
msg = "ERROR: Chemical potential exceeds the evaporation threshold "
if self.verbose:
print msg
print " mu0 = %.3f" % self.globalMuZ0
print " T = %.3f" % (self.T*1.2)
print " Eth = %.3f" % self.evapTH0_100
if not self.ignoreMuThreshold :
raise ValueError(msg)
elif self.verbose:
print "OK: Chemical potential is below evaporation threshold."
if self.globalMuZ0 + self.T*1.2 > self.beamBOT_100:
msg = "ERROR: Chemical potential exceeds the bottom of the band " +\
"along 100"
if self.verbose:
print msg
print " mu0 = %.3f" % self.globalMuZ0
print " T = %.3f" % (self.T*1.2)
print "E100 = %.3f" % self.beamBOT_100
if not self.ignoreMuThreshold :
raise ValueError(msg)
elif self.verbose:
print "OK: Chemical potential is below the bottom of the band " +\
"along 100"
#-----------------------
# ESTIMATION OF ETA EVAP
#-----------------------
mu = self.globalMuZ0 - self.LowestE0
th = self.evapTH0_100 - self.LowestE0
self.EtaEvap = th/mu
self.DeltaEvap = th - mu
if False:
print "mu global = %.3g" % self.globalMuZ0
print "evap th = %.3g" % self.evapTH0_100
print "lowest E = %.3g" % self.LowestE0
print "mu = %.3g" % mu
print "th = %.3g" % th
print "eta = %.3g" % (th/mu)
print "th-mu = %.3g" % (th-mu)
# After the chemical potential is established the local chemical
# potential along 111 can be defined. It is used to calculate other
# thermodynamic quantities
gMuZero = self.Ezero0_111 + self.globalMu
self.localMu_t_111= (gMuZero - self.Ezero_111) / self.tunneling_111
self.localMu_111= (gMuZero - self.Ezero_111)
localMu = gMuZero - self.Ezero_111
# If the global chemical potential is fixed then the lda
# class can have an easier time calculating the necessary
# temperature to obtain a certain entropy per particle.
# This option is provided here:
if ( 'globalMu' in kwargs.keys() and 'SN' in kwargs.keys() ) \
or kwargs.get('forceSN',False):
self.SN = kwargs.get('SN', 2.0)
# Shut down density extent errors during the search
igExt = self.ignoreExtents
self.ignoreExtents = True
fSN = lambda x : self.getEntropy(x) / \
self.getNumber(self.globalMu, x ) \
- self.SN
if self.verbose:
print "Searching for T => S/N=%.2f, "% self.SN
TBrent = kwargs.get('TBrent',(0.14,1.8))
try:
Tres, TbrentResults = \
optimize.brentq(fSN, TBrent[0], TBrent[1], \
xtol=2e-3, rtol=2e-3, full_output=True)
if self.verbose:
print "Brent T result = %.2f Er" % Tres
self.T = Tres
except Exception as e:
errstr = 'f(a) and f(b) must have different signs'
if errstr in e.message:
print "T0 = %.3f --> fSN = %.3f" % \
(TBrent[0], fSN(TBrent[0]) )
print "T1 = %.3f --> fSN = %.3f" % \
(TBrent[1], fSN(TBrent[1]) )
raise
print "Search for S/N=%.2f did not converge"%self.SN
print "Temperature will be set at T = %.2f Er" % self.T
print "ERROR:"
print e.message
print self.pot.Info()
print
self.ignoreExtents = igExt
# Once the temperature is established we can calculate the ratio
# of temperature to chemical potential, with the chem. potential
# measured from the lowest energy state
self.Tmu = self.T / mu
# We define an etaF_star which allows us to control for atoms
# spilling along the beams in situations with non-zero temperature
# such as what we can access with HTSE
self.etaF_star = self.EtaEvap - self.Tmu*1.4
# Obtain trap integrated values of the thermodynamic quantities
self.Number = self.getNumber( self.globalMu, self.T )
self.Entropy = self.getEntropy( self.T)
def Info( self ):
"""
Returns a latex string with the information pertinent to the
hubbard parameters
"""
# Tunneling label
tmin = self.tunneling_111.min()
tmin_kHz = tmin * 29.2
tlabel = '$t=%.2f\,\mathrm{kHz}$'%(tmin_kHz)
# Scattering length
aslabel = '$a_{s}=%.0fa_{0}$' % self.a_s
# U/t label
Utlabel = '$U/t=%.1f$' % self.onsite_t_111.max()
# Temperature label
Tlabel = '$T/t=%.1f$' % (self.T/self.tunneling_111).max()
LDAlabel = '\n'.join( [ aslabel, Utlabel, Tlabel, tlabel ] )
return LDAlabel
def ThermoInfo( self ):
"""
Returns a latex string with the information pertinent to the
calculated thermodynamic quantities.
"""
wLs = self.pot.w
waists = sum( wLs, ())
wL = np.mean(waists)
self.NumberD = self.getNumberD( self.T )
rlabel = r'$\mathrm{HWHM} = %.2f\,w_{L}$' % ( self.getRadius()/wL )
Nlabel = r'$N=%.2f\times 10^{5}$' % (self.Number/1e5)
Dlabel = r'$D=%.3f$' % ( self.NumberD / self.Number )
Slabel = r'$S/N=%.2fk_{\mathrm{B}}$' % ( self.Entropy / self.Number )
return '\n'.join([rlabel, Nlabel, Dlabel, Slabel])
def getRadius( self ):
"""
This function calculates the HWHM (half-width at half max) of the
density distribution.
"""
gMu = self.globalMu
T = self.T
gMuZero = self.Ezero0_111 + gMu
localMu = gMuZero - self.Ezero_111
density = get_dens( T, self.tunneling_111, localMu, \
self.onsite_111, select=self.select,\
ignoreLowT=self.ignoreLowT, \
verbose=self.verbose)
posradii = self.r111 >= 0.
r111pos = self.r111[ posradii]
posdens = density[ posradii ]
try:
hwhm = r111pos[ posdens - posdens[0]/2. < 0.][0]
return hwhm
except:
print r111pos
print posdens
raise
def get_localMu_t( self, gMu):
gMuZero = self.Ezero0_111 + gMu
localMu = gMuZero - self.Ezero_111
localMu_t = localMu / self.tunneling_111
return localMu_t
def getDensity( self, gMu, T ):
"""
This function calculates and returns the density along
the 111 direction
Parameters
----------
gMu : global chemical potential
"""
gMuZero = self.Ezero0_111 + gMu
localMu = gMuZero - self.Ezero_111
localMu_t = localMu / self.tunneling_111
density = get_dens( T, self.tunneling_111, localMu, \
self.onsite_111, select=self.select,\
ignoreLowT=self.ignoreLowT, \
verbose=self.verbose)
return self.r111 , density
def getEntropy111( self, gMu, T ):
"""
This function calculates and returns the entropy along
the 111 direction
Parameters
----------
gMu : global chemical potential
"""
gMuZero = self.Ezero0_111 + gMu
localMu = gMuZero - self.Ezero_111
localMu_t = localMu / self.tunneling_111
entropy = get_entr( T, self.tunneling_111, localMu, \
self.onsite_111, select=self.select,\
ignoreLowT=self.ignoreLowT, \
verbose=self.verbose)
return self.r111 , entropy
def getSpi111( self, gMu, T ):
"""
This function calculates and returns the structure factor along
the 111 direction
Parameters
----------
gMu : global chemical potential
"""
gMuZero = self.Ezero0_111 + gMu
localMu = gMuZero - self.Ezero_111
localMu_t = localMu / self.tunneling_111
spi = get_spi( T, self.tunneling_111, localMu, \
self.onsite_111, select=self.select,\
ignoreLowT=self.ignoreLowT, \
verbose=self.verbose)
return self.r111 , spi
def getBulkSpi( self, **kwargs ):
r111, n111 = self.getDensity( self.globalMu, self.T )
t0 = self.tunneling_111.min()
Tspi = kwargs.get( 'Tspi', self.T / t0 )
logger.info( "Tspi in units of t0 = " + str(Tspi) )
Tspi = Tspi * t0
logger.info( "Tspi in units of Er = " + str(Tspi) )
logger.info( " t0 in units of Er = " + str( t0 ) )
gMuZero = self.Ezero0_111 + self.globalMu
localMu = gMuZero - self.Ezero_111
localMu_t = localMu / self.tunneling_111
# Get the bulk Spi and the Spi profile
# ALSO
# Get the overall S/N and the s profiles, both s per lattice site
# and s per particle
spibulk, spi, overall_entropy, entropy, lda_number, density = \
qmc_spi.spi_bulk( r111, n111, localMu_t, Tspi, \
self.tunneling_111, self.onsite_111, **kwargs )
do_k111 = kwargs.get('do_k111', False)
if do_k111:
# Get the compressibility
k111 = get_cmpr( self.T, self.tunneling_111, localMu, \
self.onsite_111, select=self.select,\
ignoreLowT=self.ignoreLowT, \
verbose=self.verbose)
k111htse_list = []
for Thtse in [ 1.8, 2.3, 2.8]:
k111htse = get_cmpr( Thtse*t0, self.tunneling_111, localMu, \
self.onsite_111, select='htse',\
ignoreLowT=self.ignoreLowT, \
verbose=self.verbose)
k111htse_list.append( [Thtse, k111htse] )
else:
k111 = None
k111htse_list = []
U111 = self.onsite_111 / self.tunneling_111
return spibulk, spi, r111, n111, U111, self.tunneling_111, \
overall_entropy, entropy, lda_number, density, k111, \
k111htse_list
def getSpiFineGrid( self, **kwargs):
direc111 = (np.arctan(np.sqrt(2)), np.pi/4)
unit = vec3(); th = direc111[0]; ph = direc111[1]
unit.set_spherical( 1., th, ph);
numpoints = kwargs.pop('numpoints', 80 )
t111, X111_, Y111_, Z111_, lims_ = \
udipole.linecut_points( direc=direc111, extents=self.extents,\
npoints=numpoints)
# Below we get the signed distance from the origin
r111_ = X111_*unit[0] + Y111_*unit[1] + Z111_*unit[2]
# Obtain band structure and interactions along the 111 direction
bandbot_111_, bandtop_111_, \
Ezero_111_, tunneling_111_, onsite_t_111_ = \
self.pot.bandStructure( X111_, Y111_, Z111_)
# The onsite interactions are scaled up by the scattering length
onsite_t_111_ = self.a_s * onsite_t_111_
onsite_111_ = onsite_t_111_ * tunneling_111_
# Lowst value of E0 is obtained
LowestE0_ = np.amin( bandbot_111_ )
Ezero0_111_ = Ezero_111_.min()
t0 = tunneling_111_.min()
Tspi = kwargs.get( 'Tspi', self.T / t0 )
Tspi = Tspi * t0
localMu_ = self.globalMu + Ezero0_111_ - Ezero_111_
localMu_t_ = localMu_ / tunneling_111_
# Get the density
density_ = get_dens( self.T, tunneling_111_, localMu_, \
onsite_111_, select=self.select,\
ignoreLowT=self.ignoreLowT, \
verbose=self.verbose)
# Get the bulk Spi and the Spi profile
# ALSO
# Get the overall S/N and the s profiles, both s per lattice site
# and s per particle
kwargs['do_kappa']=True
spibulk, spi, overall_entropy, entropy, \
lda_number, density, compr = \
qmc_spi.spi_bulk( r111_, density_, localMu_t_, Tspi, \
tunneling_111_, onsite_111_, **kwargs )
U111 = onsite_111_ / tunneling_111_
#return spibulk, spi, r111, n111, U111, self.tunneling_111, \
# overall_entropy, entropy, lda_number, density
return r111_, spi, density_, compr, localMu_t_ * tunneling_111_
def getNumber( self, gMu, T, **kwargs):
"""
This function calculates and returns the total number of atoms.
It integrates along 111 assuming a spherically symmetric sample.
Parameters
----------
gMu : global chemical potential
"""
kwverbose = kwargs.get('verbose', None)
if kwverbose is not None:
NVerbose = kwverbose
else:
NVerbose = self.verbose
gMuZero = self.Ezero0_111 + gMu
localMu = gMuZero - self.Ezero_111
localMu_t = localMu / self.tunneling_111
density = get_dens( T, self.tunneling_111, localMu, \
self.onsite_111, select=self.select,\
ignoreLowT=self.ignoreLowT, \
verbose=self.verbose)
        # Under some circumstances the green compensation can
# cause dips in the density profile. This can happen only
# if the green beam waist is smaller than the IR beam waist
# Experimentally we have seen that we do not handle these very
# well, so we want to avoid them at all cost
        # The occurrence of this is flagged by a change in the derivative
# of the radial density. This derivative should always be negative.
# If the green beam waist is larger than the IR beam waist, then
# the problem with the non-monotonic density can also be found
# when trying to push the compensation such that muGlobal gets
# close to the evaporation threshold
# This can be pathological because it leads to an accumulation of atoms
# that are not trapped but this lda code integrates over them and counts
        # them anyway.
        # To avoid either of the two situations described above we force the
# density to decrease monotonically over the extent of our calculation.
        # If the density slope is positive then we report it as an error
#
# find the point at which the density changes derivative
radius_check = 1e-3
posradii = self.r111 > radius_check
posdens = density[ posradii ]
neg_slope = np.diff( posdens ) > 1e-4
n_neg_slope = np.sum( neg_slope )
if n_neg_slope > 0:
msg = "ERROR: Radial density profile along 111 " + \
"has a positive slope"
if NVerbose:
print msg
print "\n\nradius check start = ", radius_check
print posdens
print np.diff( posdens ) > 1e-4
if not self.ignoreSlopeErrors:
raise ValueError(msg)
elif NVerbose:
print "OK: Radial density profile along 111 decreases " + \
"monotonically."
if False:
print " posdens len = ",len(posdens)
print " n_neg_slope = ",n_neg_slope
# Checks that the density goes to zero within the current extents
if kwverbose is not None and kwverbose is False:
edgecuttof = 10.
else:
edgecuttof = 2e-2
if posdens[-1]/posdens[0] > edgecuttof:
msg = "ERROR: Density does not vanish within current " + \
"extents"
if not self.ignoreExtents:
print msg
print posdens[0]
print posdens[-1]
print posdens
print self.pot.g0
#print "etaF = ", self.EtaEvap
#print "etaFstar = ", self.etaF_star
#print "extents = ", self.extents
raise ValueError(msg)
if NVerbose:
print msg
print posdens[0]
print posdens[-1]
print self.pot.g0
dens = density[~np.isnan(density)]
r = self.r111[~np.isnan(density)]
self.PeakD = dens.max()
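        # Total atom number: N = (2*pi/a^3) * Integral[ n(r) r^2 dr ] taken
        # over the signed 111 coordinate, which for a spherically symmetric
        # cloud equals (4*pi/a^3) * Integral_0^R n(r) r^2 dr.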
return np.power(self.a,-3)*2*np.pi*integrate.simps(dens*(r**2),r)
def getNumberD( self, T):
"""
This function calculates and returns the total number of doublons.
It integrates along 111 assuming a spherically symmetric sample.
"""
doublons = get_doub( T, self.tunneling_111, self.localMu_111,\
self.onsite_111, select=self.select,\
ignoreLowT=self.ignoreLowT,\
verbose=self.verbose)
doub = doublons[~np.isnan(doublons)]
r = self.r111[~np.isnan(doublons)]
return 2.*np.power(self.a,-3)*2*np.pi*integrate.simps(doub*(r**2),r)
def getEntropy( self, T):
"""
This function calculates and returns the total entropy.
It integrates along 111 assuming a spherically symmetric sample.
"""
entropy = get_entr( T, self.tunneling_111, self.localMu_111,\
self.onsite_111, select=self.select,\
ignoreLowT=self.ignoreLowT,\
verbose=self.verbose)
entr = entropy[~np.isnan(entropy)]
r = self.r111[~np.isnan(entropy)]
return np.power(self.a,-3)*2*np.pi*integrate.simps(entr*(r**2),r)
def column_density( self ):
"""
        This function should calculate and return the column density of the
        cloud. It is not implemented yet and currently returns None.
"""
return None
def plotLine( lda0, **kwargs):
# Flag to ignore errors related to low temperatures beyond the reach
# of the htse
ignoreLowT = kwargs.get('ignoreLowT',False)
scale = 0.9
figGS = plt.figure(figsize=(6.0*scale,4.2*scale))
gs3Line = matplotlib.gridspec.GridSpec(2,2,\
width_ratios=[1.6, 1.], height_ratios=[2.0,1.4],\
wspace=0.25,
left=0.13, right=0.90,
bottom=0.15, top=0.78)
tightrect = [0.,0.00, 0.95, 0.84]
Ax1 = [];
Ymin =[]; Ymax=[]
line_direction = kwargs.pop('line_direction', '111')
direcs = { \
'100':(np.pi/2, 0.), \
'010':(np.pi/2, np.pi/2), \
'001':(0., np.pi), \
'111':(np.arctan(np.sqrt(2)), np.pi/4) }
labels = { \
'100':'$(\mathbf{100})$', \
'010':'$(\mathbf{010})$', \
'001':'$(\mathbf{001})$', \
'111':'$(\mathbf{111})$' }
cutkwargs = kwargs.pop( 'cutkwargs', { } )
cutkwargs['direc'] = direcs[ line_direction ]
cutkwargs['ax0label']= labels[ line_direction ]
cutkwargs['extents']= kwargs.pop('extents', 40.)
t, X,Y,Z, lims = udipole.linecut_points( **cutkwargs )
potkwargs = kwargs.pop( 'potkwargs', { } )
potkwargs['direc'] = direcs[ line_direction ]
potkwargs['ax0label']= labels[ line_direction ]
potkwargs['extents']= kwargs.pop('x1lims', (lims[0],lims[1]))[1]
tp, Xp,Yp,Zp, lims = udipole.linecut_points( **potkwargs )
kwargs['suptitleY'] = 0.96
kwargs['foottextY'] = 0.84
x1lims = kwargs.get('x1lims', (lims[0],lims[1]))
ax1 = figGS.add_subplot( gs3Line[0:3,0] )
ax1.set_xlim( *x1lims )
ax1.grid()
ax1.grid(which='minor')
ax1.set_xlabel('$\mu\mathrm{m}$ '+cutkwargs['ax0label'], fontsize=13.)
ax1.set_ylabel( lda0.pot.unitlabel, rotation=0, fontsize=13., labelpad=15 )
ax1.xaxis.set_major_locator( matplotlib.ticker.MultipleLocator(20) )
ax1.xaxis.set_minor_locator( matplotlib.ticker.MultipleLocator(10) )
ax1.yaxis.set_major_locator( matplotlib.ticker.MaxNLocator(7) )
ax1.yaxis.set_minor_locator( matplotlib.ticker.AutoMinorLocator() )
ax2 = figGS.add_subplot( gs3Line[0,1] )
ax3 = None
#ax2.grid()
ax2.set_xlabel('$\mu\mathrm{m}$', fontsize=12, labelpad=0)
#ax2.set_ylabel('$n$', rotation=0, fontsize=14, labelpad=11 )
ax2.xaxis.set_major_locator( matplotlib.ticker.MultipleLocator(20) )
ax2.xaxis.set_minor_locator( matplotlib.ticker.MultipleLocator(10) )
#----------------------------------
# CALCULATE ALL RELEVANT QUANTITIES
#----------------------------------
# All the relevant lines are first calculated here
bandbot_XYZ, bandtop_XYZ, \
Ezero_XYZ, tunneling_XYZ, onsite_t_XYZ = \
lda0.pot.bandStructure( X, Y, Z )
# The onsite interactions are scaled up by the scattering length
onsite_t_XYZ = lda0.a_s * onsite_t_XYZ
onsite_XYZ = onsite_t_XYZ * tunneling_XYZ
Ezero0_XYZ = Ezero_XYZ.min()
bottom = lda0.pot.Bottom( X, Y, Z )
lattmod = lda0.pot.LatticeMod( X, Y, Z )
excbot_XYZ, exctop_XYZ = lda0.pot.firstExcited( X, Y, Z )
# Offset the chemical potential for use in the phase diagram
localMu_XYZ = ( lda0.globalMu + lda0.Ezero0_111 - Ezero_XYZ )
# Obtain the thermodynamic quantities
density_XYZ = get_dens( lda0.T, tunneling_XYZ, localMu_XYZ, \
onsite_XYZ, select=lda0.select, ignoreLowT=ignoreLowT )
doublon_XYZ = get_doub( lda0.T, tunneling_XYZ, localMu_XYZ, \
onsite_XYZ, select=lda0.select, ignoreLowT=ignoreLowT )
entropy_XYZ = get_entr( lda0.T, tunneling_XYZ, localMu_XYZ, \
onsite_XYZ, select=lda0.select, ignoreLowT=ignoreLowT )
# All the potential lines are recalculated to match the potential
# xlims
bandbot_XYZp, bandtop_XYZp, \
Ezero_XYZp, tunneling_XYZp, onsite_t_XYZp = \
lda0.pot.bandStructure( Xp, Yp, Zp )
# The onsite interactions are scaled up by the scattering length
onsite_t_XYZp = lda0.a_s * onsite_t_XYZp
onsite_XYZp = onsite_t_XYZp * tunneling_XYZp
Ezero0_XYZp = Ezero_XYZp.min()
bottomp = lda0.pot.Bottom( Xp, Yp, Zp )
lattmodp = lda0.pot.LatticeMod( Xp, Yp, Zp )
excbot_XYZp, exctop_XYZp = lda0.pot.firstExcited( Xp, Yp, Zp )
# Offset the chemical potential for use in the phase diagram
localMu_XYZp = ( lda0.globalMu + lda0.Ezero0_111 - Ezero_XYZp )
#--------------------------
# SETUP LINES TO BE PLOTTED
#--------------------------
# A list of lines to plot is generated
# Higher zorder puts stuff in front
toplot = [
{'x':tp,\
'y':(bandbot_XYZp, Ezero_XYZp ), 'color':'blue', 'lw':2., \
'fill':True, 'fillcolor':'blue', 'fillalpha':0.75,\
'zorder':10, 'label':'$\mathrm{band\ lower\ half}$'},
{'x':tp,\
'y':(Ezero_XYZp + onsite_XYZp, bandtop_XYZp + onsite_XYZp), \
'color':'purple', 'lw':2., \
'fill':True, 'fillcolor':'plum', 'fillalpha':0.75,\
'zorder':10, 'label':'$\mathrm{band\ upper\ half}+U$'},
{'x':tp,\
'y':(excbot_XYZp, exctop_XYZp ), 'color':'red', 'lw':2., \
'fill':True, 'fillcolor':'pink', 'fillalpha':0.75,\
'zorder':2, 'label':'$\mathrm{first\ excited\ band}$'},
{'x':tp,\
'y':np.ones_like(Xp)*lda0.globalMuZ0, 'color':'limegreen',\
'lw':2,'zorder':1.9, 'label':'$\mu_{0}$'},
{'x':tp,\
'y':np.ones_like(Xp)*lda0.evapTH0_100, 'color':'#FF6F00', \
'lw':2,'zorder':1.9, 'label':'$\mathrm{evap\ threshold}$'},
{'x':tp,\
'y':bottomp,'color':'gray', 'lw':0.5,'alpha':0.5},
{'x':tp,\
'y':lattmodp,'color':'gray', 'lw':1.5,'alpha':0.5,\
'label':r'$\mathrm{lattice\ potential\ \ }\lambda\times10$'} \
]
toplot = toplot + [
{'y':density_XYZ, 'color':'blue', 'lw':1.75, \
'axis':2, 'label':'$n$'},
{'y':doublon_XYZ, 'color':'red', 'lw':1.75, \
'axis':2, 'label':'$d$'},
{'y':entropy_XYZ, 'color':'black', 'lw':1.75, \
'axis':2, 'label':'$s_{L}$'},
#{'y':density-2*doublons, 'color':'green', 'lw':1.75, \
# 'axis':2, 'label':'$n-2d$'},
#{'y':self.localMu_t, 'color':'cyan', 'lw':1.75, \
# 'axis':2, 'label':r'$\mu$'},
]
toplot = toplot + [
{'y':entropy_XYZ/density_XYZ, 'color':'gray', 'lw':1.75, \
'axis':3, 'label':'$s_{N}$'} ]
lattlabel = '\n'.join( list( lda0.pot.Info() ) + \
[lda0.pot.TrapFreqsInfo() + r',\ ' \
+ lda0.pot.EffAlpha(), \
'$\eta_{F}=%.2f$'%lda0.EtaEvap + '$,$ ' \
'$\Delta_{F}=%.2fE_{R}$'%lda0.DeltaEvap, \
] )
toplot = toplot + [ {'text':True, 'x': -0.1, 'y':1.02, 'tstring':lattlabel,
'ha':'left', 'va':'bottom', 'linespacing':1.4} ]
toplot = toplot + [ {'text':True, 'x': 1.0, 'y':1.02, 'tstring':lda0.Info(),
'ha':'right', 'va':'bottom', 'linespacing':1.4} ]
toplot = toplot + [ {'text':True, 'x': 0., 'y':1.02, \
'tstring':lda0.ThermoInfo(), \
'ha':'left', 'va':'bottom', 'axis':2, \
'linespacing':1.4} ]
#--------------------------
# ITERATE AND PLOT
#--------------------------
Emin =[]; Emax=[]
for p in toplot:
if not isinstance(p,dict):
ax1.plot(t,p); Emin.append(p.min()); Emax.append(p.max())
else:
if 'text' in p.keys():
whichax = p.get('axis',1)
axp = ax2 if whichax ==2 else ax1
tx = p.get('x', 0.)
ty = p.get('y', 1.)
ha = p.get('ha', 'left')
va = p.get('va', 'center')
ls = p.get('linespacing', 1.)
tstring = p.get('tstring', 'empty')
axp.text( tx,ty, tstring, ha=ha, va=va, linespacing=ls,\
transform=axp.transAxes)
elif 'figprop' in p.keys():
figsuptitle = p.get('figsuptitle', None)
figGS.suptitle(figsuptitle, y=kwargs.get('suptitleY',1.0),\
fontsize=14)
figGS.text(0.5,kwargs.get('foottextY',1.0),\
p.get('foottext',None),fontsize=14,\
ha='center')
elif 'y' in p.keys():
if 'x' in p.keys():
x = p['x']
else:
x = t
labelstr = p.get('label',None)
porder = p.get('zorder',2)
fill = p.get('fill', False)
ydat = p.get('y',None)
whichax = p.get('axis',1)
if whichax == 3:
if ax3 is None:
ax3 = ax2.twinx()
axp = ax3
else:
axp = ax2 if whichax ==2 else ax1
if ydat is None: continue
if fill:
axp.plot(x,ydat[0],
lw=p.get('lw',2.),\
color=p.get('color','black'),\
alpha=p.get('fillalpha',0.5),\
zorder=porder,\
label=labelstr
)
axp.fill_between( x, ydat[0], ydat[1],\
lw=p.get('lw',2.),\
color=p.get('color','black'),\
facecolor=p.get('fillcolor','gray'),\
alpha=p.get('fillalpha',0.5),\
zorder=porder
)
if whichax == 1:
Emin.append( min( ydat[0].min(), ydat[1].min() ))
Emax.append( max( ydat[0].max(), ydat[1].max() ))
else:
axp.plot( x, ydat,\
lw=p.get('lw',2.),\
color=p.get('color','black'),\
alpha=p.get('alpha',1.0),\
zorder=porder,\
label=labelstr
)
if whichax == 1:
Emin.append( ydat.min() )
Emax.append( ydat.max() )
if whichax == 3:
ax3.tick_params(axis='y', colors=p.get('color','black'))
#print labelstr
#print Emin
#print Emax
if ax3 is not None:
ax3.yaxis.set_major_locator( \
matplotlib.ticker.MaxNLocator(6, prune='upper') )
handles2, labels2 = ax2.get_legend_handles_labels()
handles3, labels3 = ax3.get_legend_handles_labels()
handles = handles2 + handles3
labels = labels2 + labels3
ax2.legend( handles, labels, bbox_to_anchor=(1.25,1.0), \
loc='lower right', numpoints=1, labelspacing=0.2, \
prop={'size':10}, handlelength=1.1, handletextpad=0.5 )
Emin = min(Emin); Emax=max(Emax)
dE = Emax-Emin
# Finalize figure
x2lims = kwargs.get('x2lims', (lims[0],lims[1]))
ax2.set_xlim( *x2lims )
y0,y1 = ax2.get_ylim()
if y1 == 1. :
ax2.set_ylim( y0 , y1 + (y1-y0)*0.05)
y2lims = kwargs.get('y2lims', None)
if y2lims is not None:
ax2.set_ylim( *y2lims)
y3lims = kwargs.get('y3lims', None)
if y3lims is not None:
ax3.set_ylim( *y3lims)
ymin, ymax = Emin-0.05*dE, Emax+0.05*dE
Ymin.append(ymin); Ymax.append(ymax); Ax1.append(ax1)
Ymin = min(Ymin); Ymax = max(Ymax)
#print Ymin, Ymax
for ax in Ax1:
ax.set_ylim( Ymin, Ymax)
if 'ax1ylim' in kwargs.keys():
ax1.set_ylim( *kwargs['ax1ylim'] )
Ax1[0].legend( bbox_to_anchor=(1.1,-0.15), \
loc='lower left', numpoints=1, labelspacing=0.2,\
prop={'size':9.5}, handlelength=1.1, handletextpad=0.5 )
#gs3Line.tight_layout(figGS, rect=tightrect)
return figGS
def plotMathy( lda0, **kwargs):
# Flag to ignore errors related to low temperatures beyond the reach
# of the htse
ignoreLowT = kwargs.get('ignoreLowT',False)
scale = 0.9
figGS = plt.figure(figsize=(6.0*scale,4.2*scale))
#figGS = plt.figure(figsize=(5.6,4.2))
gs3Line = matplotlib.gridspec.GridSpec(3,2,\
width_ratios=[1.6, 1.], height_ratios=[2.2,0.8,1.2],\
wspace=0.2, hspace=0.24,
left = 0.15, right=0.95, bottom=0.14, top=0.78)
#tightrect = [0.,0.00, 0.95, 0.88]
Ax1 = [];
Ymin =[]; Ymax=[]
line_direction = kwargs.pop('line_direction', '111')
direcs = { \
'100':(np.pi/2, 0.), \
'010':(np.pi/2, np.pi/2), \
'001':(0., np.pi), \
'111':(np.arctan(np.sqrt(2)), np.pi/4) }
labels = { \
'100':'$(\mathbf{100})$', \
'010':'$(\mathbf{010})$', \
'001':'$(\mathbf{001})$', \
'111':'$(\mathbf{111})$' }
cutkwargs = kwargs.pop( 'cutkwargs', {} )
cutkwargs['direc'] = direcs[ line_direction ]
cutkwargs['ax0label']= labels[ line_direction ]
cutkwargs['extents']= kwargs.pop('extents', 40.)
t, X,Y,Z, lims = udipole.linecut_points( **cutkwargs )
ax1 = figGS.add_subplot( gs3Line[0:2,0] )
ax1.grid()
ax1.grid(which='minor')
ax1.set_ylabel( lda0.pot.unitlabel, rotation=0, fontsize=16, labelpad=15 )
ax1.xaxis.set_major_locator( matplotlib.ticker.MaxNLocator(7) )
#ax1.xaxis.set_minor_locator( matplotlib.ticker.MultipleLocator(20) )
ax1.yaxis.set_major_locator( matplotlib.ticker.MaxNLocator(7) )
#ax1.yaxis.set_minor_locator( matplotlib.ticker.MultipleLocator(1.) )
ax2 = figGS.add_subplot( gs3Line[0,1] )
ax2.grid()
#ax2.set_ylabel('$n$', rotation=0, fontsize=14, labelpad=11 )
ax2.xaxis.set_major_locator( matplotlib.ticker.MaxNLocator(6) )
#ax2.xaxis.set_minor_locator( matplotlib.ticker.MultipleLocator(10) )
ax3 = figGS.add_subplot( gs3Line[2,0] )
ax3.grid()
ax3.yaxis.set_major_locator( matplotlib.ticker.MaxNLocator(3) )
ax3.xaxis.set_major_locator( matplotlib.ticker.MaxNLocator(7) )
#----------------------------------
# CALCULATE ALL RELEVANT QUANTITIES
#----------------------------------
# All the relevant lines are first calculated here
# In the Mathy plot the x-axis is the local lattice depth
s0_XYZ = lda0.pot.S0( X, Y, Z)[0]
ax1.set_xlim( s0_XYZ.min(), s0_XYZ.max() )
ax3.set_xlim( s0_XYZ.min(), s0_XYZ.max() )
x2lims = kwargs.get('x2lims', None)
if x2lims is not None:
ax2.set_xlim( *x2lims)
else:
ax2.set_xlim( s0_XYZ.min(), s0_XYZ.max() )
ax3.set_xlabel('$s_{0}\,(E_{R}) $', fontsize=13)
ax2.set_xlabel('$s_{0}\,(E_{R}) $', fontsize=12, labelpad=0)
bandbot_XYZ, bandtop_XYZ, \
Ezero_XYZ, tunneling_XYZ, onsite_t_XYZ = \
lda0.pot.bandStructure( X, Y, Z )
# The onsite interactions are scaled up by the scattering length
onsite_t_XYZ = lda0.a_s * onsite_t_XYZ
onsite_XYZ = onsite_t_XYZ * tunneling_XYZ
Ezero0_XYZ = Ezero_XYZ.min()
bottom = lda0.pot.Bottom( X, Y, Z )
lattmod = lda0.pot.LatticeMod( X, Y, Z )
Mod = np.amin( lda0.pot.S0( X, Y, Z), axis=0 )
deltas0 = ( s0_XYZ.max()-s0_XYZ.min() )
lattmod = lda0.pot.Bottom( X, Y, Z ) + \
Mod*np.power( np.cos( 2.*np.pi* s0_XYZ *10./deltas0 ), 2)
excbot_XYZ, exctop_XYZ = lda0.pot.firstExcited( X, Y, Z )
# Offset the chemical potential for use in the phase diagram
localMu_XYZ = ( lda0.globalMu + lda0.Ezero0_111 - Ezero_XYZ )
# Obtain the thermodynamic quantities
density_XYZ = get_dens( lda0.T, tunneling_XYZ, localMu_XYZ, \
onsite_XYZ, select=lda0.select, ignoreLowT=ignoreLowT )
doublon_XYZ = get_doub( lda0.T, tunneling_XYZ, localMu_XYZ, \
onsite_XYZ, select=lda0.select, ignoreLowT=ignoreLowT )
entropy_XYZ = get_entr( lda0.T, tunneling_XYZ, localMu_XYZ, \
onsite_XYZ, select=lda0.select, ignoreLowT=ignoreLowT )
#--------------------------
# SETUP LINES TO BE PLOTTED
#--------------------------
# A list of lines to plot is generated
# Higher zorder puts stuff in front
toplot = [
{'y':(bandbot_XYZ, Ezero_XYZ ), 'color':'blue', 'lw':2., \
'fill':True, 'fillcolor':'blue', 'fillalpha':0.5,\
'zorder':10, 'label':'$\mathrm{band\ lower\ half}$'},
{'y':(Ezero_XYZ + onsite_XYZ, bandtop_XYZ + onsite_XYZ), \
'color':'purple', 'lw':2., \
'fill':True, 'fillcolor':'plum', 'fillalpha':0.5,\
'zorder':10, 'label':'$\mathrm{band\ upper\ half}+U$'},
{'y':(Ezero_XYZ, Ezero_XYZ + onsite_XYZ), \
'color':'black', 'lw':2., \
'fill':True, 'fillcolor':'gray', 'fillalpha':0.85,\
'zorder':10, 'label':'$\mathrm{mott\ gap}$'},
#{'y':(excbot_XYZ, exctop_XYZ ), 'color':'red', 'lw':2., \
# 'fill':True, 'fillcolor':'pink', 'fillalpha':0.75,\
# 'zorder':2, 'label':'$\mathrm{first\ excited\ band}$'},
{'y':np.ones_like(X)*lda0.globalMuZ0, 'color':'limegreen',\
'lw':2,'zorder':1.9, 'label':'$\mu_{0}$'},
{'y':np.ones_like(X)*lda0.evapTH0_100, 'color':'#FF6F00', \
'lw':2,'zorder':1.9, 'label':'$\mathrm{evap\ threshold}$'},
#{'y':bottom,'color':'gray', 'lw':0.5,'alpha':0.5, 'axis':3},
{'y':lattmod,'color':'gray', 'lw':1.5,'alpha':0.5, \
'axis':3,\
'label':r'$\mathrm{lattice\ potential\ \ }\lambda\times10$'} \
]
entropy_per_particle = kwargs.pop('entropy_per_particle', False)
if entropy_per_particle:
toplot = toplot + [
{'y':entropy_XYZ/density_XYZ, 'color':'black', 'lw':1.75, \
'axis':2, 'label':'$s_{N}$'} ]
else:
toplot = toplot + [
{'y':density_XYZ, 'color':'blue', 'lw':1.75, \
'axis':2, 'label':'$n$'},
{'y':doublon_XYZ, 'color':'red', 'lw':1.75, \
'axis':2, 'label':'$d$'},
{'y':entropy_XYZ, 'color':'black', 'lw':1.75, \
'axis':2, 'label':'$s_{L}$'},
#{'y':density-2*doublons, 'color':'green', 'lw':1.75, \
# 'axis':2, 'label':'$n-2d$'},
#{'y':self.localMu_t, 'color':'cyan', 'lw':1.75, \
# 'axis':2, 'label':r'$\mu$'},
]
lattlabel = '\n'.join( list( lda0.pot.Info() ) + \
[lda0.pot.TrapFreqsInfo() + r',\ ' \
+ lda0.pot.EffAlpha(), \
'$\eta_{F}=%.2f$'%lda0.EtaEvap + '$,$ ' \
'$\Delta_{F}=%.2fE_{R}$'%lda0.DeltaEvap, \
] )
toplot = toplot + [ {'text':True, 'x': 0., 'y':1.02, 'tstring':lattlabel,
'ha':'left', 'va':'bottom', 'linespacing':1.4} ]
toplot = toplot + [ {'text':True, 'x': 1.0, 'y':1.02, 'tstring':lda0.Info(),
'ha':'right', 'va':'bottom', 'linespacing':1.4} ]
toplot = toplot + [ {'text':True, 'x': 0., 'y':1.02, \
'tstring':lda0.ThermoInfo(), \
'ha':'left', 'va':'bottom', 'axis':2, \
'linespacing':1.4} ]
#--------------------------
# ITERATE AND PLOT
#--------------------------
kwargs['suptitleY'] = 0.96
kwargs['foottextY'] = 0.84
    # For every plotted quantity I use only the positive radii
Emin =[]; Emax=[]
positive = t > 0.
xarray = s0_XYZ[ positive ]
for p in toplot:
if not isinstance(p,dict):
p = p[positive]
ax1.plot(xarray,p); Emin.append(p.min()); Emax.append(p.max())
else:
if 'text' in p.keys():
whichax = p.get('axis',1)
axp = ax2 if whichax ==2 else ax1
tx = p.get('x', 0.)
ty = p.get('y', 1.)
ha = p.get('ha', 'left')
va = p.get('va', 'center')
ls = p.get('linespacing', 1.)
tstring = p.get('tstring', 'empty')
axp.text( tx,ty, tstring, ha=ha, va=va, linespacing=ls,\
transform=axp.transAxes)
elif 'figprop' in p.keys():
figsuptitle = p.get('figsuptitle', None)
figGS.suptitle(figsuptitle, y=kwargs.get('suptitleY',1.0),\
fontsize=14)
figGS.text(0.5,kwargs.get('foottextY',1.0),\
p.get('foottext',None),fontsize=14,\
ha='center')
elif 'y' in p.keys():
whichax = p.get('axis',1)
#if whichax == 2 : continue
axp = ax2 if whichax ==2 else ax3 if whichax == 3 else ax1
labelstr = p.get('label',None)
porder = p.get('zorder',2)
fill = p.get('fill', False)
ydat = p.get('y',None)
if ydat is None: continue
if fill:
ydat = ( ydat[0][positive], ydat[1][positive] )
axp.plot(xarray,ydat[0],
lw=p.get('lw',2.),\
color=p.get('color','black'),\
alpha=p.get('fillalpha',0.5),\
zorder=porder,\
label=labelstr
)
axp.fill_between( xarray, ydat[0], ydat[1],\
lw=p.get('lw',2.),\
color=p.get('color','black'),\
facecolor=p.get('fillcolor','gray'),\
alpha=p.get('fillalpha',0.5),\
zorder=porder
)
if whichax == 1:
Emin.append( min( ydat[0].min(), ydat[1].min() ))
Emax.append( max( ydat[0].max(), ydat[1].max() ))
else:
ydat = ydat[ positive ]
axp.plot( xarray, ydat,\
lw=p.get('lw',2.),\
color=p.get('color','black'),\
alpha=p.get('alpha',1.0),\
zorder=porder,\
label=labelstr
)
if whichax == 1:
Emin.append( ydat.min() )
Emax.append( ydat.max() )
ax2.legend( bbox_to_anchor=(0.03,1.02), \
loc='upper left', numpoints=1, labelspacing=0.2, \
prop={'size':10}, handlelength=1.1, handletextpad=0.5 )
Emin = min(Emin); Emax=max(Emax)
dE = Emax-Emin
# Finalize figure
y0,y1 = ax2.get_ylim()
ax2.set_ylim( y0 , y1 + (y1-y0)*0.1)
ymin, ymax = Emin-0.05*dE, Emax+0.05*dE
Ymin.append(ymin); Ymax.append(ymax); Ax1.append(ax1)
Ymin = min(Ymin); Ymax = max(Ymax)
for ax in Ax1:
ax.set_ylim( Ymin, Ymax)
if 'ax1ylim' in kwargs.keys():
ax1.set_ylim( *kwargs['ax1ylim'] )
Ax1[0].legend( bbox_to_anchor=(1.1,0.1), \
loc='upper left', numpoints=1, labelspacing=0.2,\
prop={'size':11}, handlelength=1.1, handletextpad=0.5 )
#gs3Line.tight_layout(figGS, rect=tightrect)
return figGS
def CheckInhomog( lda0, **kwargs ):
"""This function will make a plot along 111 of the model parameters:
U, t, U/t, v0.
It is useful to assess the degree of inhomogeneity in our system"""
# Prepare the figure
fig = plt.figure(figsize=(9.,4.2))
lattlabel = '\n'.join( list( lda0.pot.Info() ) )
lattlabel = '\n'.join( [ i.split( r'$\mathrm{,}\ $' )[0].replace('s','v') \
for i in lda0.pot.Info() ] )
Nlabel = r'$N=%.2f\times 10^{5}$' % (lda0.Number/1e5)
Slabel = r'$S/N=%.2fk_{\mathrm{B}}$' % ( lda0.Entropy / lda0.Number )
thermolabel = '\n'.join([Nlabel, Slabel])
ldainfoA = '\n'.join(lda0.Info().split('\n')[:2])
ldainfoB = '\n'.join(lda0.Info().split('\n')[-2:])
fig.text( 0.05, 0.98, lattlabel, ha='left', va='top', linespacing=1.2)
fig.text( 0.48, 0.98, ldainfoA, ha='right', va='top', linespacing=1.2)
fig.text( 0.52, 0.98, ldainfoB, ha='left', va='top', linespacing=1.2)
fig.text( 0.95, 0.98, thermolabel, ha='right', va='top', linespacing=1.2)
#fig.text( 0.05, 0.86, "Sample is divided in 5 bins, all containing" +\
# " the same number of atoms (see panel 2).\n" + \
# "Average Fermi-Hubbard parameters $n$, $U$, $t$, " +\
# "and $U/t$ are calculated in each bin (see panels 1, 3, 4, 5 )" )
gs = matplotlib.gridspec.GridSpec( 2,4, wspace=0.18,\
left=0.1, right=0.9, bottom=0.05, top=0.98)
# Setup axes
axn = fig.add_subplot(gs[0,0])
axnInt = fig.add_subplot(gs[0,3])
axU = fig.add_subplot(gs[1,0])
axt = fig.add_subplot(gs[1,1])
axUt = fig.add_subplot(gs[1,2])
axv0 = fig.add_subplot(gs[1,3])
axEntr = fig.add_subplot( gs[0,1] )
axSpi = fig.add_subplot( gs[0,2] )
# Set xlim
x0 = -40.; x1 = 40.
axn.set_xlim( x0, x1)
axEntr.set_xlim( x0, x1)
axEntr.set_ylim( 0., 1.0)
axSpi.set_xlim( x0, x1)
axSpi.set_ylim( 0., 3.0)
axnInt.set_xlim( 0., x1 )
axU.set_xlim( x0, x1 )
axU.set_ylim( 0., np.amax( lda0.onsite_t_111 * lda0.tunneling_111 *1.05 ) )
axt.set_xlim( x0, x1 )
axt.set_ylim( 0., 0.12)
axUt.set_xlim( x0, x1 )
axUt.set_ylim( 0., np.amax( lda0.onsite_t_111 * 1.05 ))
axv0.set_xlim( x0, x1 )
lw0 = 2.5
# Plot relevant quantities
r111_, density_111 = lda0.getDensity( lda0.globalMu, lda0.T )
r111_Entr, entropy_111 = lda0.getEntropy111( lda0.globalMu, lda0.T)
r111_Spi, spi_111 = lda0.getSpi111( lda0.globalMu, lda0.T)
V0_111 = lda0.pot.S0( lda0.X111, lda0.Y111, lda0.Z111 )
# density, entropy and spi
axn.plot( lda0.r111, density_111, lw=lw0 , color='black')
axEntr.plot( lda0.r111, entropy_111, lw=lw0 , color='black')
axSpi.plot( lda0.r111, spi_111, lw=lw0 , color='black')
# U
axU.plot( lda0.r111, lda0.onsite_t_111 * lda0.tunneling_111 , \
lw=lw0, label='$U$', color='black')
# t
axt.plot( lda0.r111, lda0.tunneling_111,lw=lw0, label='$t$', \
color='black')
# U/t
axUt.plot( lda0.r111, lda0.onsite_t_111, lw=lw0, color='black')
# Lattice depth
#print "shape of V0 = ", V0_111.shape
axv0.plot( lda0.r111, V0_111[0], lw=lw0, color='black', \
label='$\mathrm{Lattice\ depth}$')
# Band gap
    bandgap_111 = scubic.bands3dvec( V0_111, NBand=1 )[0] \
- scubic.bands3dvec( V0_111, NBand=0 )[1]
axv0.plot( lda0.r111, bandgap_111, lw=lw0, linestyle=':', color='black', \
label='$\mathrm{Band\ gap}$')
axv0.legend( bbox_to_anchor=(0.03,0.02), \
loc='lower left', numpoints=3, labelspacing=0.2,\
prop={'size':6}, handlelength=1.5, handletextpad=0.5 )
    # Define function to calculate cumulative atom number
def NRadius( Radius ):
"""
This function calculates the fraction of the atom number
up to a certain Radius
"""
valid = np.logical_and( np.abs(lda0.r111) < Radius, \
~np.isnan(density_111) )
r = lda0.r111[ valid ]
dens = density_111[ valid ]
return np.power( lda0.pot.l/2, -3) * \
2 * np.pi*integrate.simps( dens*(r**2), r) / lda0.Number
    # Plot the cumulative atom number
radii = lda0.r111[ lda0.r111 > 4. ]
NInt = []
for radius in radii:
NInt.append( NRadius( radius ) )
NInt = np.array( NInt )
axnInt.plot( radii, NInt, lw=lw0, color='black')
    # Define function to numerically solve for x given y in a pair of x,y arrays
def x_solve( x_array, y_array, yval ):
"""
This function solves for x0 in the equation y0=y(x0)
where the function y(x) is defined with data arrays.
"""
# Convert the array to a function and then solve for y==yval
yf = interp1d( x_array, y_array-yval, kind='cubic')
return optimize.brentq( yf, x_array.min(), x_array.max() )
def y_solve( x_array, y_array, xval ):
yf = interp1d( x_array, y_array, kind='cubic')
return yf(xval)
radius1e = x_solve( lda0.r111[ lda0.r111 > 0 ] , \
density_111[ lda0.r111 > 0 ] , \
density_111.max()/np.exp(1.) )
pos_r111 = lda0.r111[ lda0.r111 > 0 ]
pos_dens111 = density_111[ lda0.r111 > 0 ]
#slice_type = 'defined_bins'
slice_type = 'percentage'
if slice_type == 'defined_bins':
print pos_dens111.max()
cutoffs = [ 1.20, 1.05, 0.95, 0.75, 0.50, 0.25, 0.00 ]
if pos_dens111.max() < 1.20 :
cutoffs = cutoffs[1:]
if pos_dens111.max() < 1.05 :
cutoffs = cutoffs[1:]
nrange0 = [ pos_dens111.max() ] + cutoffs[:-1]
nrange1 = cutoffs
print nrange0
print nrange1
rbins = []
for i in range(len(nrange1)-1):
if np.any( pos_dens111 > nrange1[i] ):
rbins.append(( (nrange1[i] + nrange0[i])/2., \
x_solve( pos_r111, pos_dens111, nrange1[i] ) ))
print rbins
rcut = [ b[1] for b in rbins ]
print " Bins cut radii = ", rcut
elif slice_type == 'percentage':
    # Find the various radii that split the cloud into shells of 20% atom number
rcut = []
nrange0 = [ pos_dens111[0] ]
nrange1 = []
for Ncut in [0.2, 0.4, 0.6, 0.8 ]:
sol = x_solve( radii, NInt, Ncut )
rcut.append( sol )
denssol = y_solve( pos_r111, pos_dens111, sol )
nrange0.append( denssol )
nrange1.append( denssol )
nrange1.append(0.)
# get the number of atoms in each bin
binedges = rcut + [rcut[-1]+20.]
Nbin = []
for b in range(len(rcut) + 1 ):
if b == 0:
Nbin.append( NRadius( binedges[b] ) )
else:
Nbin.append( NRadius(binedges[b]) - NRadius(binedges[b-1]) )
Nbin = np.array( Nbin )
Nbinsum = Nbin.sum()
if np.abs( Nbinsum - 1.0 ) > 0.01:
print "Total natoms from adding bins = ", Nbinsum
raise ValueError("Normalization issue with density distribution.")
# Define functions to average over the shells
def y_average( y_array, x0, x1):
# Average y_array over the radii x0 to x1, weighted by density
valid = np.logical_and( np.abs(lda0.r111) < 70., ~np.isnan(density_111) )
r = lda0.r111[ valid ]
dens = density_111[ valid ]
y = y_array[ valid ]
shell = np.logical_and( r >= x0, r<x1 )
r = r[shell]
dens = dens[shell]
y = y[shell]
num = integrate.simps( y* dens*(r**2), r)
den = integrate.simps( dens*(r**2), r)
return num/den
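    # In other words, y_average computes the density-weighted shell average
    #   <y> = Int_{x0}^{x1} y(r) n(r) r^2 dr / Int_{x0}^{x1} n(r) r^2 dr
    # using Simpson's rule for both integrals.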
    # Define a function that builds a piecewise function from the shell-averaged
    # values of a quantity so that it can be plotted
def binned( x, yqty ):
x = np.abs(x)
yavg = []
cond = []
for x0,x1 in zip( [0.]+rcut, rcut+[rcut[-1]+20.]):
cond.append(np.logical_and( x >= x0 , x<x1 ) )
yavg.append( y_average( yqty, x0, x1) )
return np.piecewise( x, cond, yavg ), yavg
# Calculate and plot the binned quantities
dens_binned = binned( lda0.r111, density_111 )
entr_binned = binned( lda0.r111, entropy_111 )
spi_binned = binned( lda0.r111, spi_111 )
Ut_binned = binned( lda0.r111, lda0.onsite_t_111 )
U_binned = binned( lda0.r111, lda0.onsite_t_111 * lda0.tunneling_111 )
t_binned = binned( lda0.r111, lda0.tunneling_111 )
peak_dens = np.amax( density_111 )
peak_t = np.amin( lda0.tunneling_111 )
axn.fill_between( lda0.r111, dens_binned[0], 0., \
lw=2, color='red', facecolor='red', \
zorder=2, alpha=0.8)
axEntr.fill_between( lda0.r111, entr_binned[0], 0., \
lw=2, color='red', facecolor='red', \
zorder=2, alpha=0.8)
axSpi.fill_between( lda0.r111, spi_binned[0], 0., \
lw=2, color='red', facecolor='red', \
zorder=2, alpha=0.8)
axUt.fill_between( lda0.r111, Ut_binned[0], 0., \
lw=2, color='red', facecolor='red', \
zorder=2, alpha=0.8 )
axU.fill_between( lda0.r111, U_binned[0], 0., \
lw=2, color='red', facecolor='red',label='$U$', \
zorder=2, alpha=0.8)
axt.fill_between( lda0.r111, t_binned[0], 0., \
lw=2, color='red', facecolor='red',linestyle=':',\
label='$t$', zorder=2, alpha=0.8)
# Set y labels
axn.set_ylabel(r'$n$')
axEntr.set_ylabel(r'$s$')
axSpi.set_ylabel(r'$S_{\pi}$')
axnInt.set_ylabel(r'$N_{<R}$')
axU.set_ylabel(r'$U\,(E_{R})$')
axt.set_ylabel(r'$t\,(E_{R})$')
axUt.set_ylabel(r'$U/t$')
axv0.set_ylabel(r'$E_{R}$')
# Set y lims
n_ylim = kwargs.get('n_ylim',None)
if n_ylim is not None: axn.set_ylim( *n_ylim)
letters = [\
r'\textbf{a}',\
r'\textbf{b}',\
r'\textbf{c}',\
r'\textbf{d}',\
r'\textbf{e}',\
r'\textbf{f}',\
r'\textbf{g}',\
r'\textbf{h}',\
]
for i,ax in enumerate([axn, axEntr, axSpi, axnInt, axU, axt, axUt, axv0]):
ax.text( 0.08,0.86, letters[i] , transform=ax.transAxes, fontsize=14)
ax.yaxis.grid()
ax.set_xlabel(r'$\mu\mathrm{m}$')
for n,r in enumerate(rcut):
if n % 2 == 0:
if n == len(rcut) - 1:
r2 = 60.
else:
r2 = rcut[n+1 ]
ax.axvspan( r, r2, facecolor='lightgray')
if i != 3:
ax.axvspan(-r2, -r, facecolor='lightgray')
ax.axvline( r, lw=1.0, color='gray', zorder=1 )
if i != 3:
ax.axvline(-r, lw=1.0, color='gray', zorder=1 )
ax.xaxis.set_major_locator( matplotlib.ticker.MultipleLocator(20) )
ax.xaxis.set_minor_locator( matplotlib.ticker.MultipleLocator(10) )
#labels = [item.get_text() for item in ax.get_xticklabels()]
#print labels
#labels = ['' if float(l) % 40 != 0 else l for l in labels ]
#ax.set_xticklabels(labels)
axnInt.xaxis.set_major_locator( matplotlib.ticker.MultipleLocator(10) )
axnInt.xaxis.set_minor_locator( matplotlib.ticker.MultipleLocator(5) )
# Finalize figure
gs.tight_layout(fig, rect=[0.,0.0,1.0,0.94])
if kwargs.get('closefig', False):
plt.close()
#dens_set = np.array( [ b[0] for b in rbins ] + [dens_binned[1][-1]] )
binresult = np.column_stack((
np.round( Nbin, decimals=3),\
np.round( nrange1, decimals=3),\
np.round( nrange0, decimals=3),\
np.round( dens_binned[1], decimals=2),\
np.round( t_binned[1], decimals=3),\
np.round( U_binned[1], decimals=3),\
np.round( Ut_binned[1], decimals=3) ))
from tabulate import tabulate
output = tabulate(binresult, headers=[\
"Atoms in bin", \
"n min", \
"n max", \
"Mean n", \
"Mean t", \
"Mean U", \
"Mean U/t", ]\
, tablefmt="orgtbl", floatfmt='.3f')
#, tablefmt="latex", floatfmt='.3f')
#print
#print output
if kwargs.get('return_profile', False):
return fig, binresult,\
peak_dens, radius1e, peak_t, output, r111_, density_111
else:
return fig, binresult,\
peak_dens, radius1e, peak_t, output
def CheckInhomogSimple( lda0, **kwargs ):
    """This function makes a plot along 111 of the density, U/t and T/t.
    It is useful for assessing the degree of inhomogeneity in our system."""
# Prepare the figure
fig = plt.figure(figsize=(9.,4.2))
lattlabel = '\n'.join( list( lda0.pot.Info() ) )
lattlabel = '\n'.join( [ i.split( r'$\mathrm{,}\ $' )[0].replace('s','v') \
for i in lda0.pot.Info() ] )
Nlabel = r'$N=%.2f\times 10^{5}$' % (lda0.Number/1e5)
Slabel = r'$S/N=%.2fk_{\mathrm{B}}$' % ( lda0.Entropy / lda0.Number )
thermolabel = '\n'.join([Nlabel, Slabel])
ldainfoA = '\n'.join(lda0.Info().split('\n')[:2])
ldainfoB = '\n'.join(lda0.Info().split('\n')[-2:])
fig.text( 0.05, 0.98, lattlabel, ha='left', va='top', linespacing=1.2)
fig.text( 0.48, 0.98, ldainfoA, ha='right', va='top', linespacing=1.2)
fig.text( 0.52, 0.98, ldainfoB, ha='left', va='top', linespacing=1.2)
fig.text( 0.95, 0.98, thermolabel, ha='right', va='top', linespacing=1.2)
gs = matplotlib.gridspec.GridSpec( 1,3, wspace=0.18,\
left=0.1, right=0.9, bottom=0.05, top=0.98)
# Setup axes
axn = fig.add_subplot(gs[0,0])
axU = fig.add_subplot(gs[0,1])
axT = fig.add_subplot(gs[0,2])
# Set xlim
x0 = -40.; x1 = 40.
axn.set_xlim( x0, x1)
axU.set_xlim( x0, x1 )
axU.set_ylim( 0., np.amax( lda0.onsite_t_111 * lda0.tunneling_111 *1.05 ) )
axT.set_xlim( x0, x1 )
axT.set_ylim( 0., 1.0)
lw0 = 2.5
# Plot relevant quantities
r111_, density_111 = lda0.getDensity( lda0.globalMu, lda0.T )
# density,
axn.plot( lda0.r111, density_111, lw=lw0 , color='black')
# U
Ut_111 = lda0.onsite_t_111
axU.plot( lda0.r111, Ut_111 , \
lw=lw0, label='$U$', color='black')
# T
Tt_111 = lda0.T / lda0.tunneling_111
axT.plot( lda0.r111, Tt_111, lw=lw0, label='$T$', \
color='black')
peak_dens = np.amax( density_111 )
peak_t = np.amin( lda0.tunneling_111 )
# Set y labels
axn.set_ylabel(r'$n$')
axU.set_ylabel(r'$U/t$')
axT.set_ylabel(r'$T/t$')
# Set y lims
n_ylim = kwargs.get('n_ylim',None)
if n_ylim is not None: axn.set_ylim( *n_ylim)
letters = [\
r'\textbf{a}',\
r'\textbf{b}',\
r'\textbf{c}',\
]
for i,ax in enumerate([axn, axU, axT]):
ax.text( 0.08,0.86, letters[i] , transform=ax.transAxes, fontsize=14)
ax.yaxis.grid()
ax.set_xlabel(r'$\mu\mathrm{m}$')
ax.xaxis.set_major_locator( matplotlib.ticker.MultipleLocator(20) )
ax.xaxis.set_minor_locator( matplotlib.ticker.MultipleLocator(10) )
#labels = [item.get_text() for item in ax.get_xticklabels()]
#print labels
#labels = ['' if float(l) % 40 != 0 else l for l in labels ]
#ax.set_xticklabels(labels)
# Finalize figure
gs.tight_layout(fig, rect=[0.,0.0,1.0,0.94])
if kwargs.get('closefig', False):
plt.close()
if kwargs.get('return_profile', False):
return fig, peak_dens, peak_t, r111_, density_111, Ut_111 ,Tt_111
else:
return fig, peak_dens, peak_t
def CheckInhomogTrap( lda0, **kwargs ):
    """This function makes a plot along 111 of U, t, U/t, v0, W, and W/U
    (where W is the band gap).
    It is useful for assessing the degree of inhomogeneity in our system."""
# Prepare the figure
fig = plt.figure(figsize=(8.,4.2))
lattlabel = '\n'.join( list( lda0.pot.Info() ) )
lattlabel = '\n'.join( [ i.split( r'$\mathrm{,}\ $' )[0].replace('s','v') \
for i in lda0.pot.Info() ] )
ldainfoA = '\n'.join(lda0.Info().split('\n')[:2])
ldainfoB = '\n'.join(lda0.Info().split('\n')[-2:])
fig.text( 0.05, 0.98, lattlabel, ha='left', va='top', linespacing=1.2)
fig.text( 0.48, 0.98, ldainfoA, ha='right', va='top', linespacing=1.2)
fig.text( 0.52, 0.98, ldainfoB, ha='left', va='top', linespacing=1.2)
gs = matplotlib.gridspec.GridSpec( 2,4, wspace=0.18,\
left=0.1, right=0.9, bottom=0.05, top=0.98)
# Setup axes
axU = fig.add_subplot(gs[0,0])
axt = fig.add_subplot(gs[0,1])
ax12t = fig.add_subplot(gs[0,2])
axUt = fig.add_subplot(gs[0,3])
axv0 = fig.add_subplot(gs[1,0])
axW = fig.add_subplot(gs[1,1])
axWU = fig.add_subplot(gs[1,2])
axW12t = fig.add_subplot(gs[1,3])
axs = [axU, axt, ax12t, axUt, axv0, axW, axWU, axW12t]
# Set xlim
x0 = 0.; x1 = 40.
for ax in axs:
ax.set_xlim( x0, x1)
# Set y labels
axU.set_ylabel(r'$U\,(E_{R})$')
axt.set_ylabel(r'$t\,(\mathrm{kHz})$')
ax12t.set_ylabel(r'$12t\,(E_{R})$')
axUt.set_ylabel(r'$U/t$')
axv0.set_ylabel(r'$v_{0}\,(E_{R})$')
axW.set_ylabel(r'$W\,(E_{R})$')
axWU.set_ylabel(r'$W/U$')
axW12t.set_ylabel(r'$W/(12t)$')
#axU.set_ylim( 0., np.amax( lda0.onsite_t_111 * lda0.tunneling_111 *1.05 ) )
lw0 = 2.5
# U
U_111 = lda0.onsite_t_111 * lda0.tunneling_111
axU.plot( lda0.r111, U_111 , \
lw=lw0, label='$U/t$', color='black')
# t
t_111 = lda0.tunneling_111
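    # NOTE: the factor of 29. below presumably converts the tunneling from
    # recoil units to kHz, to match the kHz label on this axis; this is an
    # assumption based on the axis label, not something stated in the code.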
axt.plot( lda0.r111, t_111*29., \
lw=lw0, label='$t$', color='black')
# 12t
t_111 = lda0.tunneling_111
ax12t.plot( lda0.r111, 12.*t_111 , \
lw=lw0, label='$t$', color='black')
# U/t
Ut_111 = lda0.onsite_t_111
axUt.plot( lda0.r111, Ut_111 , \
lw=lw0, label='$U$', color='black')
# v0
V0_111 = lda0.pot.S0( lda0.X111, lda0.Y111, lda0.Z111 )
axv0.plot( lda0.r111, V0_111[0], lw=lw0, color='black', \
label='$\mathrm{Lattice\ depth}$')
# Band gap
    bandgap_111 = scubic.bands3dvec( V0_111, NBand=1 )[0] \
                  - scubic.bands3dvec( V0_111, NBand=0 )[1]
axW.plot( lda0.r111, bandgap_111, lw=lw0, color='black', \
label='$\mathrm{Band\ gap},\,W$')
# Band gap / U
axWU.plot( lda0.r111, bandgap_111 / U_111, lw=lw0, color='black', \
label='$W/U$')
# Band gap / 12t
axW12t.plot( lda0.r111, bandgap_111 / (12.*t_111), lw=lw0, color='black', \
label='$W/(12t)$')
letters = [\
r'\textbf{a}',\
r'\textbf{b}',\
r'\textbf{c}',\
r'\textbf{d}',\
r'\textbf{e}',\
r'\textbf{f}',\
]
for i,ax in enumerate(axs):
#ax.text( 0.08,0.86, letters[i] , transform=ax.transAxes, fontsize=14)
ax.yaxis.grid()
ax.set_xlabel(r'$\mu\mathrm{m}$')
ax.xaxis.set_major_locator( matplotlib.ticker.MultipleLocator(10) )
ax.xaxis.set_minor_locator( matplotlib.ticker.MultipleLocator(5) )
#labels = [item.get_text() for item in ax.get_xticklabels()]
#print labels
#labels = ['' if float(l) % 40 != 0 else l for l in labels ]
#ax.set_xticklabels(labels)
# Finalize figure
gs.tight_layout(fig, rect=[0.,0.0,1.0,0.94])
if kwargs.get('closefig', False):
plt.close()
return fig
| mit | 8,712,553,290,903,506,000 | 36.097433 | 83 | 0.517302 | false |
cheery/language | parser/__init__.py | 1 | 15728 | from lookahead import CharacterLookAhead, LookAhead
from structures import Constant, Struct
specials = {
',': 'comma',
'(': 'leftparen', ')': 'rightparen', '[': 'leftbracket', ']': 'rightbracket',
}
operators = set([
'or', 'and', 'not',
'!', ':', '=', '-', '+', '*', '/', '<>', '==', '!=', '->',
'<', '<=', '>', '>=', '|', '^', '&', '<<', '>>', '//', '%', '~',
'.', '.;', '.:', ':.', ';', '@', '::', '..', ':=',
])
infix_operators = {
'or': 20,
'and': 30,
'<': 40, '<=': 40, '>': 40, '>=': 40,
'<>': 40, '!=': 40, '==': 40,
'|': 50,
'^': 60,
'&': 70,
'<<': 80, '>>': 80,
'+': 90, '-': 90,
'*': 100, '/': 100, '//': 100, '%': 100,
}
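# The integers above are binding powers: parse_expr keeps consuming an infix
# operator only while its power is greater than the precedence it was called
# with, so larger numbers bind tighter (e.g. '*' at 100 binds before '+' at 90).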
prefix_operators = {
'not': 30,
'+': 110, '-': 110, '~': 110,
}
right_binding = set(['or', 'and', '<', '<=', '>', '>=', '<>', '!=', '=='])
def main():
source = 'tokens'
fd = open(source, 'r')
for structure in parse(fd.read().decode('utf-8'), source, debug=True):
print structure
def parse_file(source, debug=False):
with open(source, 'r') as fd:
return parse(fd.read().decode('utf-8'), source, debug)
def parse(source, filename=None, debug=False):
cla = CharacterLookAhead(source)
if debug:
tla = LookAhead(tokenlogger(tokenize(cla), filename))
else:
tla = LookAhead(tokenize(cla))
if ahead(tla, 'newline'):
expect(tla, 'newline')
if not tla.empty:
return parse_block(tla)
else:
return ()
def parse_block(tla):
yield parse_sentence(tla)
while not tla.empty and not ahead(tla, 'dedent'):
expect(tla, 'newline')
yield parse_sentence(tla)
def parse_sentence(tla, required=True):
location = tla.location
head = parse_word(tla, required, 10)
if head is None:
return
if ahead_string(tla, '=', ':', ':='):
operator = Constant(tla.location, 'operator', expect(tla, 'operator').string)
blocks = find_placeholders(head)
if len(blocks) > 0:
raise Exception("%s: not allowed on toplevel lhs side of '=' or ':'." % linecol(blocks[0].location))
return Struct(location, 'infix', operator, head, parse_sentence(tla))
sentence = Struct(location, 'sentence', head)
for word in repeated(parse_word, tla, False, 0):
sentence.append(word)
blocks = find_placeholders(sentence)
if ahead(tla, 'indent'):
expect(tla, 'indent')
if len(blocks) > 1:
raise Exception("%s: cannot fill this placeholder" % linecol(blocks[0].location))
elif len(blocks) > 0:
block = blocks[0]
for item in parse_block(tla):
block.append(item)
else:
sentence.append(Struct(location, 'block', *parse_block(tla)))
expect(tla, 'dedent')
elif len(blocks) > 0:
raise Exception("%s: cannot fill this placeholder" % linecol(blocks[0].location))
return sentence
def find_placeholders(node, out=None):
out = [] if out is None else out
if node.group == 'block':
out.append(node)
elif isinstance(node, Struct):
for item in node:
if item.group == 'sentence' or item.group == 'function':
continue
find_placeholders(item, out)
return out
def parse_word(tla, required, precedence):
location = tla.location
expr = parse_slot(tla, required, precedence)
if expr is None:
return
if ahead(tla, 'comma'):
expr = Struct(location, 'tuple+', expr)
while ahead(tla, 'comma'):
expect(tla, 'comma')
expr.append(parse_slot(tla, True, precedence))
return expr
def parse_arglist(tla, location, *head):
arglist = Struct(location, 'tuple', *head)
slot = parse_slot(tla, False, 0)
if slot is None:
return arglist
arglist.append(slot)
while ahead(tla, 'comma'):
expect(tla, 'comma')
arglist.append(parse_slot(tla, True, 0))
return arglist
def parse_slot(tla, required, precedence):
if precedence >= 10:
return parse_slice(tla, required, precedence)
location = tla.location
slic = parse_slice(tla, required, precedence)
if ahead_string(tla, '=', ':'):
operator = Constant(tla.location, 'operator', expect(tla, 'operator').string)
return Struct(location, 'infix', operator, slic, parse_slot(tla, required, precedence))
return slic
def parse_slice(tla, required, precedence):
location = tla.location
expr = parse_expr(tla, False, precedence)
if expr is None:
condition = lambda: tla.value.near == tla.value.balanced
else:
condition = lambda: tla.value.balanced
if ahead_string(tla, '.:', ':.') and condition():
mode = ('incr' if tla.step().string == '.:' else 'decr')
start = expr
stop = parse_expr(tla, False, precedence)
if start is None:
start = Constant(tla.location, 'symbol', 'null')
if stop is None:
stop = Constant(tla.location, 'symbol', 'null')
stride = Constant(tla.location, 'symbol', 'null')
step = Constant(tla.location, 'symbol', 'null')
if ahead_string(tla, '::') and tla.value.balanced:
expect(tla, 'operator')
stride = parse_expr(tla, False, precedence)
if ahead_string(tla, '..') and tla.value.balanced:
expect(tla, 'operator')
step = parse_expr(tla, False, precedence)
return Struct(location, mode, start, stop, stride, step)
if expr is None:
return parse_expr(tla, required, precedence)
return expr
def parse_expr(tla, required, precedence):
location = tla.location
if ahead(tla, 'operator') and tla.value.string in prefix_operators:
if tla.value.near <> tla.value.balanced and tla.value.string <> 'not':
raise Exception("%s: This is not C" % linecol(tla.location))
operator = Constant(tla.location, 'operator', expect(tla, 'operator').string)
expr = Struct(location, 'prefix', operator, parse_expr(tla, True, prefix_operators[operator.value]))
else:
expr = parse_fullterm(tla, required)
while ahead(tla, 'operator') and tla.value.string in infix_operators:
prec = infix_operators[tla.value.string]
if prec <= precedence or not tla.value.balanced:
break
prex = prec - (tla.value.string in right_binding)
operator = Constant(tla.location, 'operator', expect(tla, 'operator').string)
expr = Struct(location, 'infix', operator, expr, parse_expr(tla, True, prex))
return expr
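# A short worked example of the associativity handling above (illustrative,
# not from the original sources):
#   'a - b - c'   -> '-' is not in right_binding, so the right side is parsed
#                    with the same precedence and it groups as (a - b) - c.
#   'a == b == c' -> '==' is in right_binding, so the right side is parsed
#                    with precedence - 1 and it groups as a == (b == c).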
def parse_fullterm(tla, required):
term = parse_term(tla, required)
while not tla.empty and tla.value.near:
location = tla.location
if ahead(tla, 'attribute'):
string = expect(tla, 'attribute').string
term = Struct(location, 'attribute', term, Constant(location, 'attribute', string[1:]))
elif ahead(tla, 'leftparen'):
expect(tla, 'leftparen')
term = parse_arglist(tla, location, term)
term.group = 'call'
expect(tla, 'rightparen')
elif ahead(tla, 'leftbracket'):
expect(tla, 'leftbracket')
term = parse_arglist(tla, location, term)
term.group = 'index'
expect(tla, 'rightbracket')
elif ahead_string(tla, ';'):
expect(tla, 'operator')
term = Struct(location, 'call', term, Struct(location, "block"))
elif ahead_string(tla, '.;'):
expect(tla, 'operator')
term = Struct(location, 'attribute', term, Struct(location, "block"))
else:
break
return term
def parse_term(tla, required):
location = tla.location
if ahead(tla, 'symbol'):
return Constant(location, 'symbol', expect(tla, 'symbol').string)
elif ahead_string(tla, ';'):
expect(tla, 'operator')
return Struct(location, 'block')
elif ahead(tla, 'string'):
string = expect(tla, 'string').string
return Constant(location, 'string', string[1:-1])
elif ahead(tla, 'number'):
string = expect(tla, 'number').string
if ahead(tla, 'flot'):
if not tla.value.near:
raise Exception("%s: decimal expression supposed to be typed with no spacing" % (linecol(tla.location)))
string += expect(tla, 'flot').string
return Constant(location, 'float', string)
return Constant(location, 'number', string)
elif ahead(tla, 'leftparen'):
expect(tla, 'leftparen')
if ahead(tla, 'operator'):
operator = Constant(tla.location, 'operator', expect(tla, 'operator').string)
expect(tla, 'rightparen')
return operator
else:
term = parse_arglist(tla, location)
expect(tla, 'rightparen')
if ahead_string(tla, '->'):
expect(tla, 'operator')
blocks = find_placeholders(term)
if len(blocks) > 0:
raise Exception("%s: not allowed inside function argument list" % linecol(blocks[0].location))
return parse_function(tla, location, term)
elif len(term) == 1 and term[0].group != 'block':
return term[0]
else:
term.group = 'tuple'
return term
elif ahead(tla, 'leftbracket'):
expect(tla, 'leftbracket')
arglist = parse_arglist(tla, location)
arglist.group = 'list'
expect(tla, 'rightbracket')
return arglist
elif ahead_string(tla, '->'):
expect(tla, 'operator')
return parse_function(tla, location, Struct(location, 'arglist'))
elif ahead_string(tla, '@'):
expect(tla, 'operator')
term = Constant(location, 'self', None)
if ahead(tla, '.'):
raise Exception("%s: you're serious?" % (linecol(tla.location)))
if ahead(tla, 'symbol') and tla.value.near:
term = Struct(location, 'attribute', term, Constant(tla.location, 'attribute', expect(tla, 'symbol').string))
return term
elif required:
raise Exception("%s: a term is missing after '%s'" % (linecol(tla.previous_location), tla.previous_value.string))
def parse_function(tla, location, func):
func.group = 'function'
sentence = parse_sentence(tla, False)
if sentence is not None:
func.append(sentence)
elif ahead(tla, 'indent'):
expect(tla, 'indent')
func.append(Struct(location, 'block', *parse_block(tla)))
expect(tla, 'dedent')
return func
def repeated(fn, *args):
node = fn(*args)
while node is not None:
yield node
node = fn(*args)
def ahead_string(tla, *strings):
return not tla.empty and tla.value.string in strings
def ahead(tla, *groups):
return not tla.empty and tla.value.group in groups
def expect(tla, group, string=None):
if tla.empty:
raise Exception(u"%s: expected %s, but stream is empty" % (linecol(tla.location), repr_expect(group, string)))
value = tla.value
    valid = (value.group == group) and (string is None or value.string == string)
if not valid:
raise Exception(u"%s: expected %s, got %r(%s)" % (linecol(tla.location), repr_expect(group, string), value.string, value.group))
return tla.step()
def repr_expect(group, string):
if string is None:
return "(%s)" % group
else:
return "%r(%s)" % (string, group)
def tokenlogger(tokens, source):
for token in tokens:
print "%s:%s: %r (%s) near=%r balanced=%r" % (
source, linecol(token.location),
token.string, token.group, token.near, token.balanced
)
yield token
class tokenize(object):
def __init__(self, cla):
self.cla = cla
self.indent = 0
self.layers = [-1]
def __iter__(self):
return self
def next(self):
cla = self.cla
if cla.empty and len(self.layers) > 1:
self.indent = self.layers.pop(-1)
return Token(cla.location, '', 'dedent')
if cla.empty:
raise StopIteration
if self.indent < self.layers[-1]:
indent = self.layers.pop(-1)
if self.indent != self.layers[-1]:
return Token(cla.location, '', 'badline')
return Token(cla.location, '', 'dedent')
if self.indent == self.layers[-1]:
indent = self.layers.pop(-1)
return Token(cla.location, '', 'newline')
while cla.value == ' ':
cla.step()
if cla.value == '#':
while cla.value != '\n':
cla.step()
if cla.value == '\n':
cla.step()
indent = 0
while cla.value == ' ':
indent += 1
cla.step()
if cla.value == '\n' or cla.value == '#':
return self.next()
if cla.empty:
return self.next()
if indent > self.indent:
self.layers.append(self.indent)
self.indent = indent
return Token(cla.location, '', 'indent')
elif indent == self.indent:
return Token(cla.location, '', 'newline')
else:
self.indent = indent
return Token(cla.location, '', 'dedent')
location = cla.location
near = (cla.previous_value != ' ')
string = ""
if issym(cla.value):
while issym(cla.value):
string += cla.step()
balanced = near <> (cla.value == ' ')
if string in operators:
return Token(location, string, 'operator', near, balanced)
if string[:1].isdigit():
return Token(location, string, 'number', near, balanced)
return Token(location, string, 'symbol', near, balanced)
if cla.value in "\"'":
terminator = string = cla.step()
while cla.value != terminator:
string += cla.step()
if cla.value == '\\':
string += cla.step()
string += cla.step()
balanced = near <> (cla.value == ' ')
return Token(location, string, 'string', near, balanced)
string = cla.step()
if string == '.':
while issym(cla.value):
string += cla.step()
if string != '.':
balanced = near <> (cla.value == ' ')
if isnum(string[1]):
return Token(location, string, 'flot', near, balanced)
return Token(location, string, 'attribute', near, balanced)
if string in operators:
while not cla.empty and string + cla.value in operators:
string += cla.step()
balanced = near <> (cla.value == ' ')
return Token(location, string, 'operator', near, balanced)
balanced = near <> (cla.value == ' ')
if string in specials:
return Token(location, string, specials[string], near, balanced)
return Token(location, string, 'unknown', near, balanced)
def issym(ch):
return ch.isalnum() or ch == '_'
def isnum(ch):
return ch.isdigit()
def linecol(location):
return "line %i, col %i" % (location >> 8, location & 255)
class Token(object):
def __init__(self, location, string, group, near=False, balanced=False):
self.location = location
self.string = string
self.group = group
self.near = near
self.balanced = balanced
def __repr__(self):
return '<Token %r>' % self.string
if __name__=='__main__':
main()
| gpl-3.0 | -1,161,089,621,615,312,600 | 36.182033 | 136 | 0.557541 | false |
TumblrCommunity/PowerPortfolio | test.py | 1 | 4318 | # To execute this test run python test.py on the Terminal
from portfolio.application.base import application
from portfolio.models import needs_db
import os
import json
import unittest
import tempfile
class PortfolioTestCase(unittest.TestCase):
def setUp(self):
self.tester = application.test_client()
def login(self):
passwd = "somepassword"
self.tester.post('/admin/api/login',
data=json.dumps(dict(password=passwd)),
content_type='application/json')
def test_login(self):
passwd = "somepassword"
response = self.tester.post('/admin/api/login',
data=json.dumps(dict(password=passwd)),
content_type='application/json')
self.assertEqual(json.loads(response.data.decode('utf-8')), {'auth':True})
passwd = "notsomepassword"
response = self.tester.post('/admin/api/login',
data=json.dumps(dict(password=passwd)),
content_type='application/json')
self.assertEqual(json.loads(response.data.decode('utf-8')), {'auth':False})
def test_logged_in(self):
response = self.tester.get('/admin/api/logged_in')
self.assertEqual(json.loads(response.data.decode('utf-8')), {'auth':False})
self.login()
response = self.tester.get('/admin/api/logged_in')
self.assertEqual(json.loads(response.data.decode('utf-8')), {'auth':True})
def test_logout(self):
response = self.tester.get('/admin/api/logout')
self.assertEqual(json.loads(response.data.decode('utf-8')), {'error':"Not logged in"})
self.login()
response = self.tester.get('/admin/api/logout')
self.assertEqual(response.status_code, 204)
response = self.tester.get('/admin/api/logout')
self.assertEqual(json.loads(response.data.decode('utf-8')), {'error':"Not logged in"})
def test_home_status_code(self):
response = self.tester.get('/')
self.assertEqual(response.status_code, 200)
def test_count(self):
response = self.tester.get('/api/projects/count', content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode('utf-8')), {'count':0})
def test_project_new(self):
self.login()
response = self.tester.post('/admin/api/projects/new',
data=json.dumps(dict(name='foo', url="http://", show=True, description="bar")),
content_type='application/json')
self.assertEqual(response.status_code, 204)
        # adding two projects - ideally this would be preset in the test database
response = self.tester.post('/admin/api/projects/new',
data=json.dumps(dict(name='foo', url="http://", show=True, description="bar")),
content_type='application/json')
self.assertEqual(response.status_code, 204)
def test_project_read(self):
response = self.tester.get('/api/projects/1', content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode('utf-8')), {'key':1, 'name':'foo','url':"http://", 'show':True, 'description':"bar" })
def test_project_write(self):
self.login()
#test valid update
response = self.tester.post('/admin/api/projects/2',
data=json.dumps(dict(name='foop', description='barp', show = False, url="https://")),
content_type='application/json')
self.assertEqual(response.status_code, 204)
response = self.tester.get('/api/projects/2', content_type='application/json')
self.assertEqual(response.status_code, 200)
#test invalid update
self.assertEqual(json.loads(response.data.decode('utf-8')), {'key':2, 'name':'foop','url':"https://", 'show':False, 'description':"barp" })
response = self.tester.post('/admin/api/projects/2',
data=json.dumps(None),
content_type='application/json')
self.assertEqual(response.status_code, 400)
if __name__ == '__main__':
unittest.main()
| mit | 1,062,895,766,827,420,400 | 44.93617 | 147 | 0.606994 | false |
ThinkboxSoftware/Deadline | Custom/events/Zabbix/API/httpretty/core.py | 1 | 34264 | # #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2013> Gabriel Falcão <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import re
import codecs
import inspect
import socket
import functools
import itertools
import warnings
import logging
import traceback
import json
import contextlib
from .compat import (
PY3,
StringIO,
text_type,
BaseClass,
BaseHTTPRequestHandler,
quote,
quote_plus,
urlunsplit,
urlsplit,
parse_qs,
unquote,
unquote_utf8,
ClassTypes,
basestring
)
from .http import (
STATUSES,
HttpBaseClass,
parse_requestline,
last_requestline,
)
from .utils import (
utf8,
decode_utf8,
)
from .errors import HTTPrettyError
from datetime import datetime
from datetime import timedelta
from errno import EAGAIN
old_socket = socket.socket
old_create_connection = socket.create_connection
old_gethostbyname = socket.gethostbyname
old_gethostname = socket.gethostname
old_getaddrinfo = socket.getaddrinfo
old_socksocket = None
old_ssl_wrap_socket = None
old_sslwrap_simple = None
old_sslsocket = None
if PY3: # pragma: no cover
basestring = (bytes, str)
try: # pragma: no cover
import socks
old_socksocket = socks.socksocket
except ImportError:
socks = None
try: # pragma: no cover
import ssl
old_ssl_wrap_socket = ssl.wrap_socket
if not PY3:
old_sslwrap_simple = ssl.sslwrap_simple
old_sslsocket = ssl.SSLSocket
except ImportError: # pragma: no cover
ssl = None
DEFAULT_HTTP_PORTS = frozenset([80])
POTENTIAL_HTTP_PORTS = set(DEFAULT_HTTP_PORTS)
DEFAULT_HTTPS_PORTS = frozenset([443])
POTENTIAL_HTTPS_PORTS = set(DEFAULT_HTTPS_PORTS)
class HTTPrettyRequest(BaseHTTPRequestHandler, BaseClass):
"""Represents a HTTP request. It takes a valid multi-line, `\r\n`
separated string with HTTP headers and parse them out using the
internal `parse_request` method.
It also replaces the `rfile` and `wfile` attributes with StringIO
instances so that we garantee that it won't make any I/O, neighter
for writing nor reading.
It has some convenience attributes:
`headers` -> a mimetype object that can be cast into a dictionary,
contains all the request headers
`method` -> the HTTP method used in this request
`querystring` -> a dictionary containing lists with the
attributes. Please notice that if you need a single value from a
query string you will need to get it manually like:
```python
>>> request.querystring
{'name': ['Gabriel Falcao']}
>>> print request.querystring['name'][0]
```
`parsed_body` -> a dictionary containing parsed request body or
None if HTTPrettyRequest doesn't know how to parse it. It
currently supports parsing body data that was sent under the
`content-type` headers values: 'application/json' or
'application/x-www-form-urlencoded'
"""
def __init__(self, headers, body=''):
# first of all, lets make sure that if headers or body are
# unicode strings, it must be converted into a utf-8 encoded
# byte string
self.raw_headers = utf8(headers.strip())
self.body = utf8(body)
# Now let's concatenate the headers with the body, and create
# `rfile` based on it
self.rfile = StringIO(b'\r\n\r\n'.join([self.raw_headers, self.body]))
self.wfile = StringIO() # Creating `wfile` as an empty
# StringIO, just to avoid any real
# I/O calls
# parsing the request line preemptively
self.raw_requestline = self.rfile.readline()
# initiating the error attributes with None
self.error_code = None
self.error_message = None
# Parse the request based on the attributes above
self.parse_request()
# making the HTTP method string available as the command
self.method = self.command
# Now 2 convenient attributes for the HTTPretty API:
# `querystring` holds a dictionary with the parsed query string
try:
self.path = self.path.encode('iso-8859-1')
except UnicodeDecodeError:
pass
self.path = decode_utf8(self.path)
qstring = self.path.split("?", 1)[-1]
self.querystring = self.parse_querystring(qstring)
# And the body will be attempted to be parsed as
# `application/json` or `application/x-www-form-urlencoded`
self.parsed_body = self.parse_request_body(self.body)
def __str__(self):
return '<HTTPrettyRequest("{0}", total_headers={1}, body_length={2})>'.format(
self.headers.get('content-type', ''),
len(self.headers),
len(self.body),
)
def parse_querystring(self, qs):
expanded = unquote_utf8(qs)
parsed = parse_qs(expanded)
result = {}
for k in parsed:
result[k] = list(map(decode_utf8, parsed[k]))
return result
def parse_request_body(self, body):
""" Attempt to parse the post based on the content-type passed. Return the regular body if not """
PARSING_FUNCTIONS = {
'application/json': json.loads,
'text/json': json.loads,
'application/x-www-form-urlencoded': self.parse_querystring,
}
FALLBACK_FUNCTION = lambda x: x
content_type = self.headers.get('content-type', '')
do_parse = PARSING_FUNCTIONS.get(content_type, FALLBACK_FUNCTION)
try:
body = decode_utf8(body)
return do_parse(body)
except:
return body
class EmptyRequestHeaders(dict):
pass
class HTTPrettyRequestEmpty(object):
body = ''
headers = EmptyRequestHeaders()
class FakeSockFile(StringIO):
pass
class FakeSSLSocket(object):
def __init__(self, sock, *args, **kw):
self._httpretty_sock = sock
def __getattr__(self, attr):
return getattr(self._httpretty_sock, attr)
class fakesock(object):
class socket(object):
_entry = None
debuglevel = 0
_sent_data = []
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM,
protocol=0):
self.setsockopt(family, type, protocol)
self.truesock = old_socket(family, type, protocol)
self._closed = True
self.fd = FakeSockFile()
self.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
self._sock = self
self.is_http = False
self._bufsize = 16
def getpeercert(self, *a, **kw):
now = datetime.now()
shift = now + timedelta(days=30 * 12)
return {
'notAfter': shift.strftime('%b %d %H:%M:%S GMT'),
'subjectAltName': (
('DNS', '*%s' % self._host),
('DNS', self._host),
('DNS', '*'),
),
'subject': (
(
('organizationName', '*.%s' % self._host),
),
(
('organizationalUnitName',
'Domain Control Validated'),
),
(
('commonName', '*.%s' % self._host),
),
),
}
def ssl(self, sock, *args, **kw):
return sock
def setsockopt(self, family, type, protocol):
self.family = family
self.protocol = protocol
self.type = type
def connect(self, address):
self._address = (self._host, self._port) = address
self._closed = False
self.is_http = self._port in POTENTIAL_HTTP_PORTS | POTENTIAL_HTTPS_PORTS
if not self.is_http:
self.truesock.connect(self._address)
def close(self):
if not (self.is_http and self._closed):
self.truesock.close()
self._closed = True
def makefile(self, mode='r', bufsize=-1):
"""Returns this fake socket's own StringIO buffer.
If there is an entry associated with the socket, the file
descriptor gets filled in with the entry data before being
returned.
"""
self._mode = mode
self._bufsize = bufsize
if self._entry:
self._entry.fill_filekind(self.fd)
return self.fd
def real_sendall(self, data, *args, **kw):
"""Sends data to the remote server. This method is called
when HTTPretty identifies that someone is trying to send
non-http data.
The received bytes are written in this socket's StringIO
buffer so that HTTPretty can return it accordingly when
necessary.
"""
if self.is_http: # no need to connect if `self.is_http` is
# False because self.connect already did
# that
self.truesock.connect(self._address)
self.truesock.settimeout(0)
self.truesock.sendall(data, *args, **kw)
should_continue = True
while should_continue:
try:
received = self.truesock.recv(self._bufsize)
self.fd.write(received)
should_continue = len(received) > 0
except socket.error as e:
if e.errno == EAGAIN:
continue
break
self.fd.seek(0)
def sendall(self, data, *args, **kw):
self._sent_data.append(data)
try:
requestline, _ = data.split(b'\r\n', 1)
method, path, version = parse_requestline(decode_utf8(requestline))
is_parsing_headers = True
except ValueError:
is_parsing_headers = False
if not self._entry:
# If the previous request wasn't mocked, don't mock the subsequent sending of data
return self.real_sendall(data, *args, **kw)
self.fd.seek(0)
if not is_parsing_headers:
if len(self._sent_data) > 1:
headers = utf8(last_requestline(self._sent_data))
meta = self._entry.request.headers
body = utf8(self._sent_data[-1])
if meta.get('transfer-encoding', '') == 'chunked':
if not body.isdigit() and body != b'\r\n' and body != b'0\r\n\r\n':
self._entry.request.body += body
else:
self._entry.request.body += body
httpretty.historify_request(headers, body, False)
return
# path might come with
s = urlsplit(path)
POTENTIAL_HTTP_PORTS.add(int(s.port or 80))
headers, body = list(map(utf8, data.split(b'\r\n\r\n', 1)))
request = httpretty.historify_request(headers, body)
info = URIInfo(hostname=self._host, port=self._port,
path=s.path,
query=s.query,
last_request=request)
matcher, entries = httpretty.match_uriinfo(info)
if not entries:
self._entry = None
self.real_sendall(data)
return
self._entry = matcher.get_next_entry(method, info, request)
def debug(self, func, *a, **kw):
if self.is_http:
frame = inspect.stack()[0][0]
lines = list(map(utf8, traceback.format_stack(frame)))
message = [
"HTTPretty intercepted and unexpected socket method call.",
("Please open an issue at "
"'https://github.com/gabrielfalcao/HTTPretty/issues'"),
"And paste the following traceback:\n",
"".join(decode_utf8(lines)),
]
raise RuntimeError("\n".join(message))
return func(*a, **kw)
def settimeout(self, new_timeout):
self.timeout = new_timeout
def send(self, *args, **kwargs):
return self.debug(self.truesock.send, *args, **kwargs)
def sendto(self, *args, **kwargs):
return self.debug(self.truesock.sendto, *args, **kwargs)
def recvfrom_into(self, *args, **kwargs):
return self.debug(self.truesock.recvfrom_into, *args, **kwargs)
def recv_into(self, *args, **kwargs):
return self.debug(self.truesock.recv_into, *args, **kwargs)
def recvfrom(self, *args, **kwargs):
return self.debug(self.truesock.recvfrom, *args, **kwargs)
def recv(self, *args, **kwargs):
return self.debug(self.truesock.recv, *args, **kwargs)
def __getattr__(self, name):
return getattr(self.truesock, name)
def fake_wrap_socket(s, *args, **kw):
return s
def create_fake_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
s = fakesock.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
s.settimeout(timeout)
if source_address:
s.bind(source_address)
s.connect(address)
return s
def fake_gethostbyname(host):
return '127.0.0.1'
def fake_gethostname():
return 'localhost'
def fake_getaddrinfo(
host, port, family=None, socktype=None, proto=None, flags=None):
return [(2, 1, 6, '', (host, port))]
class Entry(BaseClass):
def __init__(self, method, uri, body,
adding_headers=None,
forcing_headers=None,
status=200,
streaming=False,
**headers):
self.method = method
self.uri = uri
self.info = None
self.request = None
self.body_is_callable = False
if hasattr(body, "__call__"):
self.callable_body = body
self.body = None
self.body_is_callable = True
elif isinstance(body, text_type):
self.body = utf8(body)
else:
self.body = body
self.streaming = streaming
if not streaming and not self.body_is_callable:
self.body_length = len(self.body or '')
else:
self.body_length = 0
self.adding_headers = adding_headers or {}
self.forcing_headers = forcing_headers or {}
self.status = int(status)
for k, v in headers.items():
name = "-".join(k.split("_")).title()
self.adding_headers[name] = v
self.validate()
def validate(self):
content_length_keys = 'Content-Length', 'content-length'
for key in content_length_keys:
got = self.adding_headers.get(
key, self.forcing_headers.get(key, None))
if got is None:
continue
try:
igot = int(got)
except ValueError:
warnings.warn(
'HTTPretty got to register the Content-Length header ' \
'with "%r" which is not a number' % got,
)
if igot > self.body_length:
raise HTTPrettyError(
'HTTPretty got inconsistent parameters. The header ' \
'Content-Length you registered expects size "%d" but ' \
'the body you registered for that has actually length ' \
'"%d".' % (
igot, self.body_length,
)
)
def __str__(self):
return r'<Entry %s %s getting %d>' % (
self.method, self.uri, self.status)
def normalize_headers(self, headers):
new = {}
for k in headers:
new_k = '-'.join([s.lower() for s in k.split('-')])
new[new_k] = headers[k]
return new
def fill_filekind(self, fk):
now = datetime.utcnow()
headers = {
'status': self.status,
'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT'),
'server': 'Python/HTTPretty',
'connection': 'close',
}
if self.forcing_headers:
headers = self.forcing_headers
if self.adding_headers:
headers.update(self.normalize_headers(self.adding_headers))
headers = self.normalize_headers(headers)
status = headers.get('status', self.status)
if self.body_is_callable:
status, headers, self.body = self.callable_body(self.request, self.info.full_url(), headers)
headers.update({
'content-length': len(self.body)
})
string_list = [
'HTTP/1.1 %d %s' % (status, STATUSES[status]),
]
if 'date' in headers:
string_list.append('date: %s' % headers.pop('date'))
if not self.forcing_headers:
content_type = headers.pop('content-type',
'text/plain; charset=utf-8')
content_length = headers.pop('content-length', self.body_length)
string_list.append('content-type: %s' % content_type)
if not self.streaming:
string_list.append('content-length: %s' % content_length)
string_list.append('server: %s' % headers.pop('server'))
for k, v in headers.items():
string_list.append(
'{0}: {1}'.format(k, v),
)
for item in string_list:
fk.write(utf8(item) + b'\n')
fk.write(b'\r\n')
if self.streaming:
self.body, body = itertools.tee(self.body)
for chunk in body:
fk.write(utf8(chunk))
else:
fk.write(utf8(self.body))
fk.seek(0)
def url_fix(s, charset='utf-8'):
scheme, netloc, path, querystring, fragment = urlsplit(s)
path = quote(path, b'/%')
querystring = quote_plus(querystring, b':&=')
return urlunsplit((scheme, netloc, path, querystring, fragment))
class URIInfo(BaseClass):
def __init__(self,
username='',
password='',
hostname='',
port=80,
path='/',
query='',
fragment='',
scheme='',
last_request=None):
self.username = username or ''
self.password = password or ''
self.hostname = hostname or ''
if port:
port = int(port)
elif scheme == 'https':
port = 443
self.port = port or 80
self.path = path or ''
self.query = query or ''
if scheme:
self.scheme = scheme
elif self.port in POTENTIAL_HTTPS_PORTS:
self.scheme = 'https'
else:
self.scheme = 'http'
self.fragment = fragment or ''
self.last_request = last_request
def __str__(self):
attrs = (
'username',
'password',
'hostname',
'port',
'path',
)
fmt = ", ".join(['%s="%s"' % (k, getattr(self, k, '')) for k in attrs])
return r'<httpretty.URIInfo(%s)>' % fmt
def __hash__(self):
return hash(text_type(self))
def __eq__(self, other):
self_tuple = (
self.port,
decode_utf8(self.hostname.lower()),
url_fix(decode_utf8(self.path)),
)
other_tuple = (
other.port,
decode_utf8(other.hostname.lower()),
url_fix(decode_utf8(other.path)),
)
return self_tuple == other_tuple
def full_url(self, use_querystring=True):
credentials = ""
if self.password:
credentials = "{0}:{1}@".format(
self.username, self.password)
query = ""
if use_querystring and self.query:
query = "?{0}".format(decode_utf8(self.query))
result = "{scheme}://{credentials}{domain}{path}{query}".format(
scheme=self.scheme,
credentials=credentials,
domain=self.get_full_domain(),
path=decode_utf8(self.path),
query=query
)
return result
def get_full_domain(self):
hostname = decode_utf8(self.hostname)
# Port 80/443 should not be appended to the url
if self.port not in DEFAULT_HTTP_PORTS | DEFAULT_HTTPS_PORTS:
return ":".join([hostname, str(self.port)])
return hostname
@classmethod
def from_uri(cls, uri, entry):
result = urlsplit(uri)
if result.scheme == 'https':
POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
else:
POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
return cls(result.username,
result.password,
result.hostname,
result.port,
result.path,
result.query,
result.fragment,
result.scheme,
entry)
class URIMatcher(object):
regex = None
info = None
def __init__(self, uri, entries, match_querystring=False):
self._match_querystring = match_querystring
if type(uri).__name__ == 'SRE_Pattern':
self.regex = uri
result = urlsplit(uri.pattern)
if result.scheme == 'https':
POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
else:
POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
else:
self.info = URIInfo.from_uri(uri, entries)
self.entries = entries
#hash of current_entry pointers, per method.
self.current_entries = {}
def matches(self, info):
if self.info:
return self.info == info
else:
return self.regex.search(info.full_url(
use_querystring=self._match_querystring))
def __str__(self):
wrap = 'URLMatcher({0})'
if self.info:
return wrap.format(text_type(self.info))
else:
return wrap.format(self.regex.pattern)
def get_next_entry(self, method, info, request):
"""Cycle through available responses, but only once.
Any subsequent requests will receive the last response"""
if method not in self.current_entries:
self.current_entries[method] = 0
#restrict selection to entries that match the requested method
entries_for_method = [e for e in self.entries if e.method == method]
if self.current_entries[method] >= len(entries_for_method):
self.current_entries[method] = -1
if not self.entries or not entries_for_method:
raise ValueError('I have no entries for method %s: %s'
% (method, self))
entry = entries_for_method[self.current_entries[method]]
if self.current_entries[method] != -1:
self.current_entries[method] += 1
# Attach more info to the entry
# So the callback can be more clever about what to do
# This does also fix the case where the callback
# would be handed a compiled regex as uri instead of the
# real uri
entry.info = info
entry.request = request
return entry
def __hash__(self):
return hash(text_type(self))
def __eq__(self, other):
return text_type(self) == text_type(other)
class httpretty(HttpBaseClass):
"""The URI registration class"""
_entries = {}
latest_requests = []
last_request = HTTPrettyRequestEmpty()
_is_enabled = False
@classmethod
def match_uriinfo(cls, info):
for matcher, value in cls._entries.items():
if matcher.matches(info):
return (matcher, info)
return (None, [])
@classmethod
@contextlib.contextmanager
def record(cls, filename, indentation=4, encoding='utf-8'):
try:
import urllib3
except ImportError:
raise RuntimeError('HTTPretty requires urllib3 installed for recording actual requests.')
http = urllib3.PoolManager()
cls.enable()
calls = []
def record_request(request, uri, headers):
cls.disable()
response = http.request(request.method, uri)
calls.append({
'request': {
'uri': uri,
'method': request.method,
'headers': dict(request.headers),
'body': decode_utf8(request.body),
'querystring': request.querystring
},
'response': {
'status': response.status,
'body': decode_utf8(response.data),
'headers': dict(response.headers)
}
})
cls.enable()
return response.status, response.headers, response.data
for method in cls.METHODS:
cls.register_uri(method, re.compile(r'.*', re.M), body=record_request)
yield
cls.disable()
with codecs.open(filename, 'w', encoding) as f:
f.write(json.dumps(calls, indent=indentation))
@classmethod
@contextlib.contextmanager
def playback(cls, origin):
cls.enable()
data = json.loads(open(origin).read())
for item in data:
uri = item['request']['uri']
method = item['request']['method']
cls.register_uri(method, uri, body=item['response']['body'], forcing_headers=item['response']['headers'])
yield
cls.disable()
@classmethod
def reset(cls):
POTENTIAL_HTTP_PORTS.intersection_update(DEFAULT_HTTP_PORTS)
POTENTIAL_HTTPS_PORTS.intersection_update(DEFAULT_HTTPS_PORTS)
cls._entries.clear()
cls.latest_requests = []
cls.last_request = HTTPrettyRequestEmpty()
@classmethod
def historify_request(cls, headers, body='', append=True):
request = HTTPrettyRequest(headers, body)
cls.last_request = request
if append or not cls.latest_requests:
cls.latest_requests.append(request)
else:
cls.latest_requests[-1] = request
return request
@classmethod
def register_uri(cls, method, uri, body='HTTPretty :)',
adding_headers=None,
forcing_headers=None,
status=200,
responses=None, match_querystring=False,
**headers):
uri_is_string = isinstance(uri, basestring)
if uri_is_string and re.search(r'^\w+://[^/]+[.]\w{2,}$', uri):
uri += '/'
if isinstance(responses, list) and len(responses) > 0:
for response in responses:
response.uri = uri
response.method = method
entries_for_this_uri = responses
else:
headers[str('body')] = body
headers[str('adding_headers')] = adding_headers
headers[str('forcing_headers')] = forcing_headers
headers[str('status')] = status
entries_for_this_uri = [
cls.Response(method=method, uri=uri, **headers),
]
matcher = URIMatcher(uri, entries_for_this_uri,
match_querystring)
if matcher in cls._entries:
matcher.entries.extend(cls._entries[matcher])
del cls._entries[matcher]
cls._entries[matcher] = entries_for_this_uri
def __str__(self):
return '<HTTPretty with %d URI entries>' % len(self._entries)
@classmethod
def Response(cls, body, method=None, uri=None, adding_headers=None, forcing_headers=None,
status=200, streaming=False, **headers):
headers[str('body')] = body
headers[str('adding_headers')] = adding_headers
headers[str('forcing_headers')] = forcing_headers
headers[str('status')] = int(status)
headers[str('streaming')] = streaming
return Entry(method, uri, **headers)
@classmethod
def disable(cls):
cls._is_enabled = False
socket.socket = old_socket
socket.SocketType = old_socket
socket._socketobject = old_socket
socket.create_connection = old_create_connection
socket.gethostname = old_gethostname
socket.gethostbyname = old_gethostbyname
socket.getaddrinfo = old_getaddrinfo
socket.__dict__['socket'] = old_socket
socket.__dict__['_socketobject'] = old_socket
socket.__dict__['SocketType'] = old_socket
socket.__dict__['create_connection'] = old_create_connection
socket.__dict__['gethostname'] = old_gethostname
socket.__dict__['gethostbyname'] = old_gethostbyname
socket.__dict__['getaddrinfo'] = old_getaddrinfo
if socks:
socks.socksocket = old_socksocket
socks.__dict__['socksocket'] = old_socksocket
if ssl:
ssl.wrap_socket = old_ssl_wrap_socket
ssl.SSLSocket = old_sslsocket
ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket
ssl.__dict__['SSLSocket'] = old_sslsocket
if not PY3:
ssl.sslwrap_simple = old_sslwrap_simple
ssl.__dict__['sslwrap_simple'] = old_sslwrap_simple
@classmethod
def is_enabled(cls):
return cls._is_enabled
@classmethod
def enable(cls):
cls._is_enabled = True
socket.socket = fakesock.socket
socket._socketobject = fakesock.socket
socket.SocketType = fakesock.socket
socket.create_connection = create_fake_connection
socket.gethostname = fake_gethostname
socket.gethostbyname = fake_gethostbyname
socket.getaddrinfo = fake_getaddrinfo
socket.__dict__['socket'] = fakesock.socket
socket.__dict__['_socketobject'] = fakesock.socket
socket.__dict__['SocketType'] = fakesock.socket
socket.__dict__['create_connection'] = create_fake_connection
socket.__dict__['gethostname'] = fake_gethostname
socket.__dict__['gethostbyname'] = fake_gethostbyname
socket.__dict__['getaddrinfo'] = fake_getaddrinfo
if socks:
socks.socksocket = fakesock.socket
socks.__dict__['socksocket'] = fakesock.socket
if ssl:
ssl.wrap_socket = fake_wrap_socket
ssl.SSLSocket = FakeSSLSocket
ssl.__dict__['wrap_socket'] = fake_wrap_socket
ssl.__dict__['SSLSocket'] = FakeSSLSocket
if not PY3:
ssl.sslwrap_simple = fake_wrap_socket
ssl.__dict__['sslwrap_simple'] = fake_wrap_socket
def httprettified(test):
"A decorator tests that use HTTPretty"
def decorate_class(klass):
for attr in dir(klass):
if not attr.startswith('test_'):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
setattr(klass, attr, decorate_callable(attr_value))
return klass
def decorate_callable(test):
@functools.wraps(test)
def wrapper(*args, **kw):
httpretty.reset()
httpretty.enable()
try:
return test(*args, **kw)
finally:
httpretty.disable()
return wrapper
if isinstance(test, ClassTypes):
return decorate_class(test)
return decorate_callable(test)
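# Minimal usage sketch (not part of the original module). The import path is
# illustrative and depends on how this vendored copy is packaged; 'requests'
# is assumed to be installed and is only used to show the mocked round trip.
#
#   from httpretty.core import httpretty, httprettified
#   import requests
#
#   @httprettified
#   def test_example():
#       httpretty.register_uri(httpretty.GET, 'http://example.com/',
#                              body='{"ok": true}',
#                              content_type='application/json')
#       response = requests.get('http://example.com/')
#       assert response.json() == {'ok': True}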
| apache-2.0 | -2,241,472,824,125,834,000 | 31.072394 | 117 | 0.540116 | false |
bobbyluig/Eclipse | src/agility/main.py | 1 | 45601 | from agility.maestro import Maestro
from agility.pololu.enumeration import uscSerialMode, ChannelMode, HomeMode
from agility.pololu.usc import Usc
from threading import Event
from shared.debug import Dummy
import numpy as np
import math
from matplotlib.path import Path
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time
import logging
import sys
logger = logging.getLogger('universe')
class ServoError(Exception):
pass
class Stepper:
def __init__(self, c1, c2, steps, direction=1):
self.c1 = c1 # Direction channel.
self.c2 = c2 # Step channel.
self.steps = steps
self.direction = direction
self.step = 1
self.target = 1
def get_position(self):
"""
Get the stepper's current position in degrees.
:return: Output degrees.
"""
return self.steps_to_deg(self.step)
def deg_to_steps(self, deg):
"""
Converts a normalized degree to the nearest integer step.
:param deg: The input degrees.
:return: The corresponding steps.
"""
steps = int(round(deg * (self.steps / 360))) * self.direction
if steps == 0:
return self.steps
else:
return steps
def steps_to_deg(self, steps):
"""
Converts steps to a degree.
:param steps: The number of steps.
:return: The corresponding angle.
"""
return steps * (360 / self.steps) * self.direction
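    # Worked example (hypothetical 200-step stepper with direction=1):
    #   deg_to_steps(90) -> round(90 * 200 / 360) = 50 steps
    #   steps_to_deg(50) -> 50 * (360 / 200)      = 90.0 degrees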
def step_one(self, direction):
"""
Increment step counter.
:param direction: 1 steps up, -1 steps down.
"""
n = self.step + direction
if n > self.steps or n < 1:
self.step = 1
else:
self.step = n
def set_target(self, deg):
"""
Target a degree. Servo will attempt nearest path to target.
:param deg: The input degrees.
:return: The number of steps, either positive or negative.
"""
# Normalize.
deg -= 360 * (deg // 360)
steps = self.deg_to_steps(deg)
# Compute closest direction.
target = steps - self.step
delta = (self.steps / 2 - target) % self.steps - (self.steps / 2)
# Return.
return delta
class Servo:
def __init__(self, channel, min_deg, max_deg, min_pwm, max_pwm, max_vel,
bias=0, direction=1, left_bound=None, right_bound=None):
self.channel = channel # 0 to 17
self.min_deg = min_deg # -360 to 360 as (degrees)
self.max_deg = max_deg # -360 to 360 as (degrees)
self.min_pwm = min_pwm * 4 # 0 to 4000 as (us)
self.max_pwm = max_pwm * 4 # 0 to 4000 as (us)
self.max_vel = max_vel # 0 to 1000, as (ms / 60deg)
# Bias should be adjusted such that the servo is at kinematic "0" degree when it's target is 0 degrees.
# This is used to compensate for ridge spacing and inaccuracies during installation.
# Think of this like the "home" value of the servo.
self.bias = bias
if left_bound is None:
# Left bound (if not min_deg), with bias.
self.left_bound = self.min_deg
else:
self.left_bound = left_bound
if right_bound is None:
            # Right bound (if not max_deg), with bias.
self.right_bound = self.max_deg
else:
self.right_bound = right_bound
assert(self.left_bound >= self.min_deg)
assert(self.right_bound <= self.max_deg)
# If the front of the servo is pointing in a negative axis, set this to negative 1.
# This reverses the directionality of all angle inputs.
self.direction = direction
# Dynamic current data.
self.pwm = 0
self.vel = 0
self.accel = 0
# User defined target. Also used to store last target.
# In units of 0.25 us.
self.target = 0
# Compute constants.
self.k_deg2mae = (self.max_pwm - self.min_pwm) / (self.max_deg - self.min_deg)
self.k_mae2deg = (self.max_deg - self.min_deg) / (self.max_pwm - self.min_pwm)
self.k_vel2mae = (60 * self.k_deg2mae) / self.max_vel * 10
self.k_mae2vel = self.max_vel / ((60 * self.k_deg2mae) * 10)
def zero(self):
"""
Set the servo to zero, ignoring bias.
"""
self.target = self.deg_to_maestro(0)
def get_range(self):
"""
Get the maximum and minimum, removing bias.
:return: (min, max)
"""
low = self.left_bound - self.bias
high = self.right_bound - self.bias
return low, high
def set_target(self, deg):
"""
Set the target for the servo.
:param deg: The input degrees.
"""
deg = self.normalize(deg)
self.target = self.deg_to_maestro(deg)
def normalize(self, deg):
"""
Normalize a degree for the servo, taking into account direction and bias.
:param deg: Input degrees.
:return: Output degrees.
"""
# Account for direction and bias.
deg = deg * self.direction + self.bias
# Normalize.
if deg > self.right_bound:
deg -= 360
elif deg < self.left_bound:
deg += 360
if deg > self.right_bound or deg < self.left_bound:
raise ServoError('Target out of range!')
return deg
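    # Example of the normalization above (hypothetical servo, not from the original source):
    # with direction=-1 and bias=10, an input of 20 degrees becomes 20 * -1 + 10 = -10, which is
    # then shifted by +/-360 only if it falls outside [left_bound, right_bound]; anything still
    # out of range raises ServoError.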
def get_position(self):
"""
Get the servo's current position in degrees.
:return: Output degrees.
"""
deg = self.maestro_to_deg(self.pwm)
deg = (deg - self.bias) * self.direction
return deg
def at_target(self):
"""
Checks if the servo is at its target.
:return: True if servo is at its target, else False.
"""
return self.target == self.pwm
def passed_target(self, deg, greater):
"""
Checks if a servo has passed its target.
:param deg: The desired degrees to check.
:param greater: True to check >=, else <=.
:return: True if test is true, else False.
"""
deg = self.normalize(deg)
# Due to clockwise being defined as negative by Finesse, PWM checks should be inverted.
# This is due to the fact that higher PWM in servos is clockwise.
if greater:
return self.deg_to_maestro(deg) <= self.pwm
else:
return self.deg_to_maestro(deg) >= self.pwm
def deg_to_maestro(self, deg):
"""
Converts degrees to 0.25 us.
:param deg: The input degrees.
:return: The PWM in units of 0.25 us.
"""
return round(self.min_pwm + self.k_deg2mae * (deg - self.min_deg))
# Convert 0.25 us to degrees.
def maestro_to_deg(self, pwm):
"""
Converts 0.25 us to degrees.
:param pwm: The input PWM in units of 0.25 us.
:return: Degrees.
"""
return self.min_deg + self.k_mae2deg * (pwm - self.min_pwm)
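    # Sanity-check sketch (assumed values, not from the original source): for a servo built as
    # Servo(channel=0, min_deg=-90, max_deg=90, min_pwm=600, max_pwm=2400, max_vel=200),
    # deg_to_maestro(0) evaluates to 6000 quarter-microseconds (1500 us, the usual servo
    # midpoint), and maestro_to_deg(6000) returns 0.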
class Body:
def __init__(self, length, width, cx, cy, mb, ml):
"""
Create a body object.
Note that dimensions are between kinematic roots.
:param length: Length of body (along x-axis).
:param width: Width of body (along y-axis).
:param cx: Bias of center of mass along x.
:param cy: Bias of center of mass along y.
:param mb: Mass of body.
:param ml: Mass of leg.
"""
# Define constants.
self.length = length
self.width = width
self.cx = cx
self.cy = cy
self.mb = mb
self.ml = ml
self.com = np.array((cx, cy, 0))
# Define quick access array.
self.j = np.array((
(2, 1),
(0, 3),
(3, 0),
(1, 2)
))
# Define static vertices.
x = 0.5 * self.length
y = 0.5 * self.width
self.vertices = np.array((
(x, y, 0),
(x, -y, 0),
(-x, y, 0),
(-x, -y, 0)
))
def default_bias(self, next_frame):
"""
Zeros vertices and bias.
:return: Bias.
"""
# Relative to absolute.
original = next_frame + self.vertices
# Get com.
cx, cy = self.get_com(original)
return np.array((-cx, -cy, 0))
@staticmethod
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about the given axis by theta radians.
http://stackoverflow.com/questions/6802577/python-rotation-of-3d-vector (by unutbu).
:param axis: A numpy vector.
:param theta: A float.
        :return: The rotation matrix.
"""
axis /= math.sqrt(np.dot(axis, axis))
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array(((aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)),
(2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)),
(2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc)))
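    # Quick check of the matrix above (illustrative, not from the original source):
    #   R = Body.rotation_matrix(np.array((0.0, 0.0, 1.0)), math.pi / 2)
    #   np.dot(R, (1, 0, 0))   # ~(0, 1, 0): a quarter turn counterclockwise about the z-axis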
def tilt_body(self, vertices, air, theta, lock=True):
"""
Tilt the body to give additional stability.
:param vertices: Vertices of the translated rectangle (4 x 3).
:param air: The index of the leg lifted in the air.
:param theta: Degrees to rotate in radians.
:param lock: Whether or not to lock z-value (usually 0) of the lifted leg.
:return: The tilted vertices.
"""
# Compute rotation axis.
legs = self.j[air]
r0, r1 = vertices[legs]
axis = r1 - r0
# Rotate about axis.
q = self.rotation_matrix(axis, theta)
r = np.dot(vertices, q.T)
if lock:
# Lock the lifted leg back to original position.
delta = vertices[air] - r[air]
vertices = r + delta
else:
# No need to lock. Vertices is simply r.
vertices = r
return vertices
@staticmethod
def closest(x1, x2, y1, y2, x, y):
"""
Compute the point along the two supporting legs that is closest to the center of mass.
This shall be known as "Alastair's magic."
"""
m = (y2 - y1) / (x2 - x1)
b1 = y1 - m * x1
b3 = y + (x / m)
x0 = (b3 - b1) / (m + 1 / m)
y0 = m * x0 + b1
return x0, y0
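    # The result is the foot of the perpendicular from (x, y) onto the line through (x1, y1)
    # and (x2, y2). Illustrative check (assumed values, not from the original source):
    #   Body.closest(0, 2, 0, 2, 2, 0) -> (1.0, 1.0), the closest point on the line y = x to (2, 0).
    # Note that a vertical (x1 == x2) or horizontal (m == 0) support line would divide by zero
    # here; the leg geometry is assumed to avoid those cases.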
def get_com(self, frame):
"""
Compute the center of mass given the leg positions.
:param frame: The leg positions.
:return: com -> [cx, cy].
"""
com = self.ml * np.sum(frame[:, :2], axis=0) / (self.ml + self.mb)
com += self.com[:2]
return com
def adjust_crawl(self, off, next_frame, sigma=1.5):
"""
Adjust the center of mass for the crawl gait.
:param off: An array defining which legs are in the air.
:param next_frame: An array representing the next frame (4 x 3).
:param sigma: Safety boundary.
"""
# Get the leg in the air.
air = np.where(off)[0]
air = int(air)
legs = self.j[air]
# Relative to absolute.
original = next_frame + self.vertices
# Get points.
p = original[legs]
x1, y1, z1 = p[0]
x2, y2, z2 = p[1]
# Compute center of mass as with leg positions.
cx, cy = self.get_com(original)
# Get shortest path from zero-moment point to support triangle (perpendicular).
x0, y0 = self.closest(x1, x2, y1, y2, cx, cy)
# Compute additional safety margin.
theta = math.atan2((y2 - y1), (x2 - x1))
rx = sigma * math.sin(theta) + x0
ry = -sigma * math.cos(theta) + y0
rz = 0
rho = np.array((rx, ry, rz))
# Adjust vertices.
# new = original + rho
# Perform tilt.
# new = self.tilt_body(new, air, 0.0)
# Compute bias.
# bias = new - original
return rho
def adjust_trot(self, off, next_frame):
"""
        Adjust the center of mass for the trot gait.
:param off: An array defining which legs are in the air.
:param next_frame: An array representing the next frame (4 x 3).
"""
# Get the leg on the ground.
legs = np.where(~off)[0]
# Relative to absolute.
original = next_frame + self.vertices
# Get points.
p = original[legs]
x1, y1, z1 = p[0]
x2, y2, z2 = p[1]
# Compute center of mass as with leg positions.
cx, cy = self.get_com(original)
# Get closest point from center of mass to support.
x0, y0 = self.closest(x1, x2, y1, y2, cx, cy)
# Compute bias.
rx = x0 - cx
ry = y0 - cy
rz = 0
rho = np.array((rx, ry, rz))
return rho
def adjust(self, off, next_frame, count=None):
"""
Adjust the center of mass.
:param off: An array indicating whether the leg is in the air.
:param next_frame: The next frame.
:param count: The number of legs in the air.
:return: The bias.
"""
# Check which (if any) optimization is needed.
if count is None:
count = np.count_nonzero(off)
if count == 1:
# Crawl gait.
return self.adjust_crawl(off, next_frame)
elif count == 2 and off[1] == off[2]:
# Trot gait.
return self.adjust_trot(off, next_frame)
else:
return self.default_bias(next_frame)
def translate(self, x, y, z):
"""
Translate the body and thus the center of mass.
:param x: Motion along x.
:param y: Motion along y.
:param z: Motion along z.
:return: Bias.
"""
t = np.array((x, y, z), dtype=float)
bias = np.array((self.cx, self.cy, 0), dtype=float) + t
return bias
def is_supported(self, vertices):
"""
Checks if a given support triangle contains the center of mass.
This assumes the robot is not on a slant or hill.
:param vertices: The transformed vertices as a 3 x 2 numpy matrix.
:return: True if center of mass is in triangle, else False.
"""
triangle = Path(vertices)
return triangle.contains_point(self.com[:2])
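    # Illustrative check (hypothetical triangle, not from the original source): with the center
    # of mass at the origin, a support triangle such as ((1, 1), (-1, 1), (0, -1)) passed as the
    # 3 x 2 vertices would contain it, so is_supported would return True.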
class Leg:
def __init__(self, servo1, servo2, servo3, lengths, index, ik, fk):
"""
Create a leg object.
:param servo1: The first hip servo object.
:param servo2: The second hip servo object.
:param servo3: The knee servo object.
:param lengths: The leg segment lengths l1 and l2.
:param index: The leg index (1 - 4).
:param ik: Inverse kinematics solver.
:param fk: Forward kinematics solver.
"""
self.servos = [servo1, servo2, servo3]
self.lengths = lengths
self.length = sum(lengths)
self.index = index
self.ik_solver = ik
self.fk_solver = fk
self.position = None
def target_point(self, point):
"""
Target a point in space.
:param point: (x, y, z).
:return: True if target is reachable, else False.
"""
try:
angles = self.ik_solver(self.lengths, point)
self.servos[0].set_target(angles[0])
self.servos[1].set_target(angles[1])
self.servos[2].set_target(angles[2])
self.position = point
except (ServoError, ValueError, ZeroDivisionError):
logger.error('Leg {} is unable to reach point ({:.2f}, {:.2f}, {:.2f})'.format(self.index, *point))
return False
return True
def target_angle(self, angle):
"""
Target an angle configuration.
:param angle: (theta1, theta2, theta3).
:return: True if target is reachable, else False.
"""
try:
self.servos[0].set_target(angle[0])
self.servos[1].set_target(angle[1])
self.servos[2].set_target(angle[2])
self.position = self.fk_solver(self.lengths, angle)
except ServoError:
logger.error('Leg {} is unable to reach angle ({:.2f}, {:.2f}, {:.2f})'.format(self.index, *angle))
return False
return True
def get_angles(self, point):
"""
Convert a point to angles. Will throw exceptions.
:param point: (x, y, z).
:return: The angles.
"""
return self.ik_solver(self.lengths, point)
def update_position(self):
"""
Update current leg position based on servo data.
"""
a = math.radians(self.servos[0].get_position())
b = math.radians(self.servos[1].get_position())
c = math.radians(self.servos[2].get_position())
self.position = self.fk_solver(self.lengths, (a, b, c))
def get_position(self):
"""
Get the position of the leg. Update if necessary.
:return: Position (x, y, z).
"""
if self.position is None:
self.update_position()
return self.position
def __getitem__(self, key):
return self.servos[key]
def __add__(self, other):
return self.servos + other.servos
def __radd__(self, other):
return other + self.servos
def __len__(self):
return len(self.servos)
class Head:
def __init__(self, servo1, servo2, camera):
"""
Create a head object.
:param servo1: Servo object controlling left and right head turns.
:param servo2: Servo object controlling up and down head turns.
:param camera: A camera object for configuration.
"""
self.servos = [servo1, servo2]
self.camera = camera
self.angles = [0, 0]
self.target = [0, 0]
def at_bound(self):
"""
Check if the head is at the left or right bound.
:return: 1 -> left bound, -1 -> right bound, 0 -> not at bound.
"""
servo = self.servos[0]
low, high = servo.get_range()
position = servo.get_position()
        # Within 0.2 degrees counts as "there".
if abs(position - high) < 0.2:
return 1
elif abs(position - low) < 0.2:
return -1
else:
return 0
def __getitem__(self, item):
return self.servos[item]
def __len__(self):
return len(self.servos)
class Robot:
def __init__(self, leg1, leg2, leg3, leg4, body, head, bias=0):
"""
Define a robot.
:param leg1: Leg object.
:param leg2: Leg object.
:param leg3: Leg object.
:param leg4: Leg object.
:param body: Body object.
:param head: Head object.
:param bias: Rotational bias for body.
"""
# Define legs.
self.legs = [leg1, leg2, leg3, leg4]
self.leg_servos = [servo for leg in self.legs for servo in leg]
# Define head.
self.head = head
self.head_servos = [servo for servo in head]
# Define body.
self.body = body
class Agility:
def __init__(self, robot):
# Set up robot.
self.robot = robot
# Set error.
self.epsilon = 1e-6
# Set up Usc.
try:
self.usc = Usc()
logger.info("Successfully attached to Maestro's low-level interface.")
except ConnectionError:
self.usc = Dummy()
            logger.warn("Failed to attach to Maestro's low-level interface. "
"If not debugging, consider this a fatal error.")
# Set up virtual COM and TTL ports.
try:
self.maestro = Maestro()
logger.info("Successfully attached to Maestro's command port.")
except ConnectionError:
self.maestro = Dummy()
            logger.warn("Failed to attach to Maestro's command port. "
"If not debugging, consider this a fatal error.")
# Emergency stop.
self.emergency = Event()
# Zero.
self.zero()
def stop(self):
"""
Emergency stop. Stop all wait functions.
"""
self.emergency.set()
def clear(self):
"""
Clear emergency flag.
"""
self.emergency.clear()
def head_rotation(self):
"""
Provides head rotation.
:return: Head rotation in degrees.
"""
servo = self.robot.head[0]
self.maestro.get_position(servo)
return servo.get_position()
def set_head(self, target, t=0):
"""
Move the head to a given position.
Blocks until completion.
:param target: (LR, UD).
:param t: Time in ms. 0 for max speed.
"""
head = self.robot.head
servos = self.robot.head_servos
head[0].set_target(target[0])
head[1].set_target(target[1])
self.maestro.end_together(servos, t, True)
self.wait(servos)
def look_at(self, x, y):
"""
Move the head to look at a given target.
Note that this is an approximation. Best used in a PID loop.
:param x: x-coordinate of target.
:param y: y-coordinate of target.
"""
head = self.robot.head
camera = head.camera
# Define velocity constant.
k = 1.5
# Compute deltas.
dx = (x - 0.5 * camera.width) * -1
dy = (y - 0.5 * camera.height) * -1
dt = dx / camera.width * (camera.fx / 2)
dp = dy / camera.height * (camera.fy / 2)
# Compute suggested velocity. Balance between blur and speed.
vt = int(round(abs(dt * k)))
        vp = int(round(abs(dp * k)))
# Construct array.
data = [dt, vt, dp, vp]
# Perform motion.
self.move_head(data)
# Update target.
head.target = [x, y]
return data
def scan(self, t, direction=None, block=False):
"""
        Scans the head in a direction. If no direction is given, scans toward the bound of the last known location.
        If at the minimum or maximum bound, automatically selects the opposite direction.
        If block is True, waits until the head has fully scanned toward one direction.
:param t: Time in milliseconds.
:param direction: A direction, either None, 1, or -1.
:param block: Whether to wait until completion.
"""
# Obtain definitions.
head = self.robot.head
camera = head.camera
servo = head.servos[0]
# Get bounds.
low, high = servo.get_range()
# Update servo.
self.maestro.get_position(servo)
# Check bound.
bound = head.at_bound()
# Create direction.
if bound != 0:
direction = bound * -1
if direction is None:
if head.target[0] < 0.5 * camera.width:
direction = 1
else:
direction = -1
# Execute.
if direction == 1:
servo.set_target(high)
else:
servo.set_target(low)
self.maestro.end_in(servo, t)
if block:
self.wait(servo)
def center_head(self, t=0):
"""
Returns head to original position.
:param t: The time in ms.
"""
# Obtain definitions.
head = self.robot.head
servos = head.servos
# Target zero.
for servo in servos:
servo.set_target(0)
# Reset to zero.
head.angles = [0, 0]
# Execute.
self.maestro.end_together(servos, t, True)
self.wait(servos)
def move_head(self, data):
"""
Move head based on data parameters. Does not wait for completion.
:param data: An array given by look_at.
"""
# Obtain definitions.
head = self.robot.head
servos = head.servos
# Update positions.
self.maestro.get_multiple_positions(servos)
for i in range(2):
servo = head[i]
current = servo.get_position()
# Get data.
delta = data[i * 2]
velocity = data[i * 2 + 1]
if velocity == 0:
# Already at target. Do nothing.
servo.target = servo.pwm
target = current
else:
# Ensure that head is within bounds.
low, high = servo.get_range()
target = current + delta
if target < low:
target = low
elif target > high:
target = high
servo.set_target(target)
# Update.
head.angles[i] = target
# Set speed.
self.maestro.set_speed(servo, velocity)
# Execute.
self.maestro.set_target(servo)
@staticmethod
def plot_gait(frames):
"""
Plot a gait given some frames. Used for debugging.
:param frames: Frames generated by execute.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('X Axis')
ax.set_ylabel('Y Axis')
ax.set_zlabel('Z Axis')
x = frames[:, 0, 0]
y = frames[:, 0, 1]
z = frames[:, 0, 2]
ax.plot(x, y, z, marker='o')
plt.show()
def execute_forever(self, frames, dt):
"""
Like execute_frames(), except it runs forever.
:param frames: An array of frames.
:param dt: Delta t.
:return:
"""
# Get all legs and servos for quick access.
legs = self.robot.legs
servos = self.robot.leg_servos
# Update initial leg locations.
self.maestro.get_multiple_positions(servos)
for leg in legs:
leg.get_position()
while True:
for frame in frames:
for i in range(4):
legs[i].target_point(frame[i])
self.maestro.end_together(servos, dt)
self.wait(servos)
def execute_frames(self, frames, dt):
"""
Execute some frames with a constant dt.
:param frames: An array of frames.
:param dt: Delta t.
"""
# Get all legs and servos for quick access.
legs = self.robot.legs
servos = self.robot.leg_servos
# Update initial leg locations.
self.maestro.get_multiple_positions(servos)
for frame in frames:
for i in range(4):
legs[i].target_point(frame[i])
self.maestro.end_together(servos, dt)
self.wait(servos)
def execute_long(self, prev_frame, frames, dt):
"""
Execute frames with constant but possibly long dt.
Automatically computes distance, and, if necessary, interpolates to get more accurate synchronization.
:param prev_frame: The previous frame.
:param frames: An array of frames.
:param dt: Delta t.
"""
# Get all legs and servos for quick access.
legs = self.robot.legs
servos = self.robot.leg_servos
# Define break constant (ms / cm).
k = 100
# Update initial leg locations.
self.maestro.get_multiple_positions(servos)
for frame in frames:
# Compute max distance.
d = max(np.linalg.norm(frame - prev_frame, axis=1))
            # Time per distance exceeds the break constant; the move is too long. Linearly interpolate.
if dt / d > k:
n = int(round(dt / d / k)) + 1
l_frames = self.smooth(prev_frame, frame, n)
l_frames = l_frames[1:]
# Compute time.
t = dt / n
# Execute intermediate frames.
for l_frame in l_frames:
for i in range(4):
legs[i].target_point(l_frame[i])
self.maestro.end_together(servos, t)
self.wait(servos)
else:
t = dt
for i in range(4):
legs[i].target_point(frame[i])
self.maestro.end_together(servos, t)
self.wait(servos)
prev_frame = frame
def execute_variable(self, frames, dts):
"""
Execute some frames with different dt.
:param frames: An array of frames.
:param dts: An array of dt.
"""
# Get all legs and servos for quick access.
legs = self.robot.legs
servos = self.robot.leg_servos
# Update initial leg locations.
self.maestro.get_multiple_positions(servos)
# Assertion check.
assert len(frames) == len(dts)
for t in range(len(frames)):
for i in range(4):
legs[i].target_point(frames[t][i])
self.maestro.end_together(servos, dts[t])
self.wait(servos)
def execute_angles(self, angles, dt):
"""
Like execute_frames(), but uses angles instead.
:param angles: An array of angles.
:param dt: Delta t.
"""
# Get all legs and servos for quick access.
legs = self.robot.legs
servos = self.robot.leg_servos
# Update initial leg locations.
self.maestro.get_multiple_positions(servos)
for angle in angles:
for i in range(4):
legs[i].target_angle(angle)
self.maestro.end_together(servos, dt)
self.wait(servos)
def anglify(self, frames):
"""
Converts frames generated by self.prepare to angles.
:param frames: The input frames.
:return: The output angles ready for execution.
"""
# Get all legs and servos for quick access.
legs = self.robot.legs
# Allocate memory.
angles = np.empty(frames.shape)
for i in range(len(frames)):
for l in range(4):
a = legs[l].get_angles(frames[i][l])
angles[i][l] = a
return angles
@staticmethod
def smooth(a, b, n):
"""
Create a smooth transition from a to b in n steps.
:param a: The first array.
:param b: The second array.
:param n: The number of steps.
:return: An array from [a, b).
"""
assert(a.shape == b.shape)
assert(n > 1)
# Compute delta.
delta = (b - a) / n
# Allocate n-1 with dimension d+1.
shape = (n, *a.shape)
inter = np.empty(shape)
for i in range(n):
inter[i] = a + i * delta
return inter
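    # Illustrative example (not from the original source): smoothing between two poses in 5 steps
    # yields the half-open range [a, b):
    #   a = np.zeros((4, 3)); b = np.full((4, 3), 10.0)
    #   Agility.smooth(a, b, 5)   # every coordinate steps through 0, 2, 4, 6, 8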
def get_pose(self):
"""
Get the relative pose of the robot.
:return: A (4 x 3) matrix representing the current state of the robot.
"""
# Get all legs for quick access.
legs = self.robot.legs
# Iterate through all legs.
pose = []
for leg in legs:
position = leg.get_position()
pose.append(position)
return np.array(pose, dtype=float)
def target_point(self, leg, point, t):
"""
Move a leg to a given point in t time.
Blocks until completion.
:param leg: Leg index.
:param point: (x, y, z).
:param t: Time in milliseconds.
"""
# Assertion check.
assert(0 <= leg <= 3)
# Get legs for quick access.
legs = self.robot.legs
# Target.
leg = legs[leg]
leg.target_point(point)
# Execute.
servos = leg.servos
self.maestro.end_together(servos, t, True)
# Block until completion.
self.wait(servos)
def lift_leg(self, leg, lift, t):
"""
Lift a leg (change pose) in t time.
        Blocks until completion.
:param leg: The leg index.
:param lift: How high to lift leg.
:param t: Time to execute pose change.
"""
# Assertion check.
assert (0 <= leg <= 3)
# Get legs for quick access.
legs = self.robot.legs
# Define ground.
ground = -max([leg.length for leg in legs]) + 1
# Empty pose.
pose = np.zeros((4, 3))
# Leg lift.
pose[:, 2] = ground
pose[leg][2] = ground + lift
# Execute.
self.target_pose(pose, t)
def target_pose(self, target, t, lift=2):
"""
Get the robot from its current pose to a new pose. Block until completion.
The robot will lift legs appropriately to eliminate dragging.
Automatically adjusts the center of mass during transition and target if necessary.
:param target: The target pose.
:param t: The total time for the adjustment.
:param lift: How much to lift each leg.
:return: (frames, dt) ready for execution.
"""
# Get body for quick access.
body = self.robot.body
# Create data array.
frames = []
# Get pose. Assume updated.
pose = self.get_pose()
# Early exit.
if np.array_equal(pose, target):
return
# Get ground, which is the lowest point.
curr_g = np.min(pose[:, 2])
next_g = np.min(target[:, 2])
# Generate leg state arrays.
pose_state = np.greater(pose[:, 2], (curr_g + self.epsilon)) # Defines which legs are in the air.
target_state = np.greater(target[:, 2], (next_g + self.epsilon)) # Defines which legs are in the air.
# Get all legs to (0, 0, curr_g) if they are in the air.
if any(pose_state):
f1 = pose.copy()
for i in range(4):
if pose_state[i]:
f1[i] = (0, 0, curr_g)
frames.append(f1)
# Define optimization procedure.
def up_down(ground):
# For every leg that is not at the right (x, y) and is on the ground in target, lift and down.
for i in range(4):
if not np.array_equal(pose[i][:2], target[i][:2]) and not target_state[i]:
# Get previous frame.
prev = frames[-1]
f4, f5 = prev.copy(), prev.copy()
# Move leg to target (x, y) in air.
x, y = target[i][:2]
f4[i] = (x, y, ground + lift)
# Compute bias and adjust.
s = [False, False, False, False]
s[i] = True
bias = body.adjust(s, f4, 1)
f3 = prev - bias
f4 -= bias
# Move leg down to target. Keep bias.
f5[i] = target[i]
f5 -= bias
# Append data.
frames.extend((f3, f4, f5))
def to_next():
f2 = pose.copy()
f2[:, 2] = next_g
frames.append(f2)
# Different optimization order.
if next_g > curr_g:
# For body high -> low, get legs to next height first.
to_next()
up_down(next_g)
elif curr_g > next_g:
# For body low -> high, get legs to next height last.
up_down(curr_g)
to_next()
# Move to final target if necessary.
if not np.array_equal(frames[-1], target):
if any(target_state):
prev = frames[-1]
bias = body.adjust(target_state, target)
frames.extend((prev - bias, target - bias))
else:
frames.append(target)
# Compute times. Assume equal dt.
dt = t / len(frames)
self.execute_long(pose, frames, dt)
def prepare_frames(self, frames, dt, ground):
"""
Prepare some frames which are non-circular (last frame not linked to first frame).
:param frames: The input frames.
        :param dt: The time step between frames.
        :param ground: The ground height (z-value) for the gait.
:return: (frames, dt) ready for execution.
"""
# Define body for quick access.
body = self.robot.body
# Create array for biases.
biases = np.empty(frames.shape)
# Generate leg state arrays.
state1 = np.greater(frames[:, :, 2], (ground + self.epsilon)) # Defines which legs are in the air.
state2 = state1.sum(1) # The number of legs in the air.
# Define.
steps = len(frames)
for t in range(steps - 1):
# Look ahead and pass data to center of mass adjustment algorithms.
next_frame = frames[t]
# Determine which legs are off.
off = state1[t]
count = state2[t]
# Perform center of mass adjustments accordingly.
biases[t] = body.adjust(off, next_frame, count)
# Adjust frames.
frames -= biases
return frames, dt
def prepare_gait(self, gait, debug=False):
"""
Prepare a given gait class.
:param gait: The gait class.
:param debug: Show gait in a graph.
:return: (frames, dt) ready for execution.
"""
# Define body for quick access.
body = self.robot.body
# Get gait properties.
steps = gait.steps
ground = gait.ground
dt = gait.time / steps
ts = np.linspace(0, 1000, num=steps, endpoint=False)
# Get all legs for quick access.
legs = self.robot.legs
# Compute shape.
shape = (steps, 4, 3)
# Evaluate gait.
f = [gait.evaluate(leg, ts) for leg in legs]
frames = np.concatenate(f).reshape(shape, order='F')
# Debugging.
if debug:
self.plot_gait(frames)
# Create array for biases.
biases = np.empty(shape)
# Generate leg state arrays.
        state1 = np.greater(frames[:, :, 2], (ground + 1e-6)) # Defines which legs are in the air.
state2 = state1.sum(1) # The number of legs in the air.
# Iterate and perform static analysis.
for t in range(steps):
# Look ahead and pass data to center of mass adjustment algorithms.
next_frame = frames[(t + 1) % steps]
# Determine which legs are off.
off = state1[t]
count = state2[t]
# Perform center of mass adjustments accordingly.
biases[t] = body.adjust(off, next_frame, count)
# Adjust frames.
frames -= biases
return frames, dt
def prepare_smoothly(self, gait):
"""
Prepare a gait by intelligently applying smoothing. Only works for planar COM adjustments.
Plus, who doesn't like smooth things? (I'm really tired right now.)
:param gait: The gait object.
:return: (frames, dt) ready for execution.
"""
# Define body for quick access.
body = self.robot.body
# Get gait properties.
steps = gait.steps
ground = gait.ground
dt = gait.time / steps
ts = np.linspace(0, 1000, num=steps, endpoint=False)
# Get all legs for quick access.
legs = self.robot.legs
# Compute shape.
shape = (steps, 4, 3)
# Evaluate gait.
f = [gait.evaluate(leg, ts) for leg in legs]
frames = np.concatenate(f).reshape(shape, order='F')
# Generate leg state arrays.
state1 = np.greater(frames[:, :, 2], (ground + 1e-6)) # Defines which legs are in the air.
state2 = state1.sum(1) # The number of legs in the air.
# Get indices of legs in air.
air = np.where(state2 != 0)[0]
air = air.tolist()
# Create array for biases.
biases = np.empty(shape)
# Keep track of last air -> ground.
t = air[-1]
if state2[(t + 1) % steps] == 0:
# Last air frame is an air -> ground transition.
last_ag = t
else:
            # No wrap-around air -> ground transition yet; it will be found in the loop below.
last_ag = None
# Compute biases for each frame that is not on the ground.
for i in range(len(air)):
# Get the index relative to all frames.
t = air[i]
# Compute bias as usual.
next_frame = frames[(t + 1) % steps]
off = state1[t]
count = state2[t]
biases[t] = body.adjust(off, next_frame, count)
# Checks if the current frame represents a ground -> air transition.
if state2[t - 1] == 0:
curr_bias = biases[t]
prev_bias = biases[last_ag]
# Smooth from [t, last_ag).
if t > last_ag:
n = t - last_ag
inter = self.smooth(prev_bias, curr_bias, n)
biases[last_ag:t] = inter
else:
n = steps - last_ag + t
inter = self.smooth(prev_bias, curr_bias, n)
biases[last_ag:] = inter[:(steps - last_ag)]
biases[:t] = inter[(steps - last_ag):]
# Check if the current frame represents an air -> ground transition.
if state2[(t + 1) % steps] == 0:
last_ag = t
# Adjust frames.
frames -= biases
return frames, dt
def move_body(self, x, y, z, t=0):
"""
Move the body some x, y, and z.
:param x: Move x.
:param y: Move y.
:param z: Move z.
:param t: The time in ms.
"""
legs = self.robot.legs
servos = self.robot.leg_servos
self.maestro.get_multiple_positions(servos)
for leg in legs:
a, b, c = leg.get_position()
a -= x
b -= y
c -= z
leg.target_point((-x, -y, -leg.length - z))
self.maestro.end_together(servos, t)
self.wait(servos)
def configure(self):
"""
Configure the Maestro by writing home positions and other configuration data to the device.
"""
settings = self.usc.getUscSettings()
settings.serialMode = uscSerialMode.SERIAL_MODE_USB_DUAL_PORT
for leg in self.robot.legs:
for servo in leg:
servo.zero()
channel = settings.channelSettings[servo.channel]
channel.mode = ChannelMode.Servo
channel.homeMode = HomeMode.Goto
channel.home = servo.target
channel.minimum = (servo.min_pwm // 64) * 64
channel.maximum = -(-servo.max_pwm // 64) * 64
for servo in self.robot.head:
servo.zero()
channel = settings.channelSettings[servo.channel]
channel.mode = ChannelMode.Servo
channel.homeMode = HomeMode.Goto
channel.home = servo.target
channel.minimum = (servo.min_pwm // 64) * 64
channel.maximum = -(-servo.max_pwm // 64) * 64
self.usc.setUscSettings(settings, False)
self.usc.reinitialize(500)
def go_home(self):
"""
Let the Maestro return all servos to home.
"""
self.maestro.go_home()
def ready(self, z, t=2000):
"""
        Ready a gait by lowering the robot to a plane.
        :param z: Height of gait.
        :param t: Time in milliseconds.
"""
# Compute desired pose.
pose = np.zeros((4, 3))
pose[:, 2] = z
# Execute position.
self.target_pose(pose, t)
def zero(self):
"""
Manual return home by resetting all leg servo targets.
"""
# Get all legs and servos for quick access.
legs = self.robot.legs
s1 = self.robot.leg_servos
for leg in legs:
z = -leg.length
leg.target_point((0, 0, z))
# Execute.
self.set_head((0, 0), 1000)
self.maestro.end_together(s1, 1000, True)
# Wait until completion.
self.wait()
def wait(self, servos=None):
"""
Block until all servos have reached their targets.
:param servos: An array of servos. If None, checks if all servos have reached their targets (more efficient).
"""
while not self.is_at_target(servos=servos) and not self.emergency.is_set():
time.sleep(0.001)
def is_at_target(self, servos=None):
"""
Check if servos are at their target.
:param servos: One or more servo objects. If None, checks if all servos have reached their targets (more efficient).
:return: True if all servos are at their targets, False otherwise.
"""
if servos is None:
return not self.maestro.get_moving_state()
elif isinstance(servos, Servo):
self.maestro.get_position(servos)
if servos.at_target():
return True
return False
else:
self.maestro.get_multiple_positions(servos)
if all(servo.at_target() for servo in servos):
return True
return False
| mit | -7,332,994,944,111,070,000 | 27.934645 | 124 | 0.52898 | false |
torgartor21/solar | solar/solar/interfaces/db/redis_graph_db.py | 1 | 9405 | import json
import redis
import fakeredis
from .base import BaseGraphDB, Node, Relation
from .redis_db import OrderedHash
class RedisGraphDB(BaseGraphDB):
DB = {
'host': 'localhost',
'port': 6379,
}
REDIS_CLIENT = redis.StrictRedis
def __init__(self):
self._r = self.REDIS_CLIENT(**self.DB)
self.entities = {}
def node_db_to_object(self, node_db):
if isinstance(node_db, Node):
return node_db
return Node(
self,
node_db['name'],
[node_db['collection']],
node_db['properties']
)
def relation_db_to_object(self, relation_db):
if isinstance(relation_db, Relation):
return relation_db
if relation_db['type_'] == BaseGraphDB.RELATION_TYPES.input_to_input.name:
source_collection = BaseGraphDB.COLLECTIONS.input
dest_collection = BaseGraphDB.COLLECTIONS.input
elif relation_db['type_'] == BaseGraphDB.RELATION_TYPES.resource_input.name:
source_collection = BaseGraphDB.COLLECTIONS.resource
dest_collection = BaseGraphDB.COLLECTIONS.input
elif relation_db['type_'] == BaseGraphDB.RELATION_TYPES.resource_event.name:
source_collection = BaseGraphDB.COLLECTIONS.resource
dest_collection = BaseGraphDB.COLLECTIONS.events
source = self.get(relation_db['source'], collection=source_collection)
dest = self.get(relation_db['dest'], collection=dest_collection)
return Relation(
self,
source,
dest,
relation_db['properties']
)
def all(self, collection=BaseGraphDB.DEFAULT_COLLECTION):
"""Return all elements (nodes) of type `collection`."""
key_glob = self._make_collection_key(collection, '*')
for result in self._all(key_glob):
yield result
def all_relations(self, type_=BaseGraphDB.DEFAULT_RELATION):
"""Return all relations of type `type_`."""
key_glob = self._make_relation_key(type_, '*')
for result in self._all(key_glob):
yield result
def _all(self, key_glob):
keys = self._r.keys(key_glob)
with self._r.pipeline() as pipe:
pipe.multi()
values = [self._r.get(key) for key in keys]
pipe.execute()
for value in values:
yield json.loads(value)
def clear(self):
"""Clear the whole DB."""
self._r.flushdb()
def clear_collection(self, collection=BaseGraphDB.DEFAULT_COLLECTION):
"""Clear all elements (nodes) of type `collection`."""
key_glob = self._make_collection_key(collection, '*')
self._r.delete(self._r.keys(key_glob))
def create(self, name, properties={}, collection=BaseGraphDB.DEFAULT_COLLECTION):
"""Create element (node) with given name, properties, of type `collection`."""
if isinstance(collection, self.COLLECTIONS):
collection = collection.name
properties = {
'name': name,
'properties': properties,
'collection': collection,
}
self._r.set(
self._make_collection_key(collection, name),
json.dumps(properties)
)
return properties
def create_relation(self,
source,
dest,
properties={},
type_=BaseGraphDB.DEFAULT_RELATION):
"""
Create relation (connection) of type `type_` from source to dest with
given properties.
"""
return self.create_relation_str(
source.uid, dest.uid, properties, type_=type_)
def create_relation_str(self, source, dest,
properties={}, type_=BaseGraphDB.DEFAULT_RELATION):
if isinstance(type_, self.RELATION_TYPES):
type_ = type_.name
uid = self._make_relation_uid(source, dest)
properties = {
'source': source,
'dest': dest,
'properties': properties,
'type_': type_,
}
self._r.set(
self._make_relation_key(type_, uid),
json.dumps(properties)
)
return properties
def get(self, name, collection=BaseGraphDB.DEFAULT_COLLECTION,
return_empty=False):
"""Fetch element with given name and collection type."""
try:
collection_key = self._make_collection_key(collection, name)
item = self._r.get(collection_key)
if not item and return_empty:
return item
return json.loads(item)
except TypeError:
raise KeyError(collection_key)
def delete(self, name, collection=BaseGraphDB.DEFAULT_COLLECTION):
keys = self._r.keys(self._make_collection_key(collection, name))
if keys:
self._r.delete(*keys)
def get_or_create(self,
name,
properties={},
collection=BaseGraphDB.DEFAULT_COLLECTION):
"""
Fetch or create element (if not exists) with given name, properties of
type `collection`.
"""
try:
return self.get(name, collection=collection)
except KeyError:
return self.create(name, properties=properties, collection=collection)
def _relations_glob(self,
source=None,
dest=None,
type_=BaseGraphDB.DEFAULT_RELATION):
if source is None:
source = '*'
else:
source = source.uid
if dest is None:
dest = '*'
else:
dest = dest.uid
return self._make_relation_key(type_, self._make_relation_uid(source, dest))
def delete_relations(self,
source=None,
dest=None,
type_=BaseGraphDB.DEFAULT_RELATION,
has_properties=None):
"""Delete all relations of type `type_` from source to dest."""
glob = self._relations_glob(source=source, dest=dest, type_=type_)
keys = self._r.keys(glob)
if not keys:
return
        if not has_properties:
            self._r.delete(*keys)
            return
rels = self.get_relations(
source=source, dest=dest, type_=type_, has_properties=has_properties
)
for r in rels:
self.delete_relations(
source=r.start_node,
dest=r.end_node,
type_=type_
)
def get_relations(self,
source=None,
dest=None,
type_=BaseGraphDB.DEFAULT_RELATION,
has_properties=None):
"""Fetch all relations of type `type_` from source to dest."""
glob = self._relations_glob(source=source, dest=dest, type_=type_)
def check_has_properties(r):
if has_properties:
for k, v in has_properties.items():
if not r['properties'].get(k) == v:
return False
return True
for r in self._all(glob):
# Glob is primitive, we must filter stuff correctly here
if source and r['source'] != source.uid:
continue
if dest and r['dest'] != dest.uid:
continue
if not check_has_properties(r):
continue
yield r
def get_relation(self, source, dest, type_=BaseGraphDB.DEFAULT_RELATION):
"""Fetch relations with given source, dest and type_."""
uid = self._make_relation_key(source.uid, dest.uid)
try:
return json.loads(
self._r.get(self._make_relation_key(type_, uid))
)
except TypeError:
raise KeyError
def get_or_create_relation(self,
source,
dest,
properties=None,
type_=BaseGraphDB.DEFAULT_RELATION):
"""Fetch or create relation with given properties."""
properties = properties or {}
try:
return self.get_relation(source, dest, type_=type_)
except KeyError:
return self.create_relation(source, dest, properties=properties, type_=type_)
def _make_collection_key(self, collection, _id):
if isinstance(collection, self.COLLECTIONS):
collection = collection.name
# NOTE: hiera-redis backend depends on this!
return '{0}:{1}'.format(collection, _id)
def _make_relation_uid(self, source, dest):
"""
There can be only one relation from source to dest, that's why
this function works.
"""
return '{0}-{1}'.format(source, dest)
def _make_relation_key(self, type_, _id):
if isinstance(type_, self.RELATION_TYPES):
type_ = type_.name
# NOTE: hiera-redis backend depends on this!
return '{0}:{1}'.format(type_, _id)
def get_ordered_hash(self, collection):
return OrderedHash(self._r, collection)
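    # Key layout sketch (derived from the helpers above; the names are illustrative):
    #   _make_collection_key('resource', 'node1')              -> 'resource:node1'
    #   _make_relation_uid('node1', 'input1')                  -> 'node1-input1'
    #   _make_relation_key('resource_input', 'node1-input1')   -> 'resource_input:node1-input1'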
class FakeRedisGraphDB(RedisGraphDB):
REDIS_CLIENT = fakeredis.FakeStrictRedis
| apache-2.0 | -3,255,286,357,683,565,600 | 30.560403 | 89 | 0.547581 | false |
jainanisha90/WeVoteServer | search/query_test_script.py | 1 | 8852 | #!/usr/bin/env python
# Test this by entering the search string "election" on a command line like this:
# /home/wevote/WeVoteServer/search/query_test_script.py election
from elasticsearch import Elasticsearch
import sys
es = Elasticsearch(["172.31.24.246:9200"], timeout = 120, max_retries = 5, retry_on_timeout = True)
if len(sys.argv) < 2:
print "Usage: %s <search term>" % (sys.argv[0])
sys.exit(-1)
search_term = sys.argv[1]
#query = { "query": {"match": { "candidate_name": "Joe"}}}
#query = { "query": {"match": { "candidate_name": "Joe"}}}
#query = { "query": { "multi_match": { "type": "phrase_prefix", "query": search_term, "fields": [ "candidate_name", "candidate_twitter_handle", "twitter_name", "measure_subtitle", "measure_text", "measure_title", "office_name", "first_name", "middle_name", "last_name", "party", "organization_name", "organization_twitter_handle", "twitter_description" ] } }}
query = { "query": { "multi_match": { "type": "phrase_prefix", "query": search_term, "fields": [ "google_civic_election_id", "candidate_name", "candidate_twitter_handle", "election_name", "twitter_name", "measure_subtitle", "measure_text", "measure_title", "office_name", "party", "organization_name", "organization_twitter_handle", "twitter_description" ] } }}
query_with_election_date = { "query": { "multi_match": { "type": "phrase_prefix",
"query": search_term,
"fields": [ "election_name^3", "google_civic_election_id",
"candidate_name",
"candidate_twitter_handle", "election_name",
"twitter_name", "measure_subtitle", "measure_text",
"measure_title", "office_name", "party",
"organization_name", "organization_twitter_handle",
"twitter_description", "state_name"],
"slop": 5}},
"sort": [{"election_day_text": {"order": "desc"}},
{"_score": {"order": "desc"}}]}
query_with_missing_last_election_date = { "query": { "multi_match": { "type": "phrase_prefix",
"query": search_term,
"fields": [ "election_name^3", "google_civic_election_id",
"candidate_name",
"candidate_twitter_handle", "election_name",
"twitter_name", "measure_subtitle", "measure_text",
"measure_title", "office_name", "party",
"organization_name", "organization_twitter_handle",
"twitter_description", "state_name"],
"slop": 5}},
"sort": [{"election_day_text": {"missing": "_last", "order": "desc"}},
{"_score": {"order": "desc"}}]}
query_with_missing_election_date_without_order = { "query": { "multi_match": { "type": "phrase_prefix",
"query": search_term,
"fields": [ "election_name^3", "google_civic_election_id",
"candidate_name",
"candidate_twitter_handle", "election_name",
"twitter_name", "measure_subtitle", "measure_text",
"measure_title", "office_name", "party",
"organization_name", "organization_twitter_handle",
"twitter_description", "state_name"],
"slop": 5}},
"sort": [{"election_day_text": {"missing": "1111-11-11"}},
{"_score": {"order": "desc"}}]}
query_with_election_missing_date_value = { "query": { "multi_match": { "type": "phrase_prefix",
"query": search_term,
"fields": [ "election_name^3", "google_civic_election_id",
"candidate_name",
"candidate_twitter_handle", "election_name",
"twitter_name", "measure_subtitle", "measure_text",
"measure_title", "office_name", "party",
"organization_name", "organization_twitter_handle",
"twitter_description", "state_name"],
"slop": 5}},
"sort": [{"election_day_text": {"missing": "1111-11-11", "order": "desc"}},
{"_score": {"order": "desc"}}]}
# Example of querying ALL indexes
res = es.search(body=query)
res_with_election_date = es.search(body=query_with_election_date)
res_with_missing_last_election_date = es.search(body=query_with_missing_last_election_date)
# res_with_missing_election_date_without_order = es.search(body=query_with_missing_election_date_without_order)
# res_with_election_missing_date_value = es.search(body=query_with_election_missing_date_value)
print "Got %d hits from all index search: " % res['hits']['total']
print "Got %d hits from all index search: " % res_with_election_date['hits']['total']
print "Got %d hits from all index search: " % res_with_missing_last_election_date['hits']['total']
# print "Got %d hits from all index search: " % res_with_missing_election_date_without_order['hits']['total']
# print "Got %d hits from all index search: " % res_with_election_missing_date_value['hits']['total']
for hit in res['hits']['hits']:
print "------------- RESULT --------------"
for field in hit:
print "%s: %s" % (field, hit[field])
print "============================================"
print "============================================"
for hit in res_with_election_date['hits']['hits']:
print "------------- RESULT --------------"
for field in hit:
print "%s: %s" % (field, hit[field])
print "============================================"
print "============================================"
for hit in res_with_missing_last_election_date['hits']['hits']:
print "------------- RESULT --------------"
for field in hit:
print "%s: %s" % (field, hit[field])
print "============================================"
# print "============================================"
# for hit in res_with_missing_election_date_without_order['hits']['hits']:
# print "------------- RESULT --------------"
# for field in hit:
# print "%s: %s" % (field, hit[field])
# print "============================================"
# print "============================================"
# for hit in res_with_election_missing_date_value['hits']['hits']:
# print "------------- RESULT --------------"
# for field in hit:
# print "%s: %s" % (field, hit[field])
# example of querying single index
if (True):
res = es.search(index="elections", body={ "query": {"match": { "google_civic_election_id": "5000"}}})
print "Got %d hits from single index search: " % res['hits']['total']
for hit in res['hits']['hits']:
for field in hit:
print "%s: %s" % (field, hit[field])
| mit | -3,072,688,002,611,289,600 | 72.766667 | 361 | 0.405219 | false |
bourguet/operator_precedence_parsing | modified_operator_precedence.py | 1 | 9379 | #! /usr/bin/env python3
import sys
import lexer
from tree import Node, CompositeNode
class SymbolDesc:
def __init__(self, symbol, lprio, rprio, evaluator):
self.symbol = symbol
self.lprio = lprio
self.rprio = rprio
self.evaluator = evaluator
def __repr__(self):
return '<Symbol {} {}/{}>'.format(self.symbol, self.lprio, self.rprio)
def identity_evaluator(args):
if len(args) == 1 and type(args[0]) == SymbolDesc:
return Node(args[0].symbol)
else:
return CompositeNode('ID ERROR', args)
def binary_evaluator(args):
if len(args) != 3 or type(args[0]) == SymbolDesc or type(args[1]) != SymbolDesc or type(args[2]) == SymbolDesc:
return CompositeNode('BINARY ERROR', args)
return CompositeNode(args[1].symbol, [args[0], args[2]])
class Parser:
def __init__(self):
self.presymbols = {}
self.presymbols['$soi$'] = SymbolDesc('$soi$', 0, 0, None)
self.postsymbols = {}
self.postsymbols['$soi$'] = SymbolDesc('$soi$', 0, 0, None)
def register_presymbol(self, oper, lprio, rprio, evaluator=None):
if evaluator is None:
evaluator = unary_evaluator
if type(oper) is str:
self.presymbols[oper] = SymbolDesc(oper, lprio, rprio, evaluator)
else:
for op in oper:
self.presymbols[op] = SymbolDesc(op, lprio, rprio, evaluator)
def register_postsymbol(self, oper, lprio, rprio, evaluator=None):
if evaluator is None:
evaluator = binary_evaluator
if type(oper) is str:
self.postsymbols[oper] = SymbolDesc(oper, lprio, rprio, evaluator)
else:
for op in oper:
self.postsymbols[op] = SymbolDesc(op, lprio, rprio, evaluator)
def advance(self):
try:
self.cur_token = self.lexer.__next__()
except StopIteration:
self.cur_token = None
def reset(self, s):
self.lexer = lexer.tokenize(s)
self.advance()
self.stack = [self.presymbols['$soi$']]
def id_symbol(self, id):
return SymbolDesc(id, 999, 1000, identity_evaluator)
def evaluate_handle(self, args):
for i in args:
if type(i) == SymbolDesc:
return i.evaluator(args)
raise RuntimeError('Internal error: no evaluator found in {}'.format(args))
def evaluate(self):
idx = len(self.stack)-1
if type(self.stack[idx]) != SymbolDesc:
idx -= 1
curprio = self.stack[idx].lprio
while type(self.stack[idx-1]) != SymbolDesc or self.stack[idx-1].rprio == curprio:
idx -= 1
if type(self.stack[idx]) == SymbolDesc:
curprio = self.stack[idx].lprio
args = self.stack[idx:]
self.stack[idx:] = []
self.stack.append(self.evaluate_handle(args))
def tos_symbol(self):
idx = len(self.stack)-1
while type(self.stack[idx]) != SymbolDesc:
idx -= 1
return self.stack[idx]
def cur_sym(self, allow_presymbol):
if self.cur_token is None:
return None
elif self.cur_token.kind == 'ID':
return self.id_symbol(self.cur_token)
elif self.cur_token.kind == 'NUMBER':
return self.id_symbol(self.cur_token)
elif allow_presymbol and self.cur_token.lexem in self.presymbols:
return self.presymbols[self.cur_token.lexem]
elif self.cur_token.lexem in self.postsymbols:
return self.postsymbols[self.cur_token.lexem]
else:
return None
def parse(self, s):
self.reset(s)
while True:
sym = self.cur_sym(type(self.stack[-1]) == SymbolDesc)
if sym is None:
break
while self.tos_symbol().rprio > sym.lprio:
self.evaluate()
sym = self.cur_sym(False)
self.stack.append(sym)
self.advance()
while len(self.stack) > 2 or (len(self.stack) == 2 and type(self.stack[-1]) == SymbolDesc):
self.evaluate()
if len(self.stack) == 1:
res = None
elif len(self.stack) == 2:
res = self.stack[1]
if self.cur_token is not None:
res = CompositeNode('REMAINING INPUT', [res, self.cur_token])
return res
def open_parenthesis_evaluator(args):
if (len(args) == 3
and type(args[0]) == SymbolDesc and args[0].symbol == '('
and type(args[1]) != SymbolDesc
and type(args[2]) == SymbolDesc and args[2].symbol == ')'):
return args[1]
elif (len(args) == 3
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc and args[1].symbol == '('
and type(args[2]) == SymbolDesc and args[2].symbol == ')'):
return CompositeNode('call', [args[0]])
elif (len(args) == 4
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc and args[1].symbol == '('
and type(args[2]) != SymbolDesc
and type(args[3]) == SymbolDesc and args[3].symbol == ')'):
if args[2].token == ',':
callargs = args[2].children
else:
callargs = [args[2]]
callargs.insert(0, args[0])
return CompositeNode('call', callargs)
else:
return CompositeNode('( ERROR', args)
def close_parenthesis_evaluator(args):
return CompositeNode(') ERROR', args)
def open_bracket_evaluator(args):
if (len(args) == 4
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc and args[1].symbol == '['
and type(args[2]) != SymbolDesc
and type(args[3]) == SymbolDesc and args[3].symbol == ']'):
return CompositeNode('get', [args[0], args[2]])
else:
return CompositeNode('[ ERROR', args)
def close_bracket_evaluator(args):
return CompositeNode('] ERROR', args)
def coma_evaluator(args):
return CompositeNode(',', [x for x in args if type(x) != SymbolDesc])
def unary_evaluator(args):
if len(args) != 2:
return CompositeNode('UNARY ERROR', args)
if type(args[0]) == SymbolDesc and type(args[1]) != SymbolDesc:
return CompositeNode(args[0].symbol, [args[1]])
elif type(args[0]) != SymbolDesc and type(args[1]) == SymbolDesc:
return CompositeNode('post'+args[1].symbol, [args[0]])
else:
return CompositeNode('UNARY ERROR', args)
def unary_or_binary_evaluator(args):
if (len(args) == 2
and type(args[0]) == SymbolDesc
and type(args[1]) != SymbolDesc):
return CompositeNode(args[0].symbol, [args[1]])
elif (len(args) == 2
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc):
return CompositeNode('post'+args[1].symbol, [args[0]])
elif (len(args) == 3
and type(args[0]) != SymbolDesc
and type(args[1]) == SymbolDesc
and type(args[2]) != SymbolDesc):
return CompositeNode(args[1].symbol, [args[0], args[2]])
else:
return CompositeNode('1,2-ARY ERROR', args)
def question_evaluator(args):
if (len(args) != 5
or type(args[0]) == SymbolDesc
or type(args[1]) != SymbolDesc or args[1].symbol != '?'
or type(args[2]) == SymbolDesc
or type(args[3]) != SymbolDesc or args[3].symbol != ':'
or type(args[4]) == SymbolDesc):
return CompositeNode('? ERROR', args)
return CompositeNode('?', [args[0], args[2], args[4]])
def colon_evaluator(args):
return CompositeNode(': ERROR', args)
def cexp_parser():
parser = Parser()
parser.register_postsymbol(',', 2, 2, coma_evaluator)
parser.register_postsymbol(['=', '*=', '/=', '%=', '+=', '-=', '<<=', '>>=', '&=', '|=', '^='], 5, 4)
parser.register_postsymbol('?', 7, 1, question_evaluator)
parser.register_postsymbol(':', 1, 6, colon_evaluator)
parser.register_postsymbol('||', 8, 9)
parser.register_postsymbol('&&', 10, 11)
parser.register_postsymbol('|', 12, 13)
parser.register_postsymbol('^', 14, 15)
parser.register_postsymbol('&', 16, 17)
parser.register_postsymbol(['==', '!='], 18, 19)
parser.register_postsymbol(['<', '>', '<=', '>='], 20, 21)
parser.register_postsymbol(['<<', '>>'], 22, 23)
parser.register_postsymbol(['+', '-'], 24, 25)
parser.register_postsymbol(['/', '%', '*'], 26, 27)
parser.register_postsymbol('**', 29, 28)
parser.register_presymbol(['+', '-', '++', '--', '~', '!', '&', '*'], 31, 30, unary_evaluator)
parser.register_postsymbol(['++', '--'], 32, 33, unary_evaluator)
parser.register_postsymbol(['.', '->'], 32, 33)
parser.register_postsymbol('(', 100, 1, open_parenthesis_evaluator)
parser.register_postsymbol(')', 1, 100, close_parenthesis_evaluator)
parser.register_postsymbol('[', 100, 1, open_bracket_evaluator)
parser.register_postsymbol(']', 1, 100, close_bracket_evaluator)
return parser
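# Usage sketch (illustrative, not from the original source): the precedence table above makes
# '*' bind tighter than '+', so parsing "a + b * c" groups b*c first:
#   tree = cexp_parser().parse("a + b * c")
#   # yields a '+' node whose right child is the '*' node over b and c
#   # (the exact printed form depends on tree.CompositeNode's repr)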
def main(args):
parser = cexp_parser()
for s in args[1:]:
try:
exp = parser.parse(s)
print('{} -> {}'.format(s, exp))
except RuntimeError as run_error:
print('Unable to parse {}: {}'.format(s, run_error))
if __name__ == "__main__":
main(sys.argv)
| bsd-2-clause | 3,458,328,838,560,375,000 | 34.661597 | 115 | 0.565625 | false |
bessl/Wetterfrosch | www/www/tests.py | 1 | 2748 | import unittest
import transaction
import datetime
from pyramid import testing
from .models import DBSession, Log
from www.views import *
def _initTestingDB():
from sqlalchemy import create_engine
from www.models import (
DBSession,
Log,
Base
)
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
DBSession.configure(bind=engine)
with transaction.manager:
model = Log(id=1, at=datetime.datetime.now(), temperature=33, humidity=41)
DBSession.add(model)
model = Log(id=2, at=datetime.datetime.now(), temperature=13, humidity=40)
DBSession.add(model)
return DBSession
# class CurrentDataTests(unittest.TestCase):
# def setUp(self):
# self.session = _initTestingDB()
# self.config = testing.setUp()
# self.config.add_route('current', 'current.json')
#
# def tearDown(self):
# self.session.remove()
# testing.tearDown()
#
# def test_current_data(self):
# request = testing.DummyRequest()
# response = current(request)
# self.assertEqual(response["temperature"], 13)
# self.assertEqual(response["humidity"], 40)
#
#
# class AddMeasurementDataTests(unittest.TestCase):
# def setUp(self):
# self.session = _initTestingDB()
# self.config = testing.setUp()
# self.config.add_route('add', 'add/')
#
# def tearDown(self):
# self.session.remove()
# testing.tearDown()
#
# def test_only_post_request_allowed(self):
# request = testing.DummyRequest(params={'t': 12, 'h': 34}, post=None)
# response = add(request)
# self.assertEqual(response.status_code, 500)
#
# def test_only_full_data_request_allowed(self):
# request = testing.DummyRequest(params={'t': 12}, post=True)
# response = add(request)
# self.assertEqual(response.status_code, 500)
#
# request2 = testing.DummyRequest(params={'h': 99}, post=True)
# response2 = add(request2)
# self.assertEqual(response2.status_code, 500)
#
# def test_save_request(self):
# request = testing.DummyRequest(params={'t': 23, 'h': 24}, post=True)
# response = add(request)
# self.assertEqual(response.status_code, 200)
#
# m = self.session.query(Log).filter_by(temperature=23).one()
# self.assertEqual(m.humidity, 24)
#
# count_current = len(self.session.query(Log).all())
# self.assertEqual(count_current, 3)
#
# self.assertEqual(m.id, 3)
# self.assertEqual(m.humidity, 24)
# self.assertEqual(m.temperature, 23)
# self.assertEqual(m.at.strftime("%d%m%Y%H"), datetime.datetime.now().strftime("%d%m%Y%H"))
| lgpl-3.0 | 430,036,750,184,190,900 | 32.512195 | 99 | 0.621543 | false |
rwl/PyCIM | CIM14/ENTSOE/Dynamics/IEC61970/Meas/MeasMeasurement.py | 1 | 2367 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Dynamics.IEC61970.Core.CoreIdentifiedObject import CoreIdentifiedObject
class MeasMeasurement(CoreIdentifiedObject):
def __init__(self, PowerSystemResource=None, *args, **kw_args):
"""Initialises a new 'MeasMeasurement' instance.
@param PowerSystemResource:
"""
self._PowerSystemResource = None
self.PowerSystemResource = PowerSystemResource
super(MeasMeasurement, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["PowerSystemResource"]
_many_refs = []
def getPowerSystemResource(self):
"""
"""
return self._PowerSystemResource
def setPowerSystemResource(self, value):
if self._PowerSystemResource is not None:
filtered = [x for x in self.PowerSystemResource.Measurements if x != self]
self._PowerSystemResource._Measurements = filtered
self._PowerSystemResource = value
if self._PowerSystemResource is not None:
if self not in self._PowerSystemResource._Measurements:
self._PowerSystemResource._Measurements.append(self)
PowerSystemResource = property(getPowerSystemResource, setPowerSystemResource)
| mit | 2,298,372,686,646,743,600 | 39.810345 | 89 | 0.718209 | false |
wbonnet/sbit | sbit/cli_command.py | 1 | 2965 | #
# The contents of this file are subject to the Apache 2.0 license you may not
# use this file except in compliance with the License.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
#
# Copyright 2017 DFT project (http://www.debianfirmwaretoolkit.org).
# All rights reserved. Use is subject to license terms.
#
#
# Contributors list :
#
# William Bonnet [email protected], [email protected]
#
#
""" This module implements The base class and functionnalities used by all the
cli targets.
"""
import subprocess
from sbit.model import Key
# -----------------------------------------------------------------------------
#
# Class CliCommand
#
# -----------------------------------------------------------------------------
class CliCommand(object):
"""This class implements the base class used for all command from cli
  It provides methods used in all the derived commands, such as
  command execution and error handling, qemu setup and tear down, etc.
"""
# -------------------------------------------------------------------------
#
# __init__
#
# -------------------------------------------------------------------------
def __init__(self, configuration):
"""Default constructor
"""
# Object storing the configuration definition. holds all the
# configuration and definition used by the different stage of
# the tool execution
self.cfg = configuration
# -------------------------------------------------------------------------
#
# execute_command
#
# -------------------------------------------------------------------------
def execute_command(self, command):
""" This method run a command as a subprocess. Typical use case is
    running commands.
    This method is a wrapper around subprocess.run, and will soon be moved
    into a helper object. It provides shared error handling.
"""
self.cfg.logging.debug("running : " + command)
try:
# Execute the subprocess, output ans errors are piped
completed = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, check=True, universal_newlines=False)
# Return the output of the process to the caller
return completed.returncode, completed.stdout, completed.stderr
    # We catch CalledProcessError, but continue execution and return the completed structure to the caller.
    # This has to be done since we execute tests that can fail, thus global execution should not stop
    # on the first error.
except subprocess.CalledProcessError as exception:
# Return the output of the process to the caller
return exception.returncode, exception.stdout.decode(Key.UTF8.value), \
exception.stderr.decode(Key.UTF8.value)
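  # A typical (hypothetical) use from a subclass would be:
  #
  #   rc, out, err = self.execute_command("uname -a")
  #   if rc != 0:
  #     self.cfg.logging.debug("command failed: " + str(err))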
| apache-2.0 | 6,353,952,181,077,633,000 | 33.08046 | 98 | 0.597976 | false |
sxjscience/tvm | tutorials/get_started/tvmc_command_line_driver.py | 1 | 12118 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Getting Started with TVM command line driver - TVMC
===================================================
**Authors**:
`Leandro Nunes <https://github.com/leandron>`_,
`Matthew Barrett <https://github.com/mbaret>`_
This tutorial is an introduction to working with TVMC, the TVM command
line driver. TVMC is a tool that exposes TVM features such as
auto-tuning, compiling, profiling and execution of models, via a
command line interface.
In this tutorial we are going to use TVMC to compile, run and tune a
ResNet-50 on a x86 CPU.
We are going to start by downloading ResNet 50 V2. Then, we are going
to use TVMC to compile this model into a TVM module, and use the
compiled module to generate predictions. Finally, we are going to experiment
with the auto-tuning options, that can be used to help the compiler to
improve network performance.
The final goal is to give an overview of TVMC's capabilities and also
some guidance on where to look for more information.
"""
######################################################################
# Using TVMC
# ----------
#
# TVMC is a Python application, part of the TVM Python package.
# When you install TVM using a Python package, you will get TVMC as
# a command line application called ``tvmc``.
#
# Alternatively, if you have TVM as a Python module on your
# ``$PYTHONPATH``, you can access the command line driver functionality
# via the executable python module, ``python -m tvm.driver.tvmc``.
#
# For simplicity, this tutorial will mention TVMC command line using
# ``tvmc <options>``, but the same results can be obtained with
# ``python -m tvm.driver.tvmc <options>``.
#
# You can check the help page using:
#
# .. code-block:: bash
#
# tvmc --help
#
#
# As you can see in the help page, the main features are
# accessible via the subcommands ``tune``, ``compile`` and ``run``.
# To read about specific options under a given subcommand, use
# ``tvmc <subcommand> --help``.
#
# In the following sections we will use TVMC to tune, compile and
# run a model. But first, we need a model.
#
######################################################################
# Obtaining the model
# -------------------
#
# We are going to use ResNet-50 V2 as an example to experiment with TVMC.
# The version below is in ONNX format. To download the file, you can use
# the command below:
#
# .. code-block:: bash
#
# wget https://github.com/onnx/models/raw/master/vision/classification/resnet/model/resnet50-v2-7.onnx
#
#
######################################################################
# .. note:: Supported model formats
#
# TVMC supports models created with Keras, ONNX, TensorFlow, TFLite
# and Torch. Use the option ``--model-format`` if you need to
# explicitly provide the model format you are using. See ``tvmc
# compile --help`` for more information.
#
######################################################################
# Compiling the model
# -------------------
#
# The next step, once we've downloaded ResNet-50, is to compile it.
# To accomplish that, we are going to use ``tvmc compile``. The
# output we get from the compilation process is a TAR package,
# that can be used to run our model on the target device.
#
# .. code-block:: bash
#
# tvmc compile \
# --target "llvm" \
# --output compiled_module.tar \
# resnet50-v2-7.onnx
#
# Once compilation finishes, the output ``compiled_module.tar`` will be created. This
# can be directly loaded by your application and run via the TVM runtime APIs.
#
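######################################################################
# .. note:: Loading the package from Python
#
#   The exact layout of the TAR package is an implementation detail of
#   TVMC. As a rough sketch, assuming the archive contains ``mod.so``,
#   ``mod.json`` and ``mod.params`` (which is what this version of TVMC
#   produces), loading it with the TVM runtime APIs could look like:
#
#   .. code-block:: python
#
#      import tarfile
#      import tvm
#      from tvm.contrib import graph_runtime
#
#      with tarfile.open("compiled_module.tar") as tar:
#          tar.extractall(path="compiled")
#
#      lib = tvm.runtime.load_module("compiled/mod.so")
#      graph = open("compiled/mod.json").read()
#      params = bytearray(open("compiled/mod.params", "rb").read())
#
#      module = graph_runtime.create(graph, lib, tvm.cpu())
#      module.load_params(params)
#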
######################################################################
# .. note:: Defining the correct target
#
# Specifying the correct target (option ``--target``) can have a huge
# impact on the performance of the compiled module, as it can take
# advantage of hardware features available on the target. For more
# information, please refer to `Auto-tuning a convolutional network
# for x86 CPU <https://tvm.apache.org/docs/tutorials/autotvm/tune_relay_x86.html#define-network>`_.
#
######################################################################
#
# In the next step, we are going to use the compiled module, providing it
# with some inputs, to generate some predictions.
#
######################################################################
# Input pre-processing
# --------------------
#
# In order to generate predictions, we will need two things:
#
# - the compiled module, which we just produced;
# - a valid input to the model
#
# Each model is particular when it comes to expected tensor shapes, formats and data
# types. For this reason, most models require some pre and
# post processing, to ensure the input(s) is valid and to interpret the output(s).
#
# In TVMC, we adopted NumPy's ``.npz`` format for both input and output data.
# This is a well-supported NumPy format to serialize multiple arrays into a file.
#
# We will use the usual cat image, similar to other TVM tutorials:
#
# .. image:: https://s3.amazonaws.com/model-server/inputs/kitten.jpg
# :height: 224px
# :width: 224px
# :align: center
#
# For our ResNet 50 V2 model, the input is expected to be in ImageNet format.
# Here is an example of a script to pre-process an image for ResNet 50 V2.
#
from tvm.contrib.download import download_testdata
from PIL import Image
import numpy as np
img_url = "https://s3.amazonaws.com/model-server/inputs/kitten.jpg"
img_path = download_testdata(img_url, "imagenet_cat.png", module="data")
# Resize it to 224x224
resized_image = Image.open(img_path).resize((224, 224))
img_data = np.asarray(resized_image).astype("float32")
# ONNX expects NCHW input, so convert the array
img_data = np.transpose(img_data, (2, 0, 1))
# Normalize according to ImageNet
imagenet_mean = np.array([0.485, 0.456, 0.406])
imagenet_stddev = np.array([0.229, 0.224, 0.225])
norm_img_data = np.zeros(img_data.shape).astype("float32")
for i in range(img_data.shape[0]):
norm_img_data[i, :, :] = (img_data[i, :, :] / 255 - imagenet_mean[i]) / imagenet_stddev[i]
# Add batch dimension
img_data = np.expand_dims(norm_img_data, axis=0)
# Save to .npz (outputs imagenet_cat.npz)
np.savez("imagenet_cat", data=img_data)
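# Optionally, verify what was written. This is just a quick sanity check;
# note that the array name ("data") must match the input name that the
# compiled model expects:
with np.load("imagenet_cat.npz") as packed_input:
    print({name: packed_input[name].shape for name in packed_input.files})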
######################################################################
# Running the compiled module
# ---------------------------
#
# With both the compiled module and input file in hand, we can run it by
# invoking ``tvmc run``.
#
# .. code-block:: bash
#
# tvmc run \
# --inputs imagenet_cat.npz \
# --output predictions.npz \
# compiled_module.tar
#
# When running the above command, a new file ``predictions.npz`` should
# be produced. It contains the output tensors.
#
# In this example, we are running the model on the same machine that we used
# for compilation. In some cases we might want to run it remotely via
# an RPC Tracker. To read more about these options please check ``tvmc
# run --help``.
#
######################################################################
# Output post-processing
# ----------------------
#
# As previously mentioned, each model will have its own particular way
# of providing output tensors.
#
# In our case, we need to run some post-processing to render the
# outputs from ResNet 50 V2 into a more human-readable form.
#
# The script below shows an example of the post-processing to extract
# labels from the output of our compiled module.
#
import os.path
import numpy as np
from scipy.special import softmax
from tvm.contrib.download import download_testdata
# Download a list of labels
labels_url = "https://s3.amazonaws.com/onnx-model-zoo/synset.txt"
labels_path = download_testdata(labels_url, "synset.txt", module="data")
with open(labels_path, "r") as f:
labels = [l.rstrip() for l in f]
output_file = "predictions.npz"
# Open the output and read the output tensor
if os.path.exists(output_file):
with np.load(output_file) as data:
scores = softmax(data["output_0"])
scores = np.squeeze(scores)
        ranks = np.argsort(scores)[::-1]
        for rank in ranks[0:5]:
            print("class='%s' with probability=%f" % (labels[rank], scores[rank]))
########################################################################
# When running the script, a list of predictions should be printed similar
# to the example below.
#
# .. code-block:: bash
#
# $ python post_processing.py
# class=n02123045 tabby, tabby cat ; probability=446.000000
# class=n02123159 tiger cat ; probability=675.000000
# class=n02124075 Egyptian cat ; probability=836.000000
# class=n02129604 tiger, Panthera tigris ; probability=917.000000
# class=n04040759 radiator ; probability=213.000000
#
######################################################################
# Tuning the model
# ----------------
#
# In some cases, we might not get the expected performance when running
# inferences using our compiled module. In cases like this, we can make use
# of the auto-tuner, to find a better configuration for our model and
# get a boost in performance.
#
# Tuning in TVM refers to the process by which a model is optimized
# to run faster on a given target. This differs from training or
# fine-tuning in that it does not affect the accuracy of the model,
# but only the runtime performance.
#
# As part of the tuning process, TVM will try running many different
# operator implementation variants to see which perform best. The
# results of these runs are stored in a tuning records file, which is
# ultimately the output of the ``tune`` subcommand.
#
# In the simplest form, tuning requires you to provide three things:
#
# - the target specification of the device you intend to run this model on;
# - the path to an output file in which the tuning records will be stored, and finally,
# - a path to the model to be tuned.
#
#
# The example below demonstrates how that works in practice:
#
# .. code-block:: bash
#
# tvmc tune \
# --target "llvm" \
# --output autotuner_records.json \
# resnet50-v2-7.onnx
#
#
# Tuning sessions can take a long time, so ``tvmc tune`` offers many options to
# customize your tuning process, in terms of number of repetitions (``--repeat`` and
# ``--number``, for example), the tuning algorithm to be used, and so on.
# Check ``tvmc tune --help`` for more information.
#
# As an output of the tuning process above, we obtained the tuning records stored
# in ``autotuner_records.json``. This file can be used in two ways:
#
# - as an input to further tuning (via ``tvmc tune --tuning-records``), or
# - as an input to the compiler
#
# The compiler will use the results to generate high performance code for the model
# on your specified target. To do that we can use ``tvmc compile --tuning-records``.
# Check ``tvmc compile --help`` for more information.
#
######################################################################
# Final Remarks
# -------------
#
# In this tutorial, we presented TVMC, a command line driver for TVM.
# We demonstrated how to compile, run and tune a model, as well
# as discussed the need for pre and post processing of inputs and outputs.
#
# Here we presented a simple example using ResNet 50 V2 locally. However, TVMC
# supports many more features including cross-compilation, remote execution and
# profiling/benchmarking.
#
# To see what other options are available, please have a look at ``tvmc --help``.
#
| apache-2.0 | 2,922,419,167,132,378,000 | 35.065476 | 104 | 0.661825 | false |
pleed/pyqemu | target-i386/pyqemu/processinfo.py | 1 | 26210 | #!/usr/bin/python
import PyFlxInstrument
from Structures import *
# --- class Image ------------------------------------------------------
class Image( object):
def get_entrypoint( self):
try:
return self.cached.entrypoint
except:
return self.ldr_data_table_entry.EntryPoint
def get_sizeofimage( self):
try:
return self.cached.sizeofimage
except:
return self.ldr_data_table_entry.SizeOfImage
def get_dllbase( self):
try:
return self.cached.dllbase
except:
return self.ldr_data_table_entry.DllBase
def get_fulldllname( self):
try:
return self.cached.fulldllname
except:
return self.ldr_data_table_entry.FullDllName.str()
def get_basedllname( self):
try:
return self.cached.basedllname
except:
return self.ldr_data_table_entry.BaseDllName.str()
EntryPoint = property( get_entrypoint)
SizeOfImage = property( get_sizeofimage)
DllBase = property( get_dllbase)
FullDllName = property( get_fulldllname)
BaseDllName = property( get_basedllname)
Name = property( get_basedllname) # for compatibility with a yet-to-be-implemented general memory range class
def __init__( self, ldr_data_table_entry, process):
self.ldr_data_table_entry = ldr_data_table_entry
self.process = process
self.valid = False
self.exports_done = False
self.exports = {}
self.last_executed_page = None
self.image_type = IMAGE_TYPE_UNKNOWN
self.cached = GenericStruct()
self.pending_pages = set( [])
self.dump_pending = False
self.update()
def update( self):
# sanity check the LDR_DATA_TABLE_ENTRY struct:
# - Check whether DllBase is on a page boundary
# - Check whether EntryPoint is within [DllBase, DllBase+SizeOfImage) or 0
# - Check whether the entire DLL resides in userspace?
# - Check whether SizeOfImage is a multiple of the page size
# - Check whether SizeOfImage != 0
valid = self.valid
if not valid:
valid = True
valid = valid and not (self.ldr_data_table_entry.DllBase % PAGESIZE)
valid = valid and self.ldr_data_table_entry.EntryPoint >= self.ldr_data_table_entry.DllBase \
and self.ldr_data_table_entry.EntryPoint < self.ldr_data_table_entry.DllBase + self.ldr_data_table_entry.SizeOfImage
valid = valid and self.ldr_data_table_entry.DllBase < USER_KERNEL_SPLIT \
and self.ldr_data_table_entry.DllBase + self.ldr_data_table_entry.SizeOfImage < USER_KERNEL_SPLIT
valid = valid and not (self.ldr_data_table_entry.SizeOfImage % PAGESIZE)
valid = valid and self.ldr_data_table_entry.SizeOfImage != 0
# if we cannot yet fetch the FullDllName, try again later
try:
fulldllname = self.ldr_data_table_entry.FullDllName.str()
except PageFaultException, pagefault:
valid = False
self.pending_pages.add( pagefault.value / PAGESIZE)
#PyBochsC.pending_page( True)
if not self.valid and valid:
# this image was previously not valid, but is now, so it must be new
#if self.BaseDllName.startswith( self.process.eprocess.ImageFileName.strip( "\0")):
# print "Entrypoint is 0x%08x" % self.EntryPoint
# watchpoint = EntryPointWatchpoint( self.process, self.EntryPoint)
# self.process.watchpoints.add_function_call_watchpoint( watchpoint)
if self.BaseDllName.lower().endswith( '.dll'):
self.image_type = IMAGE_TYPE_DLL
elif self.BaseDllName.lower().endswith( '.exe'):
self.image_type = IMAGE_TYPE_EXE
#print "DLL: %s"%(self.BaseDllName.lower())
if self.valid or valid:
self.cached.entrypoint = int( self.ldr_data_table_entry.EntryPoint)
self.cached.sizeofimage = int( self.ldr_data_table_entry.SizeOfImage)
self.cached.dllbase = int( self.ldr_data_table_entry.DllBase)
self.cached.fulldllname = self.ldr_data_table_entry.FullDllName.str()
self.cached.basedllname = self.ldr_data_table_entry.BaseDllName.str()
if valid and self.process.watched and not hasattr( self, "pe"):
try:
self.pe = PE( VMemBackend( self.DllBase,
self.DllBase + self.SizeOfImage ),
self.BaseDllName,
True)
except PageFaultException, pagefault:
self.pending_pages.add( pagefault.value / PAGESIZE)
if valid and not self.exports_done and hasattr( self, "pe") and hasattr( self.pe.Exports, "ExportAddressTable"):
try:
self.exports.update(self.pe.Exports.all_exports())
self.process.symbols.update(self.exports)
self.exports_done = True
except PageFaultException, pagefault:
self.pending_pages.add( pagefault.value / PAGESIZE)
if not self.valid and valid and self.process.watched:
self.dump_pending = True
pending = False
for page in xrange( self.DllBase, self.DllBase + self.SizeOfImage, PAGESIZE):
try:
dummy = self.process.backend.read( page, 1)
except:
self.pending_pages.add( page / PAGESIZE)
pending = True
if pending:
pass
self.valid = valid
def dump( self):
start = self.DllBase
size = self.SizeOfImage
time = 0
try:
data = PyFlxInstrument.vmem_read( start, size)
tag = self.FullDllName
for p in xrange( start / PAGESIZE, (start + size) / PAGESIZE ):
if p in self.process.writes:
self.process.writes[ p].last_dumped = time
else:
self.process.writes[ p] = ModifiedPage( self, p)
self.process.writes[ p].last_dumped = time
self.dump_pending = False
except PageFaultException, pagefault:
self.pending_pages.add( pagefault.value / PAGESIZE)
#PyBochsC.pending_page( True)
# --- class Process ----------------------------------------------------
class Process( object):
def get_pid( self): return self.eprocess.UniqueProcessId
pid = property( get_pid)
def get_ppid( self): return self.eprocess.InheritedFromUniqueProcessId
ppid = property( get_ppid)
def get_cur_tid(self):
teb = self.kpcr.PrcbData.CurrentThread.deref().Teb
if teb.is_null():
return -1
return teb.deref().ClientId.UniqueThread
cur_tid = property(get_cur_tid)
def get_imagefilename( self): return self.eprocess.ImageFileName
ImageFileName = property( get_imagefilename)
def check_update_pending( self): return not self.valid or self.last_updated < self.last_seen
update_pending = property( check_update_pending)
def innovate( self):
self.innovated = True
def innovates( function):
#function decorator
def innovating_wrapper( self, *args, **kwargs):
self.innovate()
            return function( self, *args, **kwargs)
return innovating_wrapper
def ev_write( self, address, size):
# Convention: This is only called if the process is watched
# Writes from kernel space code should not be of interest
eip = PyFlxInstrument.eip()
if eip < USER_KERNEL_SPLIT and address + size < USER_KERNEL_SPLIT: # FIXME investigate: why is the write target limitation here?
self.shortterm_writes.add( address/256)
page = address / PAGESIZE
if page not in self.writes:
self.writes[ page] = ModifiedPage( self, page)
self.writes[ page].write(eip, address, size) # FIXME do we care about spilling writes across two pages?
return -1 # if the process is watched, we want to take note of writes happening from userspace code
else:
return 0
def dump_range( self, address):
# TODO:
# really dump ranges, attach tags, dump whole images if range falls within image
time = 0 #PyBochsC.emulator_time()
vad = self.vad_tree.by_address( address)
if vad != None:
start = vad.StartingVpn * PAGESIZE
end = (vad.EndingVpn + 1) * PAGESIZE
size = end-start
try:
t = DUMP_IMAGE
tag = vad.ControlArea.deref().FilePointer.deref().FileName.str()
except:
# Maybe packers like morphine modified the module lists for us?
image = self.get_image_by_address( address)
if image:
t = DUMP_IMAGE
tag = image.BaseDllName
else:
t = DUMP_UNSPECIFIED
tag = "anonymous"
try:
data = PyFlxInstrument.vmem_read( start, size)
t |= DUMP_FULL
except PageFaultException, pagefault:
print "Page fault when trying to dump", pagefault
# zero-pad missing memory
data = ""
print "trying to dump from 0x%08x to 0x%08x" % (start, end)
for i in xrange( start, end, PAGESIZE):
try:
data += PyFlxInstrument.vmem_read( i, PAGESIZE)
except PageFaultException:
data += '\0' * PAGESIZE
t |= DUMP_PARTIAL
# clear the sets:
page = address / PAGESIZE
writers = self.writes[ page].writers.copy()
while page in self.writes:
del self.writes[page]
page -= 1
page = address / PAGESIZE + 1 #self.writes[address/PAGESIZE] already clear
while page in self.writes:
del self.writes[page]
page += 1
print "about to insert a %u byte dump into the database, with type %u and tag %s" %( len(data), t, tag)
else:
raise Exception( "Executing non-existing memory?")
def pending_page( self):
if len( self.pending_pages) > 0:
return self.pending_pages.pop() * PAGESIZE
else:
for base in self.images:
if len( self.images[ base].pending_pages) > 0:
return self.images[ base].pending_pages.pop() * PAGESIZE
elif self.images[ base].dump_pending:
self.images[ base].dump()
return None
def print_stack( self, function, source, offset = 0):
function_name = function.name
ESP = PyFlxInstrument.genreg(PyBochsC.REG_ESP)
function_definition = []
for arg in function.arguments:
if type(arg.type) == pygccxml.declarations.cpptypes.pointer_t:
if str(arg.type.base) in ('xxxchar', 'char const'):
t = P(STR)
elif str(arg.type.base) in ('xxxwchar_t', 'wchar_t const'):
t = P(WSTR)
else:
t = "I"
elif type(arg.type) in (pygccxml.declarations.typedef.typedef_t, pygccxml.declarations.cpptypes.declarated_t):
if arg.type.declaration.name in ('LPCSTR', 'xxxLPSTR'):
t = P(STR)
elif arg.type.declaration.name in ('LPCWSTR','xxxLPWSTR'):
t = P(WSTR)
else:
dwords = arg.type.byte_size / 4
t = "I" * dwords # FIXME
else:
dwords = arg.type.byte_size / 4
t = "I" * dwords # FIXME
arg_definition = (arg.name, t)
function_definition.append(arg_definition)
stack = Stack(function_definition)( self.backend, ESP + offset)
output = []
for arg_def in function_definition:
arg = getattr( stack, arg_def[ 0])
if hasattr( arg, "deref"):
try:
output.append(u"%s = %s" % (arg_def[0], arg.deref()))
except PageFaultException:
output.append("%s = !0x%08x" % (arg_def[0], arg.offset))
except UnicodeEncodeError:
s = arg.deref()
output.append(u"%s = %s %u %s" % (arg_def[0],'+++',len(arg.deref()),unicode(s).encode('utf-8')))
except UnicodeDecodeError:
s = arg.deref()
str(s)
output.append(u"%s = %s %u %r" % (arg_def[0],'---',len(arg.deref()),str(s))) # FIXME UNICODE DECODE ERRORS
else:
output.append(u"%s = %s" % (arg_def[0], arg))
foo = u', '.join(output)
if offset:
print u"PPID %u/PID %u/TID %u/STOLEN/0x%08x -> %s(%r)" % (self.ppid,self.pid,self.cur_tid,source,unicode(function_name), foo)# FIXME UNICODE DECODE ERRORS
else:
print u"PPID %u/PID %u/TID %u/0x%08x -> %s(%r)" % (self.ppid,self.pid,self.cur_tid,source,unicode(function_name), foo)# FIXME UNICODE DECODE ERRORS
def ev_branch( self, source, target, type):
# Convention: This is only called if the process is watched
if target < USER_KERNEL_SPLIT:
#self.watchpoints.visit_location( target)
self.shortterm_branches.add( target/256)
func = None
source_image = self.get_image_by_address(source)
target_image = self.get_image_by_address(target)
if source_image == target_image:
pass
elif (source_image and source_image.DllBase == self.eprocess.Peb.deref().ImageBaseAddress and target_image) \
or (not source_image and target_image):
# store branches from within the image to other memory (for import reconstruction)
if target in self.symbols:
function_name = self.symbols[target][2]
if target not in self.gccxml_cache and function_name not in self.unknown_symbols:
self.innovate() # new, unknown branch target
try:
func = namespace.free_function(name=function_name)
self.gccxml_cache[target] = func
except pygccxml.declarations.matcher.declaration_not_found_t:
self.unknown_symbols.append(function_name)
except pygccxml.declarations.matcher.multiple_declarations_found_t:
# print "multiple matches for function '%s()'" % function_name
func = namespace.free_functions(name=function_name)[0]
self.gccxml_cache[target] = func
elif target in self.gccxml_cache:
func = self.gccxml_cache[target]
if func:
self.print_stack(func, source)
elif target not in self.symbols and source < USER_KERNEL_SPLIT: # kernel returns to userland addresses, but there's normally no symbol there
# interesting, target seems to be within a DLL, but there's no symbol at that address
# stolen bytes?
earlier_symbols = [address for address in self.symbols.keys() if address < target]
earlier_symbols.sort()
if earlier_symbols:
orig_target = target
target = earlier_symbols[-1]
address = target
stack_offset = 0
invalid = False
while address < orig_target:
insn = pydasm.get_instruction( PyFlxInstrument.vmem_read( address, 50, self.pdb), pydasm.MODE_32) # FIXME use real x86 instruction length limit here
#print pydasm.get_instruction_string(insn, pydasm.FORMAT_INTEL, address), insn.op1.reg, insn.op2.reg, insn.op3.reg
if not insn:
invalid = True
break
elif insn and insn.op1.reg == pydasm.REGISTER_ESP:
invalid = True # ESP is destroyed
elif insn.type == pydasm.INSTRUCTION_TYPE_POP:
stack_offset -= 4
elif insn.type == pydasm.INSTRUCTION_TYPE_PUSH:
stack_offset += 4
elif insn.type == pydasm.INSTRUCTION_TYPE_RET:
invalid = True # indicator of function boundary -> no luck for us
address += insn.length
candidate = self.symbols[target]
function_name = candidate[2]
if not invalid:
if target not in self.gccxml_cache and function_name not in self.unknown_symbols:
self.innovate() # new, unknown branch target
try:
func = namespace.free_function(name=function_name)
self.gccxml_cache[target] = func
except pygccxml.declarations.matcher.declaration_not_found_t:
self.unknown_symbols.append(function_name)
except pygccxml.declarations.matcher.multiple_declarations_found_t:
# multiple matches
func = namespace.free_functions(name=function_name)[0]
self.gccxml_cache[target] = func
elif target in self.gccxml_cache:
func = self.gccxml_cache[target]
if func:
self.print_stack(func, source, stack_offset)
else:
print "0x%08x -> 0x%08x: symbol at target not found, invalid candidate: %s, offset %u, image there is %s" % (source, orig_target, str(candidate),orig_target-target, target_image.BaseDllName)
pass
elif source_image and source_image.DllBase != self.eprocess.Peb.deref().ImageBaseAddress:
pass
page = target / PAGESIZE
if page in self.writes and target in self.writes[ page].writeset:
self.innovate()
print "executing 0x%08x -> 0x%08x" % (source, target)
self.dump_range( target)
return 1
else:
# not in user mode
return 0
def get_image_by_address( self, address):
bases = [base for base in self.images if base <= address]
bases.sort()
if bases:
image = self.images[bases[-1]]
else:
return None
if address <= image.DllBase + image.SizeOfImage:
return image
else:
return None
def __init__( self):
#self.pdb = pdb
#linear = PyBochsC.logical2linear( 0x30, 0, pdb)
linear = PyFlxInstrument.creg(4) #R_FS
self.backend = VMemBackend( 0, 0x100000000)
self.kpcr = KPCR( self.backend, linear)
self.watched = False
#self.watchpoints = Watchpoints( self)
self.symbols = {}
self.unknown_symbols = [] # insert symbols that pygccxml cannot find here
self.gccxml_cache = {}
self.pending_pages = set([])
self.images = {} # indexed by base address
self.valid = False
self.eprocess = None
self.last_seen = 0
self.last_updated = 0
self.vad_tree = VadTree( self)
self.writes = {}
self.last_executed_modified_page = None
self.innovated = False
self.dll_locations = set( [])
self.shortterm_writes = set( [])
self.shortterm_branches = set( [])
self.update()
def check_watched( self):
if not self.valid:
return False
return False #flx
if not self.watched:
imagefilename = self.kpcr.PrcbData.CurrentThread.deref().ApcState.Process.deref().ImageFileName
self.watched = globals()[ "samplename"].upper().startswith( imagefilename.strip( "\0").upper())
try:
ppid = self.ppid
except PageFaultException, pagefault:
self.pending_pages.add( pagefault.value / PAGESIZE)
#PyBochsC.pending_page( True)
return self.watched
for pdb in helper.processes:
try:
pid = helper.processes[ pdb].pid
except PageFaultException, pagefault:
self.pending_pages.add( pagefault.value / PAGESIZE)
#PyBochsC.pending_page( True)
continue
except AttributeError:
continue
if helper.processes[ pdb].watched and ppid == pid:
self.watched = True
break
if self.watched:
print "Now watching process with name '%s'" % imagefilename
self.innovate()
return self.watched
def update( self):
# Sanity check the data structures
valid = self.valid
if not valid:
valid = True
eprocess = self.kpcr.PrcbData.CurrentThread.deref().ApcState.Process.deref()
valid = valid and eprocess.CreateTime != 0
valid = valid and eprocess.ActiveThreads != 0
valid = valid and (eprocess.Peb.pointer & 0x7ff00000) == 0x7ff00000 # FIXME use named constant
valid = valid and eprocess.UniqueProcessId != 0
valid = valid and eprocess.InheritedFromUniqueProcessId != 0
# If all else fails, is this the System Process?
valid = valid or eprocess.ImageFileName.startswith( "System") \
and eprocess.UniqueProcessId == 4 \
and eprocess.InheritedFromUniqueProcessId == 0
# If all else fails, is this the Idle Process?
valid = valid or eprocess.ImageFileName.startswith( "Idle") \
and eprocess.UniqueProcessId == 4 \
and eprocess.InheritedFromUniqueProcessId == 0
if not self.valid and valid:
# new process
# print "New process '%s', PID %u, PPID %u" % (eprocess.ImageFileName, eprocess.UniqueProcessId, eprocess.InheritedFromUniqueProcessId)
# Cache eprocess - FIXME does doing this once suffice? is this even real caching( it's a StructuredData() after all)
self.eprocess = eprocess
if self.valid:
self.update_images()
self.valid = valid
self.check_watched()
self.last_updated = 0 #PyBochsC.emulator_time()
def update_images( self):
try:
eprocess = self.kpcr.PrcbData.CurrentThread.deref().ApcState.Process.deref()
except:
print "Could not fetch eprocess struct for process with page directory base 0x%08x" % self.pdb
return
try:
Peb = eprocess.Peb.deref()
except:
print "Could not fetch Peb pointed to by pointer at 0x%08x, pdb is 0x%08x" \
% (eprocess.Peb.offset, self.pdb)
return
try:
LdrData = eprocess.Peb.deref().Ldr.deref()
except:
print "Could not fetch LdrData pointed to by pointer at 0x%08x, pdb is 0x%08x" \
% ( eprocess.Peb.deref().Ldr.offset, self.pdb)
return
module_list = LdrData.InMemoryOrderModuleList
image = LdrData.InMemoryOrderModuleList.next()
while None != image:
if image.DllBase not in self.images:
# a new DLL was found in memory
self.innovate()
self.images[ image.DllBase] = Image( image, self)
elif not self.images[ image.DllBase].valid or not self.images[ image.DllBase].exports_done:
self.images[ image.DllBase].update()
elif self.watched and not hasattr( self.images[ image.DllBase], "pe"):
self.images[ image.DllBase].update()
image = LdrData.InMemoryOrderModuleList.next()
def enter( self):
if self.watched:
w = len( self.shortterm_writes)
b = len( self.shortterm_branches)
ratio = b and float( w) / float( b)
if w >= 50:
ratio = b and float( w) / float( b)
if ratio > 2:
self.innovate()
print "writes: %8u, branch targets: %6u, ratio: %04.2f" % ( w, b, ratio)
self.shortterm_writes.clear()
self.shortterm_branches.clear()
self.last_seen = 0 #PyBochsC.emulator_time()
# PyBochsC.pending_page( self.pending_pages != [])
if self.watched and self.innovated:
helper.sched_nonwatched = 0
self.innovated = False
elif self.valid and not self.eprocess.UniqueProcessId in (0,4):
helper.sched_nonwatched += 1
if not helper.sched_nonwatched % 200:
print helper.sched_nonwatched
if helper.sched_nonwatched > LIVENESS_BOUND and CHECK_LIVENESS:
print "No watched process appears to be live and showing progress, shutting down!"
#PyBochsC.shutdown()
pass
def leave( self):
pass
| gpl-2.0 | 7,401,991,622,607,399,000 | 40.86901 | 218 | 0.544525 | false |
jeroenh/OpenNSA | opennsa/backends/force10.py | 1 | 8993 | """
Force10 Backend.
This backend will only work with SSH version 2 capable Force10 switches.
This excludes most, if not all, of the etherscale series.
The backend has been developed for the E series.
The backend has been developed and tested on a Terascale E300 switch.
The switch (or router, depending on your level of pedanticness) is configured
by the backend logging via ssh, requesting a cli, and firing the necessary
command for configuring a VLAN. This approach was chosen over netconf / XML,
as a fairly reliable source said that not all the necessary functionality
needed was available via the previously mentioned interfaces.
Currently the backend does not support VLAN rewriting, and I am not sure if/how it
is supported.
Configuration:
To setup a VLAN connection:
configure
interface vlan $vlan_id
name $name
description $description
no shut
tagged $source_port
tagged $dest_port
end
Teardown:
configure
no interface vlan $vlan_id
end
Ensure that the interfaces are configured to be layer 2.
Ralph developed a backend for etherscale, from which a lot of the input for this
backend comes.
Authors: Henrik Thostrup Jensen <[email protected]>
Ralph Koning <[email protected]>
Copyright: NORDUnet (2011-2012)
"""
from twisted.python import log
from twisted.internet import defer
from opennsa import error, config
from opennsa.backends.common import calendar as reservationcalendar, simplebackend, ssh
LOG_SYSTEM = 'opennsa.force10'
COMMAND_CONFIGURE = 'configure'
COMMAND_END = 'end'
COMMAND_EXIT = 'exit'
COMMAND_WRITE = 'write' # writes config
COMMAND_INTERFACE_VLAN = 'interface vlan %(vlan)i'
COMMAND_NAME = 'name %(name)s'
COMMAND_NO_SHUTDOWN = 'no shutdown'
COMMAND_TAGGED = 'tagged %(interface)s'
COMMAND_NO_INTERFACE = 'no interface vlan %(vlan)i'
def _portToInterfaceVLAN(nrm_port):
interface, vlan = nrm_port.rsplit('.')
vlan = int(vlan)
return interface, vlan
def _createSetupCommands(source_nrm_port, dest_nrm_port):
s_interface, s_vlan = _portToInterfaceVLAN(source_nrm_port)
d_interface, d_vlan = _portToInterfaceVLAN(dest_nrm_port)
assert s_vlan == d_vlan, 'Source and destination VLANs differ, unpossible!'
name = 'opennsa-%i' % s_vlan
cmd_vlan = COMMAND_INTERFACE_VLAN % { 'vlan' : s_vlan }
cmd_name = COMMAND_NAME % { 'name' : name }
cmd_s_intf = COMMAND_TAGGED % { 'interface' : s_interface }
cmd_d_intf = COMMAND_TAGGED % { 'interface' : d_interface }
commands = [ cmd_vlan, cmd_name, cmd_s_intf, cmd_d_intf, COMMAND_NO_SHUTDOWN, COMMAND_END ]
return commands
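# A worked example with hypothetical port names, using the '<interface>.<vlan>'
# NRM port convention assumed above:
#
#   _createSetupCommands('GigabitEthernet 0/1.1710', 'GigabitEthernet 0/2.1710')
#
# would return:
#
#   ['interface vlan 1710', 'name opennsa-1710',
#    'tagged GigabitEthernet 0/1', 'tagged GigabitEthernet 0/2',
#    'no shutdown', 'end']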
def _createTeardownCommands(source_nrm_port, dest_nrm_port):
_, s_vlan = _portToInterfaceVLAN(source_nrm_port)
_, d_vlan = _portToInterfaceVLAN(dest_nrm_port)
assert s_vlan == d_vlan, 'Source and destination VLANs differ, unpossible!'
cmd_no_intf = COMMAND_NO_INTERFACE % { 'vlan' : s_vlan }
commands = [ cmd_no_intf, COMMAND_END ]
return commands
class SSHChannel(ssh.SSHChannel):
name = 'session'
def __init__(self, conn):
ssh.SSHChannel.__init__(self, conn=conn)
self.data = ''
self.wait_defer = None
self.wait_data = None
@defer.inlineCallbacks
def sendCommands(self, commands):
LT = '\r' # line termination
try:
log.msg('Requesting shell for sending commands', debug=True, system=LOG_SYSTEM)
yield self.conn.sendRequest(self, 'shell', '', wantReply=1)
d = self.waitForData('#')
self.write(COMMAND_CONFIGURE + LT)
yield d
log.msg('Entered configure mode', debug=True, system=LOG_SYSTEM)
for cmd in commands:
log.msg('CMD> %s' % cmd, debug=True, system=LOG_SYSTEM)
d = self.waitForData('#')
self.write(cmd + LT)
yield d
# not quite sure how to handle failure here
log.msg('Commands send, sending end command.', debug=True, system=LOG_SYSTEM)
d = self.waitForData('#')
self.write(COMMAND_END + LT)
yield d
log.msg('Configuration done, writing configuration.', debug=True, system=LOG_SYSTEM)
d = self.waitForData('#')
self.write(COMMAND_WRITE + LT)
yield d
log.msg('Configuration written. Exiting.', debug=True, system=LOG_SYSTEM)
d = self.waitForData('#')
self.write(COMMAND_EXIT + LT)
yield d
except Exception, e:
log.msg('Error sending commands: %s' % str(e))
raise e
log.msg('Commands successfully send', system=LOG_SYSTEM)
self.sendEOF()
self.closeIt()
def waitForData(self, data):
self.wait_data = data
self.wait_defer = defer.Deferred()
return self.wait_defer
def dataReceived(self, data):
if len(data) == 0:
pass
else:
self.data += data
if self.wait_data and self.wait_data in self.data:
d = self.wait_defer
self.data = ''
self.wait_data = None
self.wait_defer = None
d.callback(self)
class Force10CommandSender:
def __init__(self, host, port, ssh_host_fingerprint, user, ssh_public_key_path, ssh_private_key_path):
self.ssh_connection_creator = \
ssh.SSHConnectionCreator(host, port, [ ssh_host_fingerprint ], user, ssh_public_key_path, ssh_private_key_path)
@defer.inlineCallbacks
def _sendCommands(self, commands):
# Note: FTOS does not allow multiple channels in an SSH connection,
# so we open a connection for each request. Party like it is 1988.
# The "correct" solution for this would be to create a connection pool,
# but that won't happen just now.
log.msg('Creating new SSH connection', debug=True, system=LOG_SYSTEM)
ssh_connection = yield self.ssh_connection_creator.getSSHConnection()
try:
channel = SSHChannel(conn=ssh_connection)
ssh_connection.openChannel(channel)
yield channel.channel_open
yield channel.sendCommands(commands)
finally:
ssh_connection.transport.loseConnection()
def setupLink(self, source_nrm_port, dest_nrm_port):
log.msg('Setting up link: %s-%s' % (source_nrm_port, dest_nrm_port), debug=True, system=LOG_SYSTEM)
commands = _createSetupCommands(source_nrm_port, dest_nrm_port)
return self._sendCommands(commands)
def teardownLink(self, source_nrm_port, dest_nrm_port):
log.msg('Tearing down link: %s-%s' % (source_nrm_port, dest_nrm_port), debug=True, system=LOG_SYSTEM)
commands = _createTeardownCommands(source_nrm_port, dest_nrm_port)
return self._sendCommands(commands)
class Force10Backend:
def __init__(self, network_name, configuration):
self.network_name = network_name
self.calendar = reservationcalendar.ReservationCalendar()
# extract config items
cfg_dict = dict(configuration)
host = cfg_dict[config.FORCE10_HOST]
port = cfg_dict.get(config.FORCE10_PORT, 22)
host_fingerprint = cfg_dict[config.FORCE10_HOST_FINGERPRINT]
user = cfg_dict[config.FORCE10_USER]
ssh_public_key = cfg_dict[config.FORCE10_SSH_PUBLIC_KEY]
ssh_private_key = cfg_dict[config.FORCE10_SSH_PRIVATE_KEY]
self.command_sender = Force10CommandSender(host, port, host_fingerprint, user, ssh_public_key, ssh_private_key)
def createConnection(self, source_nrm_port, dest_nrm_port, service_parameters):
self._checkVLANMatch(source_nrm_port, dest_nrm_port)
# probably need a short hand for this
self.calendar.checkReservation(source_nrm_port, service_parameters.start_time, service_parameters.end_time)
self.calendar.checkReservation(dest_nrm_port , service_parameters.start_time, service_parameters.end_time)
self.calendar.addConnection(source_nrm_port, service_parameters.start_time, service_parameters.end_time)
self.calendar.addConnection(dest_nrm_port , service_parameters.start_time, service_parameters.end_time)
c = simplebackend.GenericConnection(source_nrm_port, dest_nrm_port, service_parameters, self.network_name, self.calendar,
'Force10 NRM', LOG_SYSTEM, self.command_sender)
return c
def _checkVLANMatch(self, source_nrm_port, dest_nrm_port):
source_vlan = source_nrm_port.split('.')[-1]
dest_vlan = dest_nrm_port.split('.')[-1]
if source_vlan != dest_vlan:
raise error.InvalidRequestError('Cannot create connection between different VLANs (%s/%s).' % (source_vlan, dest_vlan) )
| bsd-3-clause | 3,256,750,307,926,198,300 | 31.232975 | 132 | 0.644501 | false |
tangle70/Python | ssh-listfiles.py | 1 | 1039 | #!/bin/env/python
###################################################################################
#
# A script to list files in a directory via SSH using the paramiko module.
#
###################################################################################
import paramiko
def listFiles(srv, uname, passwd):
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(srv, username=uname, password=passwd)
stdin, stdout, stderr = ssh.exec_command('ls')
stdin.flush()
data = stdout
x = 0
print '################################################'
print srv
for line in data:
line = line.replace('\n','')
print ' ', line
except:
print '################################################'
        print 'ERROR: connecting to', srv
srv = 'srv'
uname = 'uname'
passwd = 'passwd'
listFiles(srv,uname,passwd)
print '################################################'
| gpl-2.0 | -1,191,662,196,745,778,700 | 30.484848 | 83 | 0.407122 | false |