repo_name (string, lengths 5-92) | path (string, lengths 4-232) | copies (string, 19 classes) | size (string, lengths 4-7) | content (string, lengths 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
REVLWorld/elasticsearch-dsl-py | elasticsearch_dsl/query.py | 1 | 8154 | from .utils import DslBase, _make_dsl_class
from .function import SF, ScoreFunction
__all__ = [
'Q', 'Bool', 'Boosting', 'Common', 'ConstantScore', 'DisMax', 'Filtered',
'FunctionScore', 'Fuzzy', 'FuzzyLikeThis', 'FuzzyLikeThisField',
'GeoShape', 'HasChild', 'HasParent', 'Ids', 'Indices', 'Match', 'MatchAll',
'MatchPhrase', 'MatchPhrasePrefix', 'MoreLikeThis', 'MoreLikeThisField',
'MultiMatch', 'Nested', 'Prefix', 'Query', 'QueryString', 'Range',
'Regexp', 'SF', 'ScoreFunction', 'SimpleQueryString', 'SpanFirst',
'SpanMulti', 'SpanNear', 'SpanNot', 'SpanOr', 'SpanTerm', 'Template',
'Term', 'Terms', 'TopChildren', 'Wildcard'
]
def Q(name_or_query='match_all', **params):
# {"match": {"title": "python"}}
if isinstance(name_or_query, dict):
if params:
raise ValueError('Q() cannot accept parameters when passing in a dict.')
if len(name_or_query) != 1:
raise ValueError('Q() can only accept dict with a single query ({"match": {...}}). '
'Instead it got (%r)' % name_or_query)
name, params = name_or_query.copy().popitem()
return Query.get_dsl_class(name)(**params)
# MatchAll()
if isinstance(name_or_query, Query):
if params:
raise ValueError('Q() cannot accept parameters when passing in a Query object.')
return name_or_query
# s.query = Q('filtered', query=s.query)
if hasattr(name_or_query, '_proxied'):
return name_or_query._proxied
# "match", title="python"
return Query.get_dsl_class(name_or_query)(**params)
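# Editor's sketch (illustrative, not part of the original module): the three accepted inputs
# shown in the comments above all resolve to the same query object, e.g.
#   Q("match", title="python")
#   Q({"match": {"title": "python"}})
#   Q(Match(title="python"))
# The resulting Query objects can then be combined with the operators defined below, e.g.
#   Q("match", title="python") | Q("match", title="django")   # -> Bool(should=[...])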
class Query(DslBase):
_type_name = 'query'
_type_shortcut = staticmethod(Q)
name = None
def __add__(self, other):
# make sure we give queries that know how to combine themselves
# preference
if hasattr(other, '__radd__'):
return other.__radd__(self)
return Bool(must=[self, other])
def __invert__(self):
return Bool(must_not=[self])
def __or__(self, other):
# make sure we give queries that know how to combine themselves
# preference
if hasattr(other, '__ror__'):
return other.__ror__(self)
return Bool(should=[self, other])
def __and__(self, other):
# make sure we give queries that know how to combine themselves
# preference
if hasattr(other, '__rand__'):
return other.__rand__(self)
return Bool(must=[self, other])
class MatchAll(Query):
name = 'match_all'
def __add__(self, other):
return other._clone()
__and__ = __rand__ = __radd__ = __add__
def __or__(self, other):
return self
__ror__ = __or__
EMPTY_QUERY = MatchAll()
class Bool(Query):
name = 'bool'
_param_defs = {
'must': {'type': 'query', 'multi': True},
'should': {'type': 'query', 'multi': True},
'must_not': {'type': 'query', 'multi': True},
'filter': {'type': 'query', 'multi': True},
}
def __add__(self, other):
q = self._clone()
if isinstance(other, Bool):
q.must += other.must
q.should += other.should
q.must_not += other.must_not
q.filter += other.filter
else:
q.must.append(other)
return q
__radd__ = __add__
def __or__(self, other):
for q in (self, other):
if isinstance(q, Bool) and len(q.should) == 1 and not any((q.must, q.must_not, q.filter)):
other = self if q is other else other
q = q._clone()
q.should.append(other)
return q
return Bool(should=[self, other])
__ror__ = __or__
def __invert__(self):
# special case for single negated query
if not (self.must or self.should or self.filter) and len(self.must_not) == 1:
return self.must_not[0]._clone()
# bool without should, just flip must and must_not
elif not self.should:
q = self._clone()
q.must, q.must_not = q.must_not, q.must
if q.filter:
q.filter = [Bool(must_not=q.filter)]
return q
# TODO: should -> must_not.append(Bool(should=self.should)) ??
# queries with should just invert normally
return super(Bool, self).__invert__()
def __and__(self, other):
q = self._clone()
if isinstance(other, Bool):
q.must += other.must
q.must_not += other.must_not
q.filter += other.filter
q.should = []
for qx in (self, other):
# TODO: percentages will fail here
min_should_match = getattr(qx, 'minimum_should_match', 0 if (qx.must or qx.filter) else 1)
# all subqueries are required
if len(qx.should) <= min_should_match:
q.must.extend(qx.should)
# not all of them are required, use it and remember min_should_match
elif not q.should:
q.minimum_should_match = min_should_match
q.should = qx.should
# not all are required, add a should list to the must with proper min_should_match
else:
q.must.append(Bool(should=qx.should, minimum_should_match=min_should_match))
else:
if not (q.must or q.filter) and q.should:
q._params.setdefault('minimum_should_match', 1)
q.must.append(other)
return q
__rand__ = __and__
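# Editor's sketch (illustrative, not original code) of how the Bool operators above behave:
#   Q("match", title="python") & Q("match", category="web")
#   # -> Bool(must=[Match(title="python"), Match(category="web")])
#   ~Q("bool", must_not=[Q("match", title="python")])
#   # -> Match(title="python")  (the single negated query is un-wrapped by __invert__)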
class FunctionScore(Query):
name = 'function_score'
_param_defs = {
'query': {'type': 'query'},
'filter': {'type': 'query'},
'functions': {'type': 'score_function', 'multi': True},
}
def __init__(self, **kwargs):
if 'functions' in kwargs:
pass
else:
fns = kwargs['functions'] = []
for name in ScoreFunction._classes:
if name in kwargs:
fns.append({name: kwargs.pop(name)})
super(FunctionScore, self).__init__(**kwargs)
QUERIES = (
# compound queries
('boosting', {'positive': {'type': 'query'}, 'negative': {'type': 'query'}}),
('constant_score', {'query': {'type': 'query'}, 'filter': {'type': 'query'}}),
('dis_max', {'queries': {'type': 'query', 'multi': True}}),
('filtered', {'query': {'type': 'query'}, 'filter': {'type': 'query'}}),
('indices', {'query': {'type': 'query'}, 'no_match_query': {'type': 'query'}}),
# relationship queries
('nested', {'query': {'type': 'query'}}),
('has_child', {'query': {'type': 'query'}}),
('has_parent', {'query': {'type': 'query'}}),
('top_children', {'query': {'type': 'query'}}),
# compound span queries
('span_first', {'match': {'type': 'query'}}),
('span_multi', {'match': {'type': 'query'}}),
('span_near', {'clauses': {'type': 'query', 'multi': True}}),
('span_not', {'exclude': {'type': 'query'}, 'include': {'type': 'query'}}),
('span_or', {'clauses': {'type': 'query', 'multi': True}}),
# core queries
('common', None),
('fuzzy', None),
('fuzzy_like_this', None),
('fuzzy_like_this_field', None),
('geo_bounding_box', None),
('geo_distance', None),
('geo_distance_range', None),
('geo_polygon', None),
('geo_shape', None),
('geohash_cell', None),
('ids', None),
('limit', None),
('match', None),
('match_phrase', None),
('match_phrase_prefix', None),
('exists', None),
('missing', None),
('more_like_this', None),
('more_like_this_field', None),
('multi_match', None),
('prefix', None),
('query_string', None),
('range', None),
('regexp', None),
('simple_query_string', None),
('span_term', None),
('template', None),
('term', None),
('terms', None),
('wildcard', None),
('script', None),
('type', None),
)
# generate the query classes dynamically
for qname, params_def in QUERIES:
qclass = _make_dsl_class(Query, qname, params_def)
globals()[qclass.__name__] = qclass
| apache-2.0 | -1,174,660,242,213,295,000 | 33.697872 | 106 | 0.536056 | false |
lambdamusic/xtm-hacking | extras/gendocs/parse copy.py | 1 | 4574 | #!/usr/bin/env python
"""
# extracts function definitions from xtm source code; returns a structure consisting of a list of metadata dictionaries (one per function)
USAGE
python parse.py > data.json
"""
import os, sys
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
# get this path
_dirname, _filename = os.path.split(os.path.abspath(__file__))
# for dynamic links generation
GITHUB_BASE_URL = "https://github.com/digego/extempore/tree/0.6.0/"
# location of extempore src
DEFAULT_XTMDIR = ['/usr/local/Cellar/extempore/0.6.0/share/extempore/']
##
### main functions
##
def parse_extempore_src(_DIRS=DEFAULT_XTMDIR, noexamples=True, notests=True):
"""
recursive src files browser & extractor
<noexamples, notests>: flags to avoid parsing those folders
"""
index = []
for path in _DIRS:
print >> sys.stderr, "******** Extracting documentation from: \n=> " + _DIRS[0] + " ==\n********"
for root, dirs, files in os.walk(path):
for file in files:
if noexamples and "/share/extempore/examples" in root:
continue
elif notests and "/share/extempore/tests" in root:
continue
else:
if file.endswith(".xtm"):
print >> sys.stderr, root + "/" + file
index += _parse_onefile(root + "/" + file, path)
index = sorted(index, key=lambda x: x['name'])
return index
def _parse_onefile(f, original_path, IGNORE_CONSTANTS=True):
"""
extract definitions
eg (define *gm-kick* 35)
returns a list of dicts
[{'name' : titlebuffer, 'codepygments' : linebuffer, 'file' : f, 'url' : url, 'group' : group}]
@todo: also extract the function comments
"""
output = []
lines = open(f).read().splitlines()
titlebuffer = None
linebuffer = []
def _genData(linebuffer, titlebuffer, f, original_path):
"""wrapping common func"""
# add pygments
lexer = get_lexer_by_name("scheme", stripall=True)
result = highlight(linebuffer, lexer, HtmlFormatter())
# hardcode github url
url = f.replace(original_path, GITHUB_BASE_URL)
return [{'name' : titlebuffer,
# 'code' : _saveSpaces(linebuffer),
'codepygments' : result,
'file' : f,
'url' : url,
'group' : inferGroup(titlebuffer) }]
for line in lines:
# print line
if titlebuffer and linebuffer and not line:
# it's a definition delimited by an empty line => save
output += _genData(linebuffer, titlebuffer, f, original_path)
titlebuffer = None
linebuffer = []
elif line.startswith("(define ") or line.startswith("(bind-func "):
# it's a definition delimited by a new def => save
# but of course the first time round <linebuffer> is empty
#
if linebuffer:
output += _genData(linebuffer, titlebuffer, f, original_path)
lline = line.split()
# get function name
titlebuffer = _getTitle(lline[1])
# titlebuffer = _remove_parenthesis(lline[1])
if IGNORE_CONSTANTS and titlebuffer.startswith("*"):
titlebuffer = None
linebuffer = None
else:
linebuffer = line
else:
# print line + "/n/n"
if titlebuffer:
linebuffer += "\n" + line
return output
# ;;;;;;;;;;;;;;;;;;;;;;;;
# ;;;;;;; UTILS ;;;;;;;;
# ;;;;;;;;;;;;;;;;;;;;;;;;;;
def _getTitle(s):
s = _remove_parenthesis(s)
if "[" and "]" in s:
# eg (bind-func qbuf_push:[void,QBuffer*,!a]*
s = s.split(":")[0]
return s
def _remove_parenthesis(s):
s = s.replace("(", "")
s = s.replace(")", "")
return s
def _saveSpaces(line):
# note: as written this appears to be a no-op (replacing a space with a space); presumably the
# original substituted an HTML-safe space such as "&nbsp;" to preserve indentation in the output
return line.replace(" ", " ")
def inferGroup(titlebuffer):
"""infers the function prefix"""
if titlebuffer[0] == "*":
return "*var*"
if titlebuffer[0] in ["-", "_"]: #["*", "-", "_"]
#strip first letter
titlebuffer = titlebuffer[1:]
idx = titlebuffer.rfind(":")
# print idx, titlebuffer[:idx+1]
if idx > 0:
return titlebuffer[:idx+1]
else:
return "top level" # default
# ;;;;;;;;;;;;;;;;;;;;;;;;
# ;;;;;;; main Caller ;;;;;;;;
# ;;;;;;;;;;;;;;;;;;;;;;;;;;
def main(args):
if len(args) < 2:
# DEFAULT - IMPORTANT: if not starting at ../share/extempore/ level the links to GitHub will be broken
DIRS = DEFAULT_XTMDIR
else:
_dir = args[1]
if _dir[-1] != "/": # ps: must end with slash
_dir += "/"
DIRS = [_dir]
if os.path.isdir(DIRS[0]):
##
print parse_extempore_src(DIRS)
##
else:
print >> sys.stderr, "Directory does not exist"
if __name__ == '__main__':
import sys
try:
main(sys.argv)
sys.exit(0)
except KeyboardInterrupt, e: # Ctrl-C
raise e
| apache-2.0 | -4,090,661,951,149,218,300 | 19.328889 | 132 | 0.613467 | false |
google/digitalbuildings | tools/validators/instance_validator/validate/telemetry_warning.py | 1 | 1521 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Container for a telemetry validation error."""
class TelemetryWarning(object):
"""Container for a telemetry validation warning.
Args:
entity: name of the entity with the warning
point: name of the point with the warning (can be None)
message: specific warning message
"""
def __init__(self, entity, point, message):
super().__init__()
self.entity = entity
self.point = point
self.message = message
def __eq__(self, other):
if not isinstance(other, TelemetryWarning):
return NotImplemented
return (self.entity == other.entity and
self.point == other.point and
self.message == other.message)
def GetPrintableMessage(self):
"""Returns a human-readable message that explains this warning."""
msg = '- entity [{0}]'.format(self.entity)
if self.point:
msg += ', point [{0}]'.format(self.point)
msg += ': {0}\n'.format(self.message)
return msg
| apache-2.0 | 6,332,066,584,468,475,000 | 32.065217 | 74 | 0.69165 | false |
vrpolak/slowsort | mutable_stable_lazy_zigzag_pairing_heap.py | 1 | 4167 | """Module that defines mutable stable zigzag pairing heap."""
from pep_3140 import Deque
from pep_3140 import List
from sorted_using_heap import sorted_using_mutable_stable_heap
from mutable_priority_queue import MutablePriorityQueue
class MutableStableLazyZigzagPairingHeap(MutablePriorityQueue):
"""A heap that is mutable, stable, lazy, and zigzag pairing.
Heap: An implementation, usable as a queue, least priority value in, first out.
Lazy: Least element is determined only upon pop, in the hope of getting more relevant comparisons.
Mutable: Self is altered regularly to avoid excessive object creation.
Stable: Two include methods to allow caller decide tiebreaker.
Pairing: Most subheap comparisons are on pairs of "equal" sub-heaps.
Zigzag: The odd sub-heap is left at alternating ends.
This implementation uses Deque to store ordered collection of sub-heaps."""
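# Example usage (editor's sketch, assuming pep_3140.Deque behaves like collections.deque):
#   heap = MutableStableLazyZigzagPairingHeap()
#   for value in [3, 1, 2]:
#       heap.add(value)
#   heap.peek()  # -> 1; comparisons are deferred until this point (lazy)
#   heap.pop()   # -> 1, then 2, then 3 on subsequent pops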
def __init__(self, top_item=None, forest=None):
"""Initialize a queue."""
self.top_item = top_item
self.forest = forest if forest is not None else Deque()
def ensure_top_demoted(self):
"""In case heap has a top, demote it so merge is easier."""
if self.top_item is None:
return
demoted = MutableStableLazyZigzagPairingHeap(self.top_item, self.forest)
self.top_item = None
self.forest = Deque([demoted])
def add(self, item):
"""Add item to self, prioritized after current items, do not compare yet."""
self.ensure_top_demoted()
self.forest.append(MutableStableLazyZigzagPairingHeap(top_item=item))
def _include_after(self, heap):
"""Include another heap, prioritized after current items."""
self.forest.append(heap)
def _include_before(self, heap):
"""Include another heap, prioritized before current items."""
self.forest.appendleft(heap)
def peek(self):
"""Return least priority item, this includes promoting top, but not extraction."""
self.ensure_top_promoted()
return self.top_item
def pop(self):
"""If not empty, extract the least item from self and return that."""
self.ensure_top_promoted()
item = self.top_item
self.top_item = None
return item
# TODO: Merge this into peek(), weak heaps suggest that makes things faster. Or is it not bothering with len?
def ensure_top_promoted(self):
"""Do pairwise includes in zigzag fashion until there is only one tree. Then upgrade."""
if (self.top_item is not None) or (not self.forest):
return
while len(self.forest) > 1:
# zig
new_forest = Deque()
while len(self.forest) > 1:
latter = self.forest.pop()
former = self.forest.pop()
# Sub-heaps should be nonempty and have top promoted.
if latter.top_item < former.top_item:
latter._include_before(former)
new_forest.appendleft(latter)
else:
former._include_after(latter)
new_forest.appendleft(former)
if self.forest:
new_forest.appendleft(self.forest.pop())
self.forest = new_forest
# zag
new_forest = Deque()
while len(self.forest) > 1:
former = self.forest.popleft()
latter = self.forest.popleft()
if latter.top_item < former.top_item:
latter._include_before(former)
new_forest.append(latter)
else:
former._include_after(latter)
new_forest.append(former)
if self.forest:
new_forest.append(self.forest.pop())
self.forest = new_forest
new_state = self.forest.pop()
self.top_item = new_state.top_item
self.forest = new_state.forest
def mslzph_sorted(source):
"""Return new List of items, sorted using the mslzp heap."""
return sorted_using_mutable_stable_heap(MutableStableLazyZigzagPairingHeap, source)
| agpl-3.0 | -1,490,482,913,868,144,000 | 40.257426 | 113 | 0.61963 | false |
LalatenduMohanty/imagefactory | imagefactory_plugins/Docker/Docker.py | 1 | 20234 | #!/usr/bin/python
#
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zope
import libxml2
import json
import os
import os.path
import struct
import subprocess
import datetime
import random
import shutil
import oz.TDL
import tempfile
import tarfile
import threading
import subprocess
import hashlib
from xml.etree.ElementTree import fromstring
from imgfac.Template import Template
from imgfac.ApplicationConfiguration import ApplicationConfiguration
from imgfac.BuildDispatcher import BuildDispatcher
from imgfac.ImageFactoryException import ImageFactoryException
from imgfac.CloudDelegate import CloudDelegate
from imgfac.FactoryUtils import launch_inspect_and_mount, shutdown_and_close, remove_net_persist, create_cloud_info, parameter_cast_to_bool
class Docker(object):
zope.interface.implements(CloudDelegate)
compress_commands = { "xz": "xz -T 0 --stdout %s > %s",
"gzip": "gzip -c %s > %s",
"bzip2": "bzip2 -c %s > %s" }
# The templates below allow us to generate base images without a running docker locally
# [email protected] - 26-Aug-2014
# We know of at least two different output JSON formats. These relate to some JSON marshaling
# changes in the docker 1.0.0 timeframe. At the time of this comment, the upstream registry will
# only accept the older 0.11.1 format which is what we default to.
# Note that there is a separate "VERSION" file in each subdirectory. As of this comment
# that file always contains 1.0
# TODO: Get rid of these silly string templates and just use the json module and dicts
#
# vbatts pointed out that creating these as string templates is kind of silly
# since we can just build them up as nested dicts and use json tools to create
# the required strings. I originally used strings to ensure absolute fidelity to
# the observed docker output, but there's no real technical reason to do this
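# Editor's sketch (assumption, not original code) of the dict-based approach described above:
#   meta = {"id": idstring, "comment": commentstring, "created": createdtime,
#           "docker_version": "0.11.1", "architecture": arch, "os": os_name, "Size": size}
#   image_json = json.dumps(meta)
# The string templates below are kept verbatim to preserve the exact observed docker output.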
docker_json_template_0_11_1 = """{{
"id": "{idstring}",
"comment": "{commentstring}",
"created": "{createdtime}",
"container_config": {{
"Cmd": {cmd},
"Env": {env},
"Labels": {label},
"StdinOnce": false,
"OpenStdin": false,
"Tty": false,
"ExposedPorts": null,
"AttachStdin": false,
"AttachStdout": false,
"Image": "",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"NetworkDisabled": false,
"OnBuild": null,
"CpuShares": 0,
"MemorySwap": 0,
"Memory": 0,
"User": "",
"Domainname": "",
"Hostname": "",
"AttachStderr": false,
"PortSpecs": null
}},
"docker_version": "0.11.1",
"architecture": "{arch}",
"os": "{os}",
"Size": {size}
}}"""
docker_json_template_1_0_0 = """{{
"Comment": "{commentstring}",
"Container": "",
"DockerVersion": "1.0.0",
"Parent": "",
"Author": "",
"Os": "{os}",
"Created": "{createdtime}",
"Architecture": "{arch}",
"ContainerConfig": {{
"MemorySwap": 0,
"Hostname": "",
"Entrypoint": null,
"PortSpecs": null,
"Memory": 0,
"OnBuild": null,
"OpenStdin": false,
"Cpuset": "",
"Env": {env},
"User": "",
"CpuShares": 0,
"AttachStdout": false,
"NetworkDisabled": false,
"WorkingDir": "",
"Cmd": {cmd},
"Labels": {label},
"StdinOnce": false,
"AttachStdin": false,
"Volumes": null,
"Tty": false,
"AttachStderr": false,
"Domainname": "",
"Image": "",
"ExposedPorts": null
}},
"Config": null,
"Id": "{idstring}",
"Size": {size}
}}
"""
docker_json_template_1_7_0 = """{{
"Size": {size},
"architecture": "{arch}",
"comment": "{commentstring}",
"config": {{
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": {cmd},
"Domainname": "",
"Entrypoint": null,
"Env": {env},
"ExposedPorts": null,
"Hostname": "",
"Image": "",
"Labels": {label},
"MacAddress": "",
"NetworkDisabled": false,
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Systemd": false,
"Tty": false,
"User": "",
"VolumeDriver": "",
"Volumes": null,
"WorkingDir": ""
}},
"container_config": {{
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": null,
"Domainname": "",
"Entrypoint": null,
"Env": null,
"ExposedPorts": null,
"Hostname": "",
"Image": "",
"Labels": null,
"MacAddress": "",
"NetworkDisabled": false,
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Systemd": false,
"Tty": false,
"User": "",
"VolumeDriver": "",
"Volumes": null,
"WorkingDir": ""
}},
"created": "{createdtime}",
"docker_version": "1.7.0",
"id": "{idstring}",
"os": "{os}"
}}"""
docker_json_template_1_10_1 = """{{
"Size": {size},
"architecture": "{arch}",
"comment": "{commentstring}",
"config": {{
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": {cmd},
"Domainname": "",
"Entrypoint": null,
"Env": {env},
"ExposedPorts": null,
"Hostname": "",
"Image": "",
"Labels": {label},
"MacAddress": "",
"NetworkDisabled": false,
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Systemd": false,
"Tty": false,
"User": "",
"VolumeDriver": "",
"Volumes": null,
"WorkingDir": ""
}},
"container_config": {{
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": null,
"Domainname": "",
"Entrypoint": null,
"Env": null,
"ExposedPorts": null,
"Hostname": "",
"Image": "",
"Labels": null,
"MacAddress": "",
"NetworkDisabled": false,
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Systemd": false,
"Tty": false,
"User": "",
"VolumeDriver": "",
"Volumes": null,
"WorkingDir": ""
}},
"created": "{createdtime}",
"docker_version": "1.10.1",
"id": "{idstring}",
"os": "{os}"
}}"""
docker_templates_dict = { "0.11.1": docker_json_template_0_11_1,
"1.0.0": docker_json_template_1_0_0,
"1.7.0": docker_json_template_1_7_0,
"1.10.1": docker_json_template_1_10_1 }
def __init__(self):
super(Docker, self).__init__()
self.app_config = ApplicationConfiguration().configuration
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
def activity(self, activity):
# Simple helper function
# Activity should be a one line human-readable string indicating the task in progress
# We log it at DEBUG and also set it as the status_detail on our active image
self.log.debug(activity)
self.active_image.status_detail['activity'] = activity
def push_image_to_provider(self, builder, provider, credentials, target, target_image, parameters):
raise ImageFactoryException("Pushing not currently supported for Docker image builds")
def snapshot_image_on_provider(self, builder, provider, credentials, template, parameters):
# TODO: Implement snapshot builds
raise ImageFactoryException("Snapshot builds not currently supported for Docker")
def _generate_docker_id(self):
# return a random 64 digit hex number
did = ""
for i in range(8):
did += "%08x" % (random.randint(0, 2 ** 32))
return did
def _file_sha256(self, filename):
f = open(filename, "rb")
hasher = hashlib.sha256()
while True:
chunk = f.read(2**20)
if not chunk:
break
hasher.update(chunk)
f.close()
return hasher.hexdigest()
def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
self.log.debug("builder_should_create_target_image called for Docker plugin - doing all our work here then stopping the process")
tdlobj = oz.TDL.TDL(xmlstring=template.xml, rootpw_required=self.app_config["tdl_require_root_pw"])
# At this point our input base_image is available as builder.base_image.data
# We simply mount it up in libguestfs and tar out the results as builder.target_image.data
wrap_metadata = parameter_cast_to_bool(parameters.get('create_docker_metadata', True))
compress_type = parameters.get('compress', None)
if compress_type:
if compress_type in self.compress_commands.keys():
compress_command = self.compress_commands[compress_type]
else:
raise Exception("Passed unknown compression type (%s) for Docker plugin" % (compress_type))
else:
compress_command = None
guestfs_handle = launch_inspect_and_mount(builder.base_image.data, readonly = True)
storagedir = os.path.dirname(builder.target_image.data)
# guestfs lets us mount locally via the API, which is cool, but requires that
# we call a blocking function to activate the mount, which requires a thread
# We also need a temp dir to mount it to - do our best to clean up when things
# go wrong
tempdir = None
fuse_thread = None
try:
tempdir = tempfile.mkdtemp(dir=storagedir)
self.log.debug("Mounting input image locally at (%s)" % (tempdir))
guestfs_handle.mount_local(tempdir)
def _run_guestmount(g):
g.mount_local_run()
self.log.debug("Launching mount_local_run thread")
fuse_thread = threading.Thread(group=None, target=_run_guestmount, args=(guestfs_handle,))
fuse_thread.start()
self.log.debug("Creating tar of entire image")
# NOTE - we used to capture xattrs here but have reverted the change for now
# as SELinux xattrs break things in unexpected ways and the tar feature
# to allow selective inclusion is broken
# TODO: Follow up with tar maintainers and docker image creators to find out what
# if any xattrs we really need to capture here
tarcmd = [ 'tar', '-cf', builder.target_image.data, '-C', tempdir ]
# User may pass in a comma separated list of additional options to the tar command
tar_options = parameters.get('tar_options', None)
if tar_options:
tar_options_list=tar_options.split(',')
for option in tar_options_list:
tarcmd.append(option.strip())
# User may pass in a comma separated list of excludes to override this
# Default to ./etc/fstab as many people have complained this does not belong in Docker images
tar_excludes = parameters.get('tar_excludes', './etc/fstab').split(',')
for exclude in tar_excludes:
tarcmd.append('--exclude=%s' % (exclude.strip()))
tarcmd.append('./')
self.log.debug("Command: %s" % (str(tarcmd)))
subprocess.check_call(tarcmd)
if wrap_metadata:
self.log.debug("Estimating size of tar contents to include in Docker metadata")
size = 0
for root, dirs, files in os.walk(tempdir):
for name in files:
fp = os.path.join(root,name)
if os.path.isfile(fp) and not os.path.islink(fp):
size += os.path.getsize(fp)
self.log.debug("Total real file content size (%d)" % (size))
except Exception, e:
self.log.exception(e)
raise
finally:
if tempdir:
try:
subprocess.check_call( ['umount', '-f', tempdir] )
os.rmdir(tempdir)
except Exception, e:
self.log.exception(e)
self.log.error("WARNING: Could not unmount guest at (%s) - may still be mounted" % (tempdir) )
if fuse_thread:
fuse_thread.join(30.0)
if fuse_thread.isAlive():
self.log.error("Guestfs local mount thread is still active - FUSE filesystem still mounted at (%s)" % (tempdir) )
if wrap_metadata:
# Get any parameters and if they are not set, create our defaults
# Docker image names should not have uppercase characters
# https://fedorahosted.org/cloud/ticket/131
repository = parameters.get('repository',tdlobj.name).lower()
tag = parameters.get('tag','latest')
docker_image_id = parameters.get('docker_image_id', self._generate_docker_id())
cmd = parameters.get('docker_cmd', 'null')
env = parameters.get('docker_env', 'null')
label = parameters.get('docker_label', 'null')
rdict = { repository: { tag: docker_image_id } }
dockerversion = parameters.get('dockerversion', '0.11.1')
if not dockerversion in self.docker_templates_dict:
raise Exception("No docker JSON template available for specified docker version (%s)" % (dockerversion))
docker_json_template=self.docker_templates_dict[dockerversion]
arch = tdlobj.arch
if arch == "x86_64":
arch = "amd64"
elif arch == "armv7hl":
arch = "armhfp"
tdict = { }
tdict['commentstring'] = parameters.get('comment', 'Created by Image Factory')
tdict['os'] = parameters.get('os', 'linux')
tdict['createdtime'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
tdict['arch'] = arch
tdict['idstring'] = docker_image_id
tdict['cmd'] = cmd
tdict['env'] = env
tdict['label'] = label
tdict['size'] = size
image_json = docker_json_template.format(**tdict)
# v2 images
# TODO: Something significantly less hacky looking.....
if dockerversion == "1.10.1":
shasum = self._file_sha256(builder.target_image.data)
image_v2_config = json.loads(image_json)
# The new top level JSON file is a light modification of the layer JSON
del image_v2_config['Size']
del image_v2_config['id']
image_v2_config['history'] = [ { 'comment': image_v2_config['comment'],
'created': image_v2_config['created'] } ]
image_v2_config['rootfs'] = { 'diff_ids': [ "sha256:%s" % (shasum) ],
'type': 'layers' }
# Docker wants this config file to be named after its own sha256 sum
image_v2_config_id = hashlib.sha256(json.dumps(image_v2_config)).hexdigest()
image_v2_manifest = [ { "Config": "%s.json" % (image_v2_config_id),
"Layers": [ "%s/layer.tar" % (docker_image_id) ],
"RepoTags": [ "%s:%s" % (repository, tag) ] } ]
# Create directory
storagedir = os.path.dirname(builder.target_image.data)
tempdir = None
try:
tempdir = tempfile.mkdtemp(dir=storagedir)
self.log.debug("Creating docker image directory structure in (%s)" % (tempdir))
repositories_path = os.path.join(tempdir,'repositories')
repositories = open(repositories_path,"w")
json.dump(rdict, repositories)
repositories.close()
if dockerversion == "1.10.1":
config_path = os.path.join(tempdir, '%s.json' % (image_v2_config_id))
config = open(config_path, "w")
json.dump(image_v2_config, config)
config.close()
manifest_path = os.path.join(tempdir, 'manifest.json')
manifest = open(manifest_path, "w")
json.dump(image_v2_manifest, manifest)
manifest.close()
imagedir = os.path.join(tempdir, docker_image_id)
os.mkdir(imagedir)
jsonfile_path = os.path.join(imagedir,'json')
jsonfile = open(jsonfile_path,'w')
jsonfile.write(image_json)
jsonfile.close()
versionfile_path = os.path.join(imagedir,'VERSION')
versionfile = open(versionfile_path, 'w')
# TODO - Track version developments and compatibility
versionfile.write("1.0")
versionfile.close()
layerfile_path = os.path.join(imagedir,'layer.tar')
shutil.move(builder.target_image.data, layerfile_path)
outtar = tarfile.TarFile(name=builder.target_image.data, mode="w")
# It turns out that in at least some configurations or versions, Docker will
# complain if the repositories file is not the last file in the archive
# we add our single image directory first and then the repositories file to
# avoid this
outtar.add(imagedir, arcname=docker_image_id)
outtar.add(repositories_path, arcname='repositories')
if dockerversion == "1.10.1":
outtar.add(config_path, arcname='%s.json' % (image_v2_config_id))
outtar.add(manifest_path, arcname='manifest.json')
outtar.close()
finally:
if tempdir:
try:
shutil.rmtree(tempdir)
except:
self.log.warning("Error encountered when removing temp dir (%s) - may not have been deleted" % (tempdir))
if compress_command:
self.log.debug("Compressing tar file using %s" % (compress_type))
rawimage = builder.target_image.data
compimage = builder.target_image.data + ".tmp.%s" % (compress_type)
result = subprocess.call(compress_command % ( rawimage, compimage), shell = True)
if result:
raise Exception("Compression of image failed")
self.log.debug("Compression complete, replacing original")
os.unlink(rawimage)
os.rename(compimage, rawimage)
self.log.debug("Done")
return False
def builder_will_create_target_image(self, builder, target, image_id, template, parameters):
raise ImageFactoryException("builder_will_create_target_image called in Docker plugin - this should never happen")
def builder_did_create_target_image(self, builder, target, image_id, template, parameters):
raise ImageFactoryException("builder_did_create_target_image called in Docker plugin - this should never happen")
| apache-2.0 | -8,209,008,752,900,804,000 | 38.752456 | 139 | 0.561777 | false |
OmnesRes/prepub | final_populate.py | 1 | 20556 | import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
import django
django.setup()
from papers.models import Article
from papers.models import Tag
from papers.models import Affiliation
from papers.models import Author
from datetime import date
##links for f1000 and biorxiv
##f=open(r'C:\Users\Jordan Anaya\Desktop\prepub\figshare\figshare.txt')
##figshare=[eval(i.strip()) for i in f]
f=open(r'C:\Users\Jordan Anaya\Desktop\prepub\peerj\peerj.txt')
peerj=[eval(i.strip()) for i in f]
f=open(r'C:\Users\Jordan Anaya\Desktop\prepub\biorxiv\biorxiv.txt')
biorxiv=[eval(i.strip()) for i in f]
f=open(r'C:\Users\Jordan Anaya\Desktop\prepub\f1000research\f1000research.txt')
f1000research=[eval(i.strip()) for i in f]
f=open(r'C:\Users\Jordan Anaya\Desktop\prepub\arxiv\arxiv.txt')
arxiv=[eval(i.strip()) for i in f]
f=open(r'C:\Users\Jordan Anaya\Desktop\prepub\preprints\preprints.txt')
preprints=[eval(i.strip()) for i in f]
f=open(r'C:\Users\Jordan Anaya\Desktop\prepub\winnower\winnower.txt')
winnower=[eval(i.strip()) for i in f]
f=open(r'C:\Users\Jordan Anaya\Desktop\prepub\wellcome\wellcome.txt')
wellcome=[eval(i.strip()) for i in f]
f=open(r'C:\Users\Jordan Anaya\Desktop\prepub\nature\nature.txt')
nature=[eval(i.strip()) for i in f]
##work on dates
date_dict={"January":1,"February":2,"March":3,"April":4,"May":5,"June":6,\
"July":7,"August":8,"September":9,"October":10,"November":11,"December":12,
"Jan":1,"Feb":2,"Mar":3,"Apr":4,"May":5,"Jun":6,\
"Jul":7,"Aug":8,"Sep":9,"Oct":10,"Nov":11,"Dec":12}
#figshare:
##do this .split('T')[0]
##
##for i in peerj:
## print i[2]
##biorxiv
##do this
## temp=i[2].replace(',','').replace('.','').split()
## print temp[2],date_dict[temp[0]],temp[1]
##f1000research:
##do this
## temp=i[2].split()
## print temp[2],date_dict[temp[1]],temp[0]
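##editor's note (illustrative, inferred from the parsing below): peerj dates look like "2016-01-05",
##biorxiv like "January 5, 2016", and f1000research like "5 January 2016"; all become datetime.date objects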
for i in peerj:
paper=Article(title=i[0],abstract=i[3],link=i[4])
temp=i[2].split('-')
paper.pub_date=date(int(temp[0]),int(temp[1]),int(temp[2]))
paper.save()
temp=[]
for author in i[1]:
name=author.replace(',','').replace('.','')
if name!='':
if name[:3].lower()=='jr ':
name=name[3:]
if name[-3:].lower()==' jr':
name=name[:-3]
if name[:3].lower()=='sr ':
name=name[3:]
if name[-3:].lower()==' sr':
name=name[:-3]
last_name=name.split()[-1]
if len(name.split())==1:
first_name=''
middle_name=''
elif len(name.split())==2:
first_name=name.split()[0]
middle_name=''
else:
first_name=name.split()[0]
middle_name=name.replace(first_name+' ','').replace(' '+last_name,'').strip()
if middle_name!='' and first_name!='':
temp.append(first_name+' '+middle_name+' '+last_name)
elif middle_name=='' and first_name:
temp.append(first_name+' '+last_name)
else:
temp.append(last_name)
try:
auth=Author.objects.get(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
except:
auth=Author.objects.create(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
paper.author_list=str(temp)
for affiliation in i[-1]:
try:
aff=Affiliation.objects.get(name=affiliation)
paper.affiliations.add(aff)
except:
aff=Affiliation.objects.create(name=affiliation)
paper.affiliations.add(aff)
for t in i[-2]:
try:
tag=Tag.objects.get(name=t)
paper.tags.add(tag)
except:
tag=Tag.objects.create(name=t)
paper.tags.add(tag)
paper.save()
for i in f1000research:
paper=Article(title=i[0].replace('\t',''),abstract=i[3].replace('\r\n',' '),link='http://f1000research.com'+i[4])
temp=i[2].split()
paper.pub_date=date(int(temp[2]),date_dict[temp[1]],int(temp[0]))
paper.save()
temp=[]
for author in i[1]:
name=author.replace(',','').replace('.','')
if name!='':
if name[:3].lower()=='jr ':
name=name[3:]
if name[-3:].lower()==' jr':
name=name[:-3]
if name[:3].lower()=='sr ':
name=name[3:]
if name[-3:].lower()==' sr':
name=name[:-3]
last_name=name.split()[-1]
if len(name.split())==1:
first_name=''
middle_name=''
elif len(name.split())==2:
first_name=name.split()[0]
middle_name=''
else:
first_name=name.split()[0]
middle_name=name.replace(first_name+' ','').replace(' '+last_name,'').strip()
if middle_name!='' and first_name!='':
temp.append(first_name+' '+middle_name+' '+last_name)
elif middle_name=='' and first_name:
temp.append(first_name+' '+last_name)
else:
temp.append(last_name)
try:
auth=Author.objects.get(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
except:
auth=Author.objects.create(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
paper.author_list=str(temp)
for affiliation in i[-1]:
try:
aff=Affiliation.objects.get(name=affiliation)
paper.affiliations.add(aff)
except:
aff=Affiliation.objects.create(name=affiliation)
paper.affiliations.add(aff)
for t in i[-2]:
try:
tag=Tag.objects.get(name=t)
paper.tags.add(tag)
except:
tag=Tag.objects.create(name=t)
paper.tags.add(tag)
paper.save()
for i in biorxiv:
paper=Article(title=i[0].replace('\n',' ').replace('\t',''),abstract=i[3].replace('\t',''),link='http://biorxiv.org'+i[4])
temp=i[2].replace(',','').replace('.','').split()
paper.pub_date=date(int(temp[2]),date_dict[temp[0]],int(temp[1]))
paper.save()
temp=[]
for author in i[1]:
name=author.replace(',','').replace('.','')
if name!='':
if name[:3].lower()=='jr ':
name=name[3:]
if name[-3:].lower()==' jr':
name=name[:-3]
if name[:3].lower()=='sr ':
name=name[3:]
if name[-3:].lower()==' sr':
name=name[:-3]
last_name=name.split()[-1]
if len(name.split())==1:
first_name=''
middle_name=''
elif len(name.split())==2:
first_name=name.split()[0]
middle_name=''
else:
first_name=name.split()[0]
middle_name=name.replace(first_name+' ','').replace(' '+last_name,'').strip()
if middle_name!='' and first_name!='':
temp.append(first_name+' '+middle_name+' '+last_name)
elif middle_name=='' and first_name:
temp.append(first_name+' '+last_name)
else:
temp.append(last_name)
try:
auth=Author.objects.get(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
except:
auth=Author.objects.create(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
paper.author_list=str(temp)
for affiliation in i[-1]:
try:
aff=Affiliation.objects.get(name=affiliation)
paper.affiliations.add(aff)
except:
aff=Affiliation.objects.create(name=affiliation)
paper.affiliations.add(aff)
for t in i[-2]:
try:
tag=Tag.objects.get(name=t)
paper.tags.add(tag)
except:
tag=Tag.objects.create(name=t)
paper.tags.add(tag)
paper.save()
##for i in figshare:
## paper=Article(title=i[0],abstract=i[3],link=i[4])
## temp=i[2].split('T')[0].split('-')
## paper.pub_date=date(int(temp[0]),int(temp[1]),int(temp[2]))
## paper.save()
## temp=[]
## for author in i[1]:
## name=author.replace(',','').replace('.','')
## if name[:3].lower()=='jr ':
## name=name[3:]
## if name[-3:].lower()==' jr':
## name=name[:-3]
## if name[:3].lower()=='sr ':
## name=name[3:]
## if name[-3:].lower()==' sr':
## name=name[:-3]
## first_name=name.split()[0]
## last_name=name.split()[-1]
## if len(name.split())==2:
## middle_name=''
## else:
## middle_name=name.replace(first_name+' ','').replace(' '+last_name,'').strip()
## if middle_name!='':
## temp.append(first_name+' '+middle_name+' '+last_name)
## else:
## temp.append(first_name+' '+last_name)
## try:
## auth=Author.objects.get(first=first_name,middle=middle_name,last=last_name)
## paper.authors.add(auth)
## except:
## auth=Author.objects.create(first=first_name,middle=middle_name,last=last_name)
## paper.authors.add(auth)
## paper.author_list=str(temp)
## for affiliation in i[-1]:
## try:
## aff=Affiliation.objects.get(name=affiliation)
## paper.affiliations.add(aff)
## except:
## aff=Affiliation.objects.create(name=affiliation)
## paper.affiliations.add(aff)
## for t in i[-2]:
## try:
## tag=Tag.objects.get(name=t)
## paper.tags.add(tag)
## except:
## tag=Tag.objects.create(name=t)
## paper.tags.add(tag)
## paper.save()
##
for i in arxiv:
paper=Article(title=i[0].replace('\n ',''),abstract=i[3].replace('\n',' '),link=i[4])
temp=i[2].split('-')
paper.pub_date=date(int(temp[0]),int(temp[1]),int(temp[2]))
paper.save()
temp=[]
for author in i[1]:
name=author.replace(',','').replace('.','')
if name!='':
if name[:3].lower()=='jr ':
name=name[3:]
if name[-3:].lower()==' jr':
name=name[:-3]
if name[:3].lower()=='sr ':
name=name[3:]
if name[-3:].lower()==' sr':
name=name[:-3]
last_name=name.split()[-1]
if len(name.split())==1:
first_name=''
middle_name=''
elif len(name.split())==2:
first_name=name.split()[0]
middle_name=''
else:
first_name=name.split()[0]
middle_name=name.replace(first_name+' ','').replace(' '+last_name,'').strip()
if middle_name!='' and first_name!='':
temp.append(first_name+' '+middle_name+' '+last_name)
elif middle_name=='' and first_name:
temp.append(first_name+' '+last_name)
else:
temp.append(last_name)
try:
auth=Author.objects.get(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
except:
auth=Author.objects.create(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
paper.author_list=str(temp)
for affiliation in i[-1]:
try:
aff=Affiliation.objects.get(name=affiliation)
paper.affiliations.add(aff)
except:
aff=Affiliation.objects.create(name=affiliation)
paper.affiliations.add(aff)
for t in i[-2]:
try:
tag=Tag.objects.get(name=t)
paper.tags.add(tag)
except:
tag=Tag.objects.create(name=t)
paper.tags.add(tag)
paper.save()
for i in preprints:
paper=Article(title=i[0],abstract=i[3],link='https://www.preprints.org'+i[4])
temp=i[2].split()
paper.pub_date=date(int(temp[2]),date_dict[temp[0]],int(temp[1]))
paper.save()
temp=[]
for author in i[1]:
name=author.replace(',','').replace('.','')
if name!='':
if name[:3].lower()=='jr ':
name=name[3:]
if name[-3:].lower()==' jr':
name=name[:-3]
if name[:3].lower()=='sr ':
name=name[3:]
if name[-3:].lower()==' sr':
name=name[:-3]
last_name=name.split()[-1]
if len(name.split())==1:
first_name=''
middle_name=''
elif len(name.split())==2:
first_name=name.split()[0]
middle_name=''
else:
first_name=name.split()[0]
middle_name=name.replace(first_name+' ','').replace(' '+last_name,'').strip()
if middle_name!='' and first_name!='':
temp.append(first_name+' '+middle_name+' '+last_name)
elif middle_name=='' and first_name:
temp.append(first_name+' '+last_name)
else:
temp.append(last_name)
try:
auth=Author.objects.get(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
except:
auth=Author.objects.create(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
paper.author_list=str(temp)
for affiliation in i[-1]:
try:
aff=Affiliation.objects.get(name=affiliation)
paper.affiliations.add(aff)
except:
aff=Affiliation.objects.create(name=affiliation)
paper.affiliations.add(aff)
for t in i[-2]:
try:
tag=Tag.objects.get(name=t)
paper.tags.add(tag)
except:
tag=Tag.objects.create(name=t)
paper.tags.add(tag)
paper.save()
for i in winnower:
paper=Article(title=i[0].replace('\r\n',' '),abstract=i[3],link='https://thewinnower.com'+i[4])
temp=i[2].split()
paper.pub_date=date(int(temp[2]),date_dict[temp[0]],int(temp[1]))
paper.save()
temp=[]
for author in i[1]:
name=author.replace(',','').replace('.','')
if name!='':
if name[:3].lower()=='jr ':
name=name[3:]
if name[-3:].lower()==' jr':
name=name[:-3]
if name[:3].lower()=='sr ':
name=name[3:]
if name[-3:].lower()==' sr':
name=name[:-3]
last_name=name.split()[-1]
if len(name.split())==1:
first_name=''
middle_name=''
elif len(name.split())==2:
first_name=name.split()[0]
middle_name=''
else:
first_name=name.split()[0]
middle_name=name.replace(first_name+' ','').replace(' '+last_name,'').strip()
if middle_name!='' and first_name!='':
temp.append(first_name+' '+middle_name+' '+last_name)
elif middle_name=='' and first_name:
temp.append(first_name+' '+last_name)
else:
temp.append(last_name)
try:
auth=Author.objects.get(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
except:
auth=Author.objects.create(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
paper.author_list=str(temp)
for affiliation in i[-1]:
try:
aff=Affiliation.objects.get(name=affiliation)
paper.affiliations.add(aff)
except:
aff=Affiliation.objects.create(name=affiliation)
paper.affiliations.add(aff)
for t in i[-2]:
try:
tag=Tag.objects.get(name=t)
paper.tags.add(tag)
except:
tag=Tag.objects.create(name=t)
paper.tags.add(tag)
paper.save()
for i in wellcome:
paper=Article(title=i[0],abstract=i[3],link='https://wellcomeopenresearch.org'+i[4])
temp=i[2].split('/')
paper.pub_date=date(int(temp[0]),int(temp[1]),int(temp[2]))
paper.save()
temp=[]
for author in i[1]:
name=author.replace(',','').replace('.','')
if name!='':
if name[:3].lower()=='jr ':
name=name[3:]
if name[-3:].lower()==' jr':
name=name[:-3]
if name[:3].lower()=='sr ':
name=name[3:]
if name[-3:].lower()==' sr':
name=name[:-3]
last_name=name.split()[-1]
if len(name.split())==1:
first_name=''
middle_name=''
elif len(name.split())==2:
first_name=name.split()[0]
middle_name=''
else:
first_name=name.split()[0]
middle_name=name.replace(first_name+' ','').replace(' '+last_name,'').strip()
if middle_name!='' and first_name!='':
temp.append(first_name+' '+middle_name+' '+last_name)
elif middle_name=='' and first_name:
temp.append(first_name+' '+last_name)
else:
temp.append(last_name)
try:
auth=Author.objects.get(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
except:
auth=Author.objects.create(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
paper.author_list=str(temp)
for affiliation in i[-1]:
try:
aff=Affiliation.objects.get(name=affiliation)
paper.affiliations.add(aff)
except:
aff=Affiliation.objects.create(name=affiliation)
paper.affiliations.add(aff)
for t in i[-2]:
try:
tag=Tag.objects.get(name=t)
paper.tags.add(tag)
except:
tag=Tag.objects.create(name=t)
paper.tags.add(tag)
paper.save()
for i in nature:
paper=Article(title=i[0],abstract=i[3],link=i[4])
temp=i[2].split('-')
paper.pub_date=date(int(temp[0]),int(temp[1]),int(temp[2]))
paper.save()
temp=[]
for author in i[1]:
name=author.replace(',','').replace('.','')
if name!='':
if name[:3].lower()=='jr ':
name=name[3:]
if name[-3:].lower()==' jr':
name=name[:-3]
if name[:3].lower()=='sr ':
name=name[3:]
if name[-3:].lower()==' sr':
name=name[:-3]
last_name=name.split()[-1]
if len(name.split())==1:
first_name=''
middle_name=''
elif len(name.split())==2:
first_name=name.split()[0]
middle_name=''
else:
first_name=name.split()[0]
middle_name=name.replace(first_name+' ','').replace(' '+last_name,'').strip()
if middle_name!='' and first_name!='':
temp.append(first_name+' '+middle_name+' '+last_name)
elif middle_name=='' and first_name:
temp.append(first_name+' '+last_name)
else:
temp.append(last_name)
try:
auth=Author.objects.get(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
except:
auth=Author.objects.create(first=first_name,middle=middle_name,last=last_name)
paper.authors.add(auth)
paper.author_list=str(temp)
for affiliation in i[-1]:
try:
aff=Affiliation.objects.get(name=affiliation)
paper.affiliations.add(aff)
except:
aff=Affiliation.objects.create(name=affiliation)
paper.affiliations.add(aff)
for t in i[-2]:
try:
tag=Tag.objects.get(name=t)
paper.tags.add(tag)
except:
tag=Tag.objects.create(name=t)
paper.tags.add(tag)
paper.save()
| mit | 1,597,231,650,557,225,000 | 34.6875 | 126 | 0.506859 | false |
googleinterns/IBRNet | ibrnet/data_loaders/create_training_dataset.py | 1 | 5158 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from . import dataset_dict
from torch.utils.data import Dataset, Sampler
from torch.utils.data import DistributedSampler, WeightedRandomSampler
from typing import Optional
from operator import itemgetter
import torch
class DatasetFromSampler(Dataset):
"""Dataset to create indexes from `Sampler`.
Args:
sampler: PyTorch sampler
"""
def __init__(self, sampler: Sampler):
"""Initialisation for DatasetFromSampler."""
self.sampler = sampler
self.sampler_list = None
def __getitem__(self, index: int):
"""Gets element of the dataset.
Args:
index: index of the element in the dataset
Returns:
Single element by index
"""
if self.sampler_list is None:
self.sampler_list = list(self.sampler)
return self.sampler_list[index]
def __len__(self) -> int:
"""
Returns:
int: length of the dataset
"""
return len(self.sampler)
class DistributedSamplerWrapper(DistributedSampler):
"""
Wrapper over `Sampler` for distributed training.
Allows you to use any sampler in distributed mode.
It is especially useful in conjunction with
`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSamplerWrapper instance as a DataLoader
sampler, and load a subset of subsampled data of the original dataset
that is exclusive to it.
.. note::
Sampler is assumed to be of constant size.
"""
def __init__(
self,
sampler,
num_replicas: Optional[int] = None,
rank: Optional[int] = None,
shuffle: bool = True,
):
"""
Args:
sampler: Sampler used for subsampling
num_replicas (int, optional): Number of processes participating in
distributed training
rank (int, optional): Rank of the current process
within ``num_replicas``
shuffle (bool, optional): If true (default),
sampler will shuffle the indices
"""
super(DistributedSamplerWrapper, self).__init__(
DatasetFromSampler(sampler),
num_replicas=num_replicas,
rank=rank,
shuffle=shuffle,
)
self.sampler = sampler
def __iter__(self):
self.dataset = DatasetFromSampler(self.sampler)
indexes_of_indexes = super().__iter__()
subsampler_indexes = self.dataset
return iter(itemgetter(*indexes_of_indexes)(subsampler_indexes))
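# Example usage (editor's sketch, assuming a standard DistributedDataParallel setup):
#   base_sampler = WeightedRandomSampler(weights, num_samples=len(weights))
#   sampler = DistributedSamplerWrapper(base_sampler)
#   loader = torch.utils.data.DataLoader(dataset, sampler=sampler, batch_size=4)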
def create_training_dataset(args):
# parse args.train_dataset, "+" indicates that multiple datasets are used, for example "ibrnet_collect+llff+spaces"
# otherwise only one dataset is used
# args.dataset_weights should be a list representing the resampling rate for each dataset, and should sum up to 1
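    # e.g. (illustrative): train_dataset="llff+spaces" with dataset_weights=[0.7, 0.3] draws each
    # llff sample with probability 0.7/len(llff) and each spaces sample with probability 0.3/len(spaces)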
print('training dataset: {}'.format(args.train_dataset))
mode = 'train'
if '+' not in args.train_dataset:
train_dataset = dataset_dict[args.train_dataset](args, mode,
scenes=args.train_scenes
)
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None
else:
train_dataset_names = args.train_dataset.split('+')
weights = args.dataset_weights
assert len(train_dataset_names) == len(weights)
assert np.abs(np.sum(weights) - 1.) < 1e-6
print('weights:{}'.format(weights))
train_datasets = []
train_weights_samples = []
for training_dataset_name, weight in zip(train_dataset_names, weights):
train_dataset = dataset_dict[training_dataset_name](args, mode,
scenes=args.train_scenes,
)
train_datasets.append(train_dataset)
num_samples = len(train_dataset)
weight_each_sample = weight / num_samples
train_weights_samples.extend([weight_each_sample]*num_samples)
train_dataset = torch.utils.data.ConcatDataset(train_datasets)
train_weights = torch.from_numpy(np.array(train_weights_samples))
sampler = WeightedRandomSampler(train_weights, len(train_weights))
train_sampler = DistributedSamplerWrapper(sampler) if args.distributed else sampler
return train_dataset, train_sampler
| apache-2.0 | 3,506,847,130,985,132,000 | 36.649635 | 119 | 0.627763 | false |
hawkeyexp/plugin.video.netflix | resources/lib/common/kodi_ops.py | 1 | 10441 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Helper functions for Kodi operations
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import json
import xbmc
from resources.lib.globals import G
from resources.lib.utils.logging import LOG
from .misc_utils import is_less_version
__CURRENT_KODI_PROFILE_NAME__ = None
LOCALE_CONV_TABLE = {
'es-ES': 'es-Spain',
'pt-BR': 'pt-Brazil',
'fr-CA': 'fr-Canada',
'ar-EG': 'ar-Egypt',
'nl-BE': 'nl-Belgium',
'en-GB': 'en-UnitedKingdom'
}
def json_rpc(method, params=None):
"""
Executes a JSON-RPC in Kodi
:param method: The JSON-RPC method to call
:type method: string
:param params: The parameters of the method call (optional)
:type params: dict
:returns: dict -- Method call result
"""
request_data = {'jsonrpc': '2.0', 'method': method, 'id': 1,
'params': params or {}}
request = json.dumps(request_data)
LOG.debug('Executing JSON-RPC: {}', request)
raw_response = xbmc.executeJSONRPC(request)
# debug('JSON-RPC response: {}'.format(raw_response))
response = json.loads(raw_response)
if 'error' in response:
raise IOError('JSONRPC-Error {}: {}'
.format(response['error']['code'],
response['error']['message']))
return response['result']
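# Example call (editor's sketch; method and params follow the Kodi JSON-RPC API):
#   result = json_rpc('Player.GetItem', {'playerid': 1, 'properties': ['title']})
#   title = result['item'].get('title')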
def json_rpc_multi(method, list_params=None):
"""
Executes multiple JSON-RPC with the same method in Kodi
:param method: The JSON-RPC method to call
:type method: string
:param list_params: Multiple list of parameters of the method call
:type list_params: a list of dict
:returns: dict -- Method call result
"""
request_data = [{'jsonrpc': '2.0', 'method': method, 'id': 1, 'params': params or {}} for params in list_params]
request = json.dumps(request_data)
LOG.debug('Executing JSON-RPC: {}', request)
raw_response = xbmc.executeJSONRPC(request)
if 'error' in raw_response:
raise IOError('JSONRPC-Error {}'.format(raw_response))
return json.loads(raw_response)
def container_refresh(use_delay=False):
"""Refresh the current container"""
if use_delay:
# When operations have been performed on the Kodi library before calling this method,
# it can be necessary to apply a delay before running the refresh, otherwise the page does not refresh correctly;
# this seems to be caused by a race condition with the Kodi library update (but I am not really sure)
from time import sleep
sleep(1)
WndHomeProps[WndHomeProps.IS_CONTAINER_REFRESHED] = 'True'
xbmc.executebuiltin('Container.Refresh')
def container_update(url, reset_history=False):
"""Update the current container"""
func_str = 'Container.Update({},replace)' if reset_history else 'Container.Update({})'
xbmc.executebuiltin(func_str.format(url))
def get_local_string(string_id):
"""Retrieve a localized string by its id"""
src = xbmc if string_id < 30000 else G.ADDON
return src.getLocalizedString(string_id)
def run_plugin_action(path, block=False):
"""Create an action that can be run with xbmc.executebuiltin in order to run a Kodi plugin specified by path.
If block is True (default=False), the execution of code will block until the called plugin has finished running."""
return 'RunPlugin({}, {})'.format(path, block)
def run_plugin(path, block=False):
"""Run a Kodi plugin specified by path. If block is True (default=False),
the execution of code will block until the called plugin has finished running."""
xbmc.executebuiltin(run_plugin_action(path, block))
def schedule_builtin(time, command, name='NetflixTask'):
"""Set an alarm to run builtin command after time has passed"""
xbmc.executebuiltin('AlarmClock({},{},{},silent)'
.format(name, command, time))
def play_media(media):
"""Play a media in Kodi"""
xbmc.executebuiltin(G.py2_encode('PlayMedia({})'.format(media)))
def stop_playback():
"""Stop the running playback"""
xbmc.executebuiltin('PlayerControl(Stop)')
def get_current_kodi_profile_name(no_spaces=True):
"""Lazily gets the name of the Kodi profile currently used"""
if not hasattr(get_current_kodi_profile_name, 'cached'):
name = json_rpc('Profiles.GetCurrentProfile', {'properties': ['thumbnail', 'lockmode']}).get('label', 'unknown')
get_current_kodi_profile_name.cached = name.replace(' ', '_') if no_spaces else name
return get_current_kodi_profile_name.cached
class _WndProps(object): # pylint: disable=no-init
"""Read and write a property to the Kodi home window"""
# Default Properties keys
SERVICE_STATUS = 'service_status'
"""Return current service status"""
IS_CONTAINER_REFRESHED = 'is_container_refreshed'
"""Return 'True' when container_refresh in kodi_ops.py is used by context menus, etc."""
CURRENT_DIRECTORY = 'current_directory'
"""
    Return the name of the currently loaded directory (i.e. the method name of the directory.py class), otherwise:
    [''] When the add-on is in its first run instance, i.e. the startup page
    ['root'] When the add-on startup page is re-loaded (e.g. refresh) or manually called
    Notice: In some cases the value may not be consistent, for example:
    - when you exit to the Kodi home screen
    - external calls to the add-on while browsing the add-on
"""
def __getitem__(self, key):
try:
# If you use multiple Kodi profiles you need to distinguish the property of current profile
return G.WND_KODI_HOME.getProperty(G.py2_encode('netflix_{}_{}'.format(get_current_kodi_profile_name(),
key)))
except Exception: # pylint: disable=broad-except
return ''
def __setitem__(self, key, newvalue):
# If you use multiple Kodi profiles you need to distinguish the property of current profile
G.WND_KODI_HOME.setProperty(G.py2_encode('netflix_{}_{}'.format(get_current_kodi_profile_name(),
key)),
newvalue)
WndHomeProps = _WndProps()
def get_kodi_audio_language(iso_format=xbmc.ISO_639_1):
"""
Return the audio language from Kodi settings
    WARNING: Depending on the Kodi player settings, this can also return the special values 'mediadefault' or 'original'
"""
audio_language = json_rpc('Settings.GetSettingValue', {'setting': 'locale.audiolanguage'})
if audio_language['value'] in ['mediadefault', 'original']:
return audio_language['value']
return convert_language_iso(audio_language['value'], iso_format)
def get_kodi_subtitle_language(iso_format=xbmc.ISO_639_1):
"""Return the subtitle language from Kodi settings"""
subtitle_language = json_rpc('Settings.GetSettingValue', {'setting': 'locale.subtitlelanguage'})
if subtitle_language['value'] == 'forced_only':
return subtitle_language['value']
return convert_language_iso(subtitle_language['value'], iso_format)
def convert_language_iso(from_value, iso_format=xbmc.ISO_639_1):
"""
Convert given value (English name or two/three letter code) to the specified format
:param iso_format: specify the iso format (two letter code ISO_639_1 or three letter code ISO_639_2)
"""
return xbmc.convertLanguage(G.py2_encode(from_value), iso_format)
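# Illustrative examples for convert_language_iso (not part of the original source,
# results depend on the Kodi language table):
#   convert_language_iso('German')                 # expected 'de'
#   convert_language_iso('German', xbmc.ISO_639_2) # expected 'ger'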
def fix_locale_languages(data_list):
"""Replace all the languages with the country code because Kodi does not support IETF BCP 47 standard"""
# Languages with the country code causes the display of wrong names in Kodi settings like
# es-ES as 'Spanish-Spanish', pt-BR as 'Portuguese-Breton', nl-BE as 'Dutch-Belarusian', etc
# and the impossibility to set them as the default audio/subtitle language
for item in data_list:
if item.get('isNoneTrack', False):
continue
if item['language'] == 'pt-BR' and not G.KODI_VERSION.is_less_version('18.7'):
            # Replace pt-BR with 'pb', an unofficial ISO 639-1 Portuguese (Brazil) language code
            # that was added in Kodi 18.7 and Kodi 19.x, PR: https://github.com/xbmc/xbmc/pull/17689
item['language'] = 'pb'
if len(item['language']) > 2:
            # Replace known locales with the country,
# so Kodi will not recognize the modified country code and will show the string as it is
if item['language'] in LOCALE_CONV_TABLE:
item['language'] = LOCALE_CONV_TABLE[item['language']]
else:
LOG.error('fix_locale_languages: missing mapping conversion for locale "{}"'.format(item['language']))
class GetKodiVersion(object):
"""Get the kodi version, git date, stage name"""
# Examples of some types of supported strings:
# 10.1 Git:Unknown PRE-11.0 Git:Unknown 11.0-BETA1 Git:20111222-22ad8e4
# 18.1-RC1 Git:20190211-379f5f9903 19.0-ALPHA1 Git:20190419-c963b64487
def __init__(self):
import re
self.build_version = xbmc.getInfoLabel('System.BuildVersion')
# Parse the version number
result = re.search(r'\d+\.\d+', self.build_version)
self.version = result.group(0) if result else ''
# Parse the major version number
self.major_version = self.version.split('.')[0] if self.version else ''
# Parse the date of GIT build
result = re.search(r'(Git:)(\d+?(?=(-|$)))', self.build_version)
self.date = int(result.group(2)) if result and len(result.groups()) >= 2 else None
# Parse the stage name
result = re.search(r'(\d+\.\d+-)(.+)(?=\s)', self.build_version)
if not result:
result = re.search(r'^(.+)(-\d+\.\d+)', self.build_version)
self.stage = result.group(1) if result else ''
else:
self.stage = result.group(2) if result else ''
def is_major_ver(self, major_ver):
return bool(major_ver in self.major_version)
def is_less_version(self, ver):
return is_less_version(self.version, ver)
def __str__(self):
return self.build_version
| mit | -7,129,245,215,049,425,000 | 41.100806 | 120 | 0.646586 | false |
treycucco/pynet-kvp | server.py | 1 | 4819 | import argparse
import json
import logging
import time
from collections import defaultdict
from pynet import Node, NodeHooks, Encryptor, PeerDefinition
from pynet.util import run_node
SHUTDOWN = False
class KVPDB(object):
"""A per-owner dictionary.
Each owner has their own dictionary, this class exposes methods to get, set, query and delete
keys on a per-owner basis.
"""
def __init__(self):
self.db = defaultdict(dict)
def get(self, owner, key):
"""Get a value for a specific owner."""
return self.db[owner].get(key, None)
def set(self, owner, key, value):
"""Set a value for a specific owner."""
self.db[owner][key] = value
def contains(self, owner, key):
"""Determine if a key exists for an owner."""
return key in self.db[owner]
def delete(self, owner, key):
"""Remove a value for an owner."""
del self.db[owner][key]
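# Minimal illustrative sketch of KVPDB usage (not part of the original source;
# owner and key values are made up):
#   db = KVPDB()
#   db.set(b"peer-1", b"color", b"blue")
#   db.contains(b"peer-1", b"color")  # -> True
#   db.get(b"peer-1", b"color")       # -> b"blue"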
class MessageHandler(object):
"""A callable object for handling messages on a node."""
def __init__(self):
self.db = KVPDB()
def __call__(self, sender, body):
parts = body.split(maxsplit=1)
    if len(parts) == 2:
      action, data = parts
    elif parts:
      # A bare command without a payload (e.g. b"disconnect")
      action, data = parts[0], None
    else:
      action, data = None, None
if action == b"get":
log("get", data.decode())
if self.db.contains(sender, data):
return (True, self.db.get(sender, data), None)
else:
return (False, "Key not found", None)
elif action == b"set":
key, value = data.split(maxsplit=1)
log("set", key.decode())
self.db.set(sender, key, value)
return (True, None, None)
elif action == b"delete":
log("delete", data.decode())
return (True, self.db.delete(sender, data), None)
elif action == b"has":
log("has?", data.decode())
return (True, "yes" if self.db.contains(sender, data) else "no", None)
elif action == b"disconnect":
del self.db[sender]
self.node.remove_peer(sender)
return (True, None, None)
else:
return (False, "Unknown command", None)
class Hooks(NodeHooks):
"""Overrides the NodeHooks class to log messages and to allow ad-hoc peer registering."""
def handle_raw_message(self, data):
handled, wait_for_response = super().handle_raw_message(data)
if handled:
log("Received message", data.decode())
return (handled, wait_for_response)
def handle_unknown_peer(self, sender, body, signature):
if body.startswith(b"register\n") and body.count(b"\n") >= 1:
command, public_key = body.split(b"\n", maxsplit=1)
if self.node.has_peer(sender):
log("Peer already registered")
self.node.write(b"Peer already registered", encrypt=False)
else:
log("Registered peer", sender)
new_peer = PeerDefinition(sender, None, public_key)
self.node.add_peer(new_peer)
self.node.set_receiver(new_peer)
self.node.write_success(b"registered")
return (True, None)
else:
return super().handle_unknown_peer(sender, body, signature)
def main():
args = parse_args()
if args is not None:
args.func(args)
def parse_args():
argument_parser = argparse.ArgumentParser("A secured in-memory kvp server built on pynet")
subparsers = argument_parser.add_subparsers()
config_parser = subparsers.add_parser("config", description="Dump out a sample config")
config_parser.set_defaults(func=dump_config)
run_parser = subparsers.add_parser("run", description="Run the server")
run_parser.add_argument("config_file")
run_parser.set_defaults(func=run_server)
args = argument_parser.parse_args()
if hasattr(args, "func"):
return args
else:
argument_parser.print_help()
return None
def dump_config(args):
node = Node(None, "/tmp/pynet-kvp.sock")
node.add_peer(PeerDefinition("first among peers", "127.0.0.1:1337", Encryptor.new_key().publickey()))
print(json.dumps(node.get_config(), sort_keys=True, indent=2, separators=(", ", ": ")))
def run_server(args):
setup_logging(args)
with open(args.config_file, "r") as rf:
node = Node.from_config(MessageHandler(), json.load(rf), hooks=Hooks())
with run_node(node):
log("Listening on {0}".format(node.address))
# NOTE: There is no functionality for flipping the SHUTDOWN switch, so at this point you'll
# have to terminate the process manually.
while not SHUTDOWN:
time.sleep(1)
def setup_logging(args):
log_level = logging.INFO
logger = logging.getLogger()
logger.setLevel(log_level)
handler = logging.StreamHandler()
handler.setLevel(log_level)
handler.setFormatter(logging.Formatter(
"%(asctime)s\t%(levelname)s\t%(message)s",
"%Y-%m-%dT%H:%M:%S%z"))
logger.addHandler(handler)
return (logger, handler)
def log(*args):
logging.info("\t".join(args))
if __name__ == "__main__":
main()
| mit | 7,098,275,602,779,228,000 | 26.695402 | 103 | 0.653248 | false |
erkyrath/tworld | lib/twcommon/misc.py | 1 | 7280 | import datetime
import re
import unicodedata
# The maximum length of an editable description, such as a player desc
# or editstr line.
MAX_DESCLINE_LENGTH = 256
class SuiGeneris(object):
"""Factory for when you want an object distinguishable from all other
objects.
"""
def __init__(self, name):
self.name = name
def __repr__(self):
return '<%s>' % (self.name,)
def gen_bool_parse(val):
"""Convert a string, as a human might type it, to a boolean. Unrecognized
values raise an exception.
"""
val = val.strip()
if not val:
return False
try:
return bool(int(val))
except:
pass
ch = val[0]
if ch in {'t', 'T', 'y', 'Y'}:
return True
if ch in {'f', 'F', 'n', 'N'}:
return False
raise ValueError('"%s" does not look like a boolean' % (val,))
def now():
"""Utility function: return "now" as an aware UTC datetime object.
"""
return datetime.datetime.now(datetime.timezone.utc)
def gen_datetime_format(obj):
"""Utility function: convert a datetime to a clean-looking string.
(No timezone part; no time part if there is none.)
"""
obj = obj.replace(tzinfo=None)
if obj.hour == 0 and obj.minute == 0 and obj.second == 0 and obj.microsecond == 0:
return obj.strftime('%Y-%m-%d')
else:
return str(obj)
def gen_datetime_parse(val):
"""Utility function: convert a simple string (as produced by
gen_datetime_format) into an aware UTC datetime object.
"""
try:
res = datetime.datetime.strptime(val, '%Y-%m-%d')
return res.replace(tzinfo=datetime.timezone.utc)
except:
pass
try:
res = datetime.datetime.strptime(val, '%Y-%m-%d %H:%M:%S')
return res.replace(tzinfo=datetime.timezone.utc)
except:
pass
try:
res = datetime.datetime.strptime(val, '%Y-%m-%d %H:%M:%S.%f')
return res.replace(tzinfo=datetime.timezone.utc)
except:
pass
raise Exception('Date-time format not recognized: %s' % (val,))
def timedelta_two_units(delta):
"""Render a timedelta in English, to two units (seconds, minutes, hours,
days).
"""
if delta <= datetime.timedelta(seconds=0):
return 'just now'
total_seconds = int(delta.total_seconds())
if delta.days:
days = delta.days
hours = delta.seconds // 3600
time = '%d day%s' % (days, ('' if days==1 else 's'))
if hours:
time += ', %s hour%s' % (hours, ('' if hours==1 else 's'))
return time
hours = total_seconds // 3600
minutes = (total_seconds - (hours*3600)) // 60
if hours:
time = '%s hour%s' % (hours, ('' if hours==1 else 's'))
if minutes:
time += ', %s minute%s' % (minutes, ('' if minutes==1 else 's'))
return time
seconds = total_seconds - (minutes*60)
if minutes:
time = '%s minute%s' % (minutes, ('' if minutes==1 else 's'))
if seconds:
time += ', %s second%s' % (seconds, ('' if seconds==1 else 's'))
return time
time = '%s second%s' % (seconds, ('' if seconds==1 else 's'))
return time
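# Illustrative results (not part of the original source):
#   timedelta_two_units(datetime.timedelta(days=2, hours=5))       -> '2 days, 5 hours'
#   timedelta_two_units(datetime.timedelta(minutes=3, seconds=20)) -> '3 minutes, 20 seconds'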
def is_typed_dict(obj, typ):
"""Returns true if obj is a dict and has a field 'type'=typ.
"""
return (type(obj) is dict and obj.get('type', None) == typ)
# Regexps for sluggify
re_nonidentchars = re.compile('[^a-z0-9_ ]+')
re_extrawhite = re.compile(' +')
re_startdigit = re.compile('^[0-9]')
def sluggify(text):
"""
Convert an arbitrary string to a valid Python (2) identifier that
'reads the same'. We preserve letters and digits, while lowercasing
and converting other characters to underscores. We try to avoid too
many underscores in a row, but also try to keep them meaningful. (So
'dr who' and 'Dr__Who' sluggify differently.)
See also re_valididentifier in tweblib/handlers.py.
### Would be nice to follow Py3 identifier rules here, for Unicode.
"""
text = text.lower()
text = unicodedata.normalize('NFKD', text) # Split off accent marks
text = re_nonidentchars.sub(' ', text) # Punctuation to spaces
text = re_extrawhite.sub(' ', text) # Remove redundant spaces
text = text.strip()
text = text.replace(' ', '_')
if not text or re_startdigit.match(text):
# Must not be empty or start with a digit
text = '_' + text
return text
import unittest
class TestMiscModule(unittest.TestCase):
def test_genboolparse(self):
self.assertEqual(gen_bool_parse(''), False)
self.assertEqual(gen_bool_parse(' '), False)
self.assertEqual(gen_bool_parse('0'), False)
self.assertEqual(gen_bool_parse('1'), True)
self.assertEqual(gen_bool_parse('2'), True)
self.assertEqual(gen_bool_parse('01'), True)
self.assertEqual(gen_bool_parse('t'), True)
self.assertEqual(gen_bool_parse('true'), True)
self.assertEqual(gen_bool_parse(' TRUE '), True)
self.assertEqual(gen_bool_parse('f'), False)
self.assertEqual(gen_bool_parse(' false '), False)
self.assertEqual(gen_bool_parse('False'), False)
self.assertEqual(gen_bool_parse('yes'), True)
self.assertEqual(gen_bool_parse('Y'), True)
self.assertEqual(gen_bool_parse('no'), False)
self.assertEqual(gen_bool_parse('N'), False)
self.assertRaises(ValueError, gen_bool_parse, 'x')
self.assertRaises(ValueError, gen_bool_parse, '?')
self.assertRaises(ValueError, gen_bool_parse, '1.1')
self.assertRaises(ValueError, gen_bool_parse, '.')
def test_gendatetime(self):
date1 = datetime.datetime(year=2013, month=7, day=16, tzinfo=datetime.timezone.utc)
self.assertEqual(gen_datetime_parse('2013-07-16'), date1)
self.assertEqual(gen_datetime_format(date1), '2013-07-16')
date2 = datetime.datetime(year=2001, month=1, day=1, hour=2, minute=3, second=5, tzinfo=datetime.timezone.utc)
self.assertEqual(gen_datetime_parse('2001-01-01 02:03:05'), date2)
self.assertEqual(gen_datetime_format(date2), '2001-01-01 02:03:05')
date3 = datetime.datetime(year=2199, month=12, day=31, hour=23, minute=59, second=59, microsecond=123456, tzinfo=datetime.timezone.utc)
self.assertEqual(gen_datetime_parse('2199-12-31 23:59:59.123456'), date3)
self.assertEqual(gen_datetime_format(date3), '2199-12-31 23:59:59.123456')
date4 = now()
self.assertEqual(date4, gen_datetime_parse(gen_datetime_format(date4)))
def test_sluggify(self):
tests = [
('', '_'), (' ', '_'), (' ', '_'), (' ', '_'),
('_', '_'), ('__', '__'), ('___', '___'),
('.', '_'), ('..', '_'), ('. . .', '_'),
(' _ ', '_'), (' _ _ ', '___'),
('a', 'a'), ('Hello', 'hello'), (' one two ', 'one_two'),
('Dr. Who?', 'dr_who'), ('Dr__who', 'dr__who'),
('x\xE4 \xF8b', 'xa_b'), ('x\u24E4\xB9\uFF0A\uFF21y', 'xu1_ay'),
('a-Z_0-9', 'a_z_0_9'), ('95', '_95'), ('.001a', '_001a'),
]
for (val, res) in tests:
self.assertEqual(sluggify(val), res)
if __name__ == '__main__':
unittest.main()
| mit | 5,393,715,952,904,703,000 | 35.954315 | 143 | 0.583791 | false |
lo-ise/sea_ice_downloader | seaicedata.py | 1 | 2942 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
SeaIceData
A QGIS plugin
Downloads sea ice concentration data from NSIDC
-------------------
begin : 2014-10-02
copyright : (C) 2014 by Louise Ireland
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
# Initialize Qt resources from file resources.py
import resources
# Import the code for the dialog
from seaicedatadialog import SeaIceDataDialog
import os.path
class SeaIceData:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value("locale/userLocale")[0:2]
localePath = os.path.join(self.plugin_dir, 'i18n', 'seaicedata_{}.qm'.format(locale))
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
self.dlg = SeaIceDataDialog()
def initGui(self):
# Create action that will start plugin configuration
self.action = QAction(
QIcon(":/plugins/seaicedata/icon.png"),
u"Sea Ice Data Downloader", self.iface.mainWindow())
# connect the action to the run method
self.action.triggered.connect(self.run)
# Add toolbar button and menu item
self.iface.addToolBarIcon(self.action)
self.iface.addPluginToMenu(u"&Sea Ice Data Downloader", self.action)
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu(u"&Sea Ice Data Downloader", self.action)
self.iface.removeToolBarIcon(self.action)
# run method that performs all the real work
def run(self):
# show the dialog
self.dlg.show()
| mit | -5,549,265,629,633,507,000 | 38.756757 | 93 | 0.520054 | false |
apexkid/Wikiapiary | apiary/tasks/bot/extension_weekly.py | 1 | 1462 | """Process weekly activities for extensions."""
# pylint: disable=C0301,C0103,W1201
from apiary.tasks import BaseApiaryTask
import logging
import datetime
LOGGER = logging.getLogger()
class ExtensionWeekly(BaseApiaryTask):
"""Kick off weekly tasks for extensions."""
def run(self, curr_day = None, curr_hour = None):
"""Process the list of extensions."""
# Allow these to be passed in for testing
if curr_day is None:
curr_day = int(datetime.datetime.now().strftime("%w"))
if curr_hour is None:
curr_hour = int(datetime.datetime.now().strftime("%H"))
LOGGER.info ("Processing extensions for day segment %d and hour segment %d" % (curr_day, curr_hour))
my_query = ''.join([
"[[Category:Extension]]",
"[[Has day segment::%d]]" % curr_day,
"[[Has hour segment::%d]]" % curr_hour,
"|sort=Creation date",
"|limit=1000"])
LOGGER.debug ("Query: %s" % my_query)
extensions = self.bumble_bee.call({
'action': 'ask',
'query': my_query
})
i = 0
for extension in extensions['query']['results'].items():
i += 1
LOGGER.info(extension)
LOGGER.info("Processing extension %s" % extension[1]['fulltext'])
# Now call tasks to operate on extensions
MwTask.run(extension)
pass
return i
| gpl-2.0 | 805,745,656,183,699,000 | 28.836735 | 108 | 0.567031 | false |
interlegis/saap | config/rest_framework/views.py | 1 | 17934 | """
Provides an APIView class that is the base of all views in REST framework.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.db import connection, models, transaction
from django.http import Http404
from django.http.response import HttpResponseBase
from django.utils.cache import cc_delim_re, patch_vary_headers
from django.utils.encoding import smart_text
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from rest_framework import exceptions, status
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.schemas import DefaultSchema
from rest_framework.settings import api_settings
from rest_framework.utils import formatting
def get_view_name(view_cls, suffix=None):
"""
Given a view class, return a textual name to represent the view.
This name is used in the browsable API, and in OPTIONS responses.
This function is the default for the `VIEW_NAME_FUNCTION` setting.
"""
name = view_cls.__name__
name = formatting.remove_trailing_string(name, 'View')
name = formatting.remove_trailing_string(name, 'ViewSet')
name = formatting.camelcase_to_spaces(name)
if suffix:
name += ' ' + suffix
return name
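# Illustrative example (not part of the original source): a view class named
# 'UserListView' yields 'User List'; a viewset named 'UserViewSet' called with
# suffix='List' also yields 'User List'.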
def get_view_description(view_cls, html=False):
"""
Given a view class, return a textual description to represent the view.
This name is used in the browsable API, and in OPTIONS responses.
This function is the default for the `VIEW_DESCRIPTION_FUNCTION` setting.
"""
description = view_cls.__doc__ or ''
description = formatting.dedent(smart_text(description))
if html:
return formatting.markup_description(description)
return description
def set_rollback():
atomic_requests = connection.settings_dict.get('ATOMIC_REQUESTS', False)
if atomic_requests and connection.in_atomic_block:
transaction.set_rollback(True)
def exception_handler(exc, context):
"""
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's built-in `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised.
"""
if isinstance(exc, Http404):
exc = exceptions.NotFound()
elif isinstance(exc, PermissionDenied):
exc = exceptions.PermissionDenied()
if isinstance(exc, exceptions.APIException):
headers = {}
if getattr(exc, 'auth_header', None):
headers['WWW-Authenticate'] = exc.auth_header
if getattr(exc, 'wait', None):
headers['Retry-After'] = '%d' % exc.wait
if isinstance(exc.detail, (list, dict)):
data = exc.detail
else:
data = {'detail': exc.detail}
set_rollback()
return Response(data, status=exc.status_code, headers=headers)
return None
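# Illustrative behaviour (not part of the original source): a raised Http404 is
# converted to exceptions.NotFound and rendered as a 404 response with the body
# {"detail": "Not found."}; any unhandled exception makes this function return
# None, which results in a standard 500 error.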
class APIView(View):
# The following policies may be set at either globally, or per-view.
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
parser_classes = api_settings.DEFAULT_PARSER_CLASSES
authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES
throttle_classes = api_settings.DEFAULT_THROTTLE_CLASSES
permission_classes = api_settings.DEFAULT_PERMISSION_CLASSES
content_negotiation_class = api_settings.DEFAULT_CONTENT_NEGOTIATION_CLASS
metadata_class = api_settings.DEFAULT_METADATA_CLASS
versioning_class = api_settings.DEFAULT_VERSIONING_CLASS
# Allow dependency injection of other settings to make testing easier.
settings = api_settings
schema = DefaultSchema()
@classmethod
def as_view(cls, **initkwargs):
"""
Store the original class on the view function.
This allows us to discover information about the view when we do URL
reverse lookups. Used for breadcrumb generation.
"""
if isinstance(getattr(cls, 'queryset', None), models.query.QuerySet):
def force_evaluation():
raise RuntimeError(
'Do not evaluate the `.queryset` attribute directly, '
'as the result will be cached and reused between requests. '
'Use `.all()` or call `.get_queryset()` instead.'
)
cls.queryset._fetch_all = force_evaluation
view = super(APIView, cls).as_view(**initkwargs)
view.cls = cls
view.initkwargs = initkwargs
# Note: session based authentication is explicitly CSRF validated,
# all other authentication is CSRF exempt.
return csrf_exempt(view)
@property
def allowed_methods(self):
"""
Wrap Django's private `_allowed_methods` interface in a public property.
"""
return self._allowed_methods()
@property
def default_response_headers(self):
headers = {
'Allow': ', '.join(self.allowed_methods),
}
if len(self.renderer_classes) > 1:
headers['Vary'] = 'Accept'
return headers
def http_method_not_allowed(self, request, *args, **kwargs):
"""
If `request.method` does not correspond to a handler method,
determine what kind of exception to raise.
"""
raise exceptions.MethodNotAllowed(request.method)
def permission_denied(self, request, message=None):
"""
If request is not permitted, determine what kind of exception to raise.
"""
if request.authenticators and not request.successful_authenticator:
raise exceptions.NotAuthenticated()
raise exceptions.PermissionDenied(detail=message)
def throttled(self, request, wait):
"""
If request is throttled, determine what kind of exception to raise.
"""
raise exceptions.Throttled(wait)
def get_authenticate_header(self, request):
"""
If a request is unauthenticated, determine the WWW-Authenticate
header to use for 401 responses, if any.
"""
authenticators = self.get_authenticators()
if authenticators:
return authenticators[0].authenticate_header(request)
def get_parser_context(self, http_request):
"""
Returns a dict that is passed through to Parser.parse(),
as the `parser_context` keyword argument.
"""
# Note: Additionally `request` and `encoding` will also be added
# to the context by the Request object.
return {
'view': self,
'args': getattr(self, 'args', ()),
'kwargs': getattr(self, 'kwargs', {})
}
def get_renderer_context(self):
"""
Returns a dict that is passed through to Renderer.render(),
as the `renderer_context` keyword argument.
"""
# Note: Additionally 'response' will also be added to the context,
# by the Response object.
return {
'view': self,
'args': getattr(self, 'args', ()),
'kwargs': getattr(self, 'kwargs', {}),
'request': getattr(self, 'request', None)
}
def get_exception_handler_context(self):
"""
Returns a dict that is passed through to EXCEPTION_HANDLER,
as the `context` argument.
"""
return {
'view': self,
'args': getattr(self, 'args', ()),
'kwargs': getattr(self, 'kwargs', {}),
'request': getattr(self, 'request', None)
}
def get_view_name(self):
"""
Return the view name, as used in OPTIONS responses and in the
browsable API.
"""
func = self.settings.VIEW_NAME_FUNCTION
return func(self.__class__, getattr(self, 'suffix', None))
def get_view_description(self, html=False):
"""
Return some descriptive text for the view, as used in OPTIONS responses
and in the browsable API.
"""
func = self.settings.VIEW_DESCRIPTION_FUNCTION
return func(self.__class__, html)
# API policy instantiation methods
def get_format_suffix(self, **kwargs):
"""
Determine if the request includes a '.json' style format suffix
"""
if self.settings.FORMAT_SUFFIX_KWARG:
return kwargs.get(self.settings.FORMAT_SUFFIX_KWARG)
def get_renderers(self):
"""
Instantiates and returns the list of renderers that this view can use.
"""
return [renderer() for renderer in self.renderer_classes]
def get_parsers(self):
"""
Instantiates and returns the list of parsers that this view can use.
"""
return [parser() for parser in self.parser_classes]
def get_authenticators(self):
"""
Instantiates and returns the list of authenticators that this view can use.
"""
return [auth() for auth in self.authentication_classes]
def get_permissions(self):
"""
Instantiates and returns the list of permissions that this view requires.
"""
return [permission() for permission in self.permission_classes]
def get_throttles(self):
"""
Instantiates and returns the list of throttles that this view uses.
"""
return [throttle() for throttle in self.throttle_classes]
def get_content_negotiator(self):
"""
Instantiate and return the content negotiation class to use.
"""
if not getattr(self, '_negotiator', None):
self._negotiator = self.content_negotiation_class()
return self._negotiator
def get_exception_handler(self):
"""
Returns the exception handler that this view uses.
"""
return self.settings.EXCEPTION_HANDLER
# API policy implementation methods
def perform_content_negotiation(self, request, force=False):
"""
Determine which renderer and media type to use render the response.
"""
renderers = self.get_renderers()
conneg = self.get_content_negotiator()
try:
return conneg.select_renderer(request, renderers, self.format_kwarg)
except Exception:
if force:
return (renderers[0], renderers[0].media_type)
raise
def perform_authentication(self, request):
"""
Perform authentication on the incoming request.
Note that if you override this and simply 'pass', then authentication
will instead be performed lazily, the first time either
`request.user` or `request.auth` is accessed.
"""
request.user
def check_permissions(self, request):
"""
Check if the request should be permitted.
Raises an appropriate exception if the request is not permitted.
"""
for permission in self.get_permissions():
if not permission.has_permission(request, self):
self.permission_denied(
request, message=getattr(permission, 'message', None)
)
def check_object_permissions(self, request, obj):
"""
Check if the request should be permitted for a given object.
Raises an appropriate exception if the request is not permitted.
"""
for permission in self.get_permissions():
if not permission.has_object_permission(request, self, obj):
self.permission_denied(
request, message=getattr(permission, 'message', None)
)
def check_throttles(self, request):
"""
Check if request should be throttled.
Raises an appropriate exception if the request is throttled.
"""
for throttle in self.get_throttles():
if not throttle.allow_request(request, self):
self.throttled(request, throttle.wait())
def determine_version(self, request, *args, **kwargs):
"""
If versioning is being used, then determine any API version for the
incoming request. Returns a two-tuple of (version, versioning_scheme)
"""
if self.versioning_class is None:
return (None, None)
scheme = self.versioning_class()
return (scheme.determine_version(request, *args, **kwargs), scheme)
# Dispatch methods
def initialize_request(self, request, *args, **kwargs):
"""
Returns the initial request object.
"""
parser_context = self.get_parser_context(request)
return Request(
request,
parsers=self.get_parsers(),
authenticators=self.get_authenticators(),
negotiator=self.get_content_negotiator(),
parser_context=parser_context
)
def initial(self, request, *args, **kwargs):
"""
Runs anything that needs to occur prior to calling the method handler.
"""
self.format_kwarg = self.get_format_suffix(**kwargs)
# Perform content negotiation and store the accepted info on the request
neg = self.perform_content_negotiation(request)
request.accepted_renderer, request.accepted_media_type = neg
# Determine the API version, if versioning is in use.
version, scheme = self.determine_version(request, *args, **kwargs)
request.version, request.versioning_scheme = version, scheme
# Ensure that the incoming request is permitted
self.perform_authentication(request)
self.check_permissions(request)
self.check_throttles(request)
def finalize_response(self, request, response, *args, **kwargs):
"""
Returns the final response object.
"""
# Make the error obvious if a proper response is not returned
assert isinstance(response, HttpResponseBase), (
'Expected a `Response`, `HttpResponse` or `HttpStreamingResponse` '
'to be returned from the view, but received a `%s`'
% type(response)
)
if isinstance(response, Response):
if not getattr(request, 'accepted_renderer', None):
neg = self.perform_content_negotiation(request, force=True)
request.accepted_renderer, request.accepted_media_type = neg
response.accepted_renderer = request.accepted_renderer
response.accepted_media_type = request.accepted_media_type
response.renderer_context = self.get_renderer_context()
# Add new vary headers to the response instead of overwriting.
vary_headers = self.headers.pop('Vary', None)
if vary_headers is not None:
patch_vary_headers(response, cc_delim_re.split(vary_headers))
for key, value in self.headers.items():
response[key] = value
return response
def handle_exception(self, exc):
"""
Handle any exception that occurs, by returning an appropriate response,
or re-raising the error.
"""
if isinstance(exc, (exceptions.NotAuthenticated,
exceptions.AuthenticationFailed)):
# WWW-Authenticate header for 401 responses, else coerce to 403
auth_header = self.get_authenticate_header(self.request)
if auth_header:
exc.auth_header = auth_header
else:
exc.status_code = status.HTTP_403_FORBIDDEN
exception_handler = self.get_exception_handler()
context = self.get_exception_handler_context()
response = exception_handler(exc, context)
if response is None:
self.raise_uncaught_exception(exc)
response.exception = True
return response
def raise_uncaught_exception(self, exc):
if settings.DEBUG:
request = self.request
renderer_format = getattr(request.accepted_renderer, 'format')
use_plaintext_traceback = renderer_format not in ('html', 'api', 'admin')
request.force_plaintext_errors(use_plaintext_traceback)
raise
# Note: Views are made CSRF exempt from within `as_view` as to prevent
# accidental removal of this exemption in cases where `dispatch` needs to
# be overridden.
def dispatch(self, request, *args, **kwargs):
"""
`.dispatch()` is pretty much the same as Django's regular dispatch,
but with extra hooks for startup, finalize, and exception handling.
"""
self.args = args
self.kwargs = kwargs
request = self.initialize_request(request, *args, **kwargs)
self.request = request
self.headers = self.default_response_headers # deprecate?
try:
self.initial(request, *args, **kwargs)
# Get the appropriate handler method
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(),
self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
response = handler(request, *args, **kwargs)
except Exception as exc:
response = self.handle_exception(exc)
self.response = self.finalize_response(request, response, *args, **kwargs)
return self.response
def options(self, request, *args, **kwargs):
"""
Handler method for HTTP 'OPTIONS' request.
"""
if self.metadata_class is None:
return self.http_method_not_allowed(request, *args, **kwargs)
data = self.metadata_class().determine_metadata(request, self)
return Response(data, status=status.HTTP_200_OK)
| gpl-3.0 | 3,719,835,170,683,816,000 | 35.230303 | 85 | 0.627913 | false |
google-research/disentanglement_lib | disentanglement_lib/utils/resources.py | 1 | 1396 | # coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to access resources in package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
def get_file(path):
"""Returns path relative to file."""
from pkg_resources import resource_filename # pylint: disable=g-bad-import-order, g-import-not-at-top
return resource_filename("disentanglement_lib", path)
def get_files_in_folder(path):
import pkg_resources # pylint: disable=g-bad-import-order, g-import-not-at-top
for name in pkg_resources.resource_listdir("disentanglement_lib", path):
new_path = path + "/" + name
if not pkg_resources.resource_isdir("disentanglement_lib", new_path):
yield pkg_resources.resource_filename("disentanglement_lib", new_path)
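# Illustrative usage (not part of the original source; the folder name is hypothetical):
#   for path in get_files_in_folder("config/some_study"):
#       print(path)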
| apache-2.0 | 3,771,351,769,896,885,000 | 37.777778 | 104 | 0.746418 | false |
privacyidea/privacyidea | privacyidea/lib/auditmodules/sqlaudit.py | 1 | 24906 | # -*- coding: utf-8 -*-
#
# 2016-04-08 Cornelius Kölbel <[email protected]>
# Avoid consecutive if statements
#
# privacyIDEA
# May 11, 2014 Cornelius Kölbel, [email protected]
# http://www.privacyidea.org
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
__doc__ = """The SQL Audit Module is used to write audit entries to an SQL
database.
The SQL Audit Module is configured like this:
PI_AUDIT_MODULE = "privacyidea.lib.auditmodules.sqlaudit"
PI_AUDIT_KEY_PRIVATE = "tests/testdata/private.pem"
PI_AUDIT_KEY_PUBLIC = "tests/testdata/public.pem"
PI_AUDIT_SERVERNAME = "your choice"
Optional:
PI_AUDIT_SQL_URI = "sqlite://"
PI_AUDIT_SQL_TRUNCATE = True | False
PI_AUDIT_SQL_COLUMN_LENGTH = {"user": 60, "info": 10 ...}
If the PI_AUDIT_SQL_URI is omitted the Audit data is written to the
token database.
"""
import logging
from collections import OrderedDict
from privacyidea.lib.auditmodules.base import (Audit as AuditBase, Paginate)
from privacyidea.lib.crypto import Sign
from privacyidea.lib.pooling import get_engine
from privacyidea.lib.utils import censor_connect_string
from privacyidea.lib.lifecycle import register_finalizer
from privacyidea.lib.utils import truncate_comma_list, is_true
from sqlalchemy import MetaData, cast, String
from sqlalchemy import asc, desc, and_, or_
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
import datetime
import traceback
from six import string_types
from privacyidea.models import audit_column_length as column_length
from privacyidea.models import Audit as LogEntry
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
log = logging.getLogger(__name__)
metadata = MetaData()
# Define function to convert SQL DateTime objects to an ISO-format string
# By using <https://docs.sqlalchemy.org/en/13/core/compiler.html> we can
# differentiate between different dialects.
class to_isodate(expression.FunctionElement):
name = 'to_isodate'
@compiles(to_isodate, 'oracle')
@compiles(to_isodate, 'postgresql')
def fn_to_isodate(element, compiler, **kw):
return "to_char(%s, 'IYYY-MM-DD HH24:MI:SS')" % compiler.process(element.clauses, **kw)
@compiles(to_isodate, 'sqlite')
def fn_to_isodate(element, compiler, **kw):
# sqlite does not have a DateTime type, they are already in ISO format
return "%s" % compiler.process(element.clauses, **kw)
@compiles(to_isodate)
def fn_to_isodate(element, compiler, **kw):
# The four percent signs are necessary for two format substitutions
return "date_format(%s, '%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')" % compiler.process(
element.clauses, **kw)
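# Illustrative effect (not part of the original source): with the MySQL/MariaDB
# fallback above, to_isodate(<datetime column>) compiles roughly to
# date_format(<datetime column>, '%Y-%m-%d %H:%i:%s'), so DateTime columns can be
# matched against ISO-formatted search strings with LIKE.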
class Audit(AuditBase):
"""
This is the SQLAudit module, which writes the audit entries
to an SQL database table.
It requires the configuration parameters in pi.cfg:
* PI_AUDIT_KEY_PUBLIC
* PI_AUDIT_KEY_PRIVATE
If you want to host the SQL Audit database in another DB than the
token DB, you can use:
* PI_AUDIT_SQL_URI
It also takes the optional parameters:
* PI_AUDIT_POOL_SIZE
* PI_AUDIT_POOL_RECYCLE
* PI_AUDIT_SQL_TRUNCATE
* PI_AUDIT_NO_SIGN
You can use PI_AUDIT_NO_SIGN = True to avoid signing of the audit log.
If PI_CHECK_OLD_SIGNATURES = True old style signatures (text-book RSA) will
be checked as well, otherwise they will be marked as 'FAIL'.
"""
is_readable = True
def __init__(self, config=None, startdate=None):
super(Audit, self).__init__(config, startdate)
self.name = "sqlaudit"
self.sign_data = not self.config.get("PI_AUDIT_NO_SIGN")
self.sign_object = None
self.verify_old_sig = self.config.get('PI_CHECK_OLD_SIGNATURES')
if self.sign_data:
self.read_keys(self.config.get("PI_AUDIT_KEY_PUBLIC"),
self.config.get("PI_AUDIT_KEY_PRIVATE"))
self.sign_object = Sign(self.private, self.public)
# Read column_length from the config file
config_column_length = self.config.get("PI_AUDIT_SQL_COLUMN_LENGTH", {})
# fill the missing parts with the default from the models
self.custom_column_length = {k: (v if k not in config_column_length else config_column_length[k])
for k, v in column_length.items()}
# We can use "sqlaudit" as the key because the SQLAudit connection
# string is fixed for a running privacyIDEA instance.
# In other words, we will not run into any problems with changing connect strings.
self.engine = get_engine(self.name, self._create_engine)
# create a configured "Session" class. ``scoped_session`` is not
# necessary because we do not share session objects among threads.
# We use it anyway as a safety measure.
Session = scoped_session(sessionmaker(bind=self.engine))
self.session = Session()
# Ensure that the connection gets returned to the pool when the request has
# been handled. This may close an already-closed session, but this is not a problem.
register_finalizer(self._finalize_session)
self.session._model_changes = {}
def _create_engine(self):
"""
:return: a new SQLAlchemy engine connecting to the database specified in PI_AUDIT_SQL_URI.
"""
# an Engine, which the Session will use for connection
# resources
connect_string = self.config.get("PI_AUDIT_SQL_URI", self.config.get(
"SQLALCHEMY_DATABASE_URI"))
log.debug("using the connect string {0!s}".format(censor_connect_string(connect_string)))
try:
pool_size = self.config.get("PI_AUDIT_POOL_SIZE", 20)
engine = create_engine(
connect_string,
pool_size=pool_size,
pool_recycle=self.config.get("PI_AUDIT_POOL_RECYCLE", 600))
log.debug("Using SQL pool size of {}".format(pool_size))
except TypeError:
# SQLite does not support pool_size
engine = create_engine(connect_string)
log.debug("Using no SQL pool_size.")
return engine
def _finalize_session(self):
""" Close current session and dispose connections of db engine"""
self.session.close()
self.engine.dispose()
def _truncate_data(self):
"""
Truncate self.audit_data according to the self.custom_column_length.
:return: None
"""
for column, l in self.custom_column_length.items():
if column in self.audit_data:
data = self.audit_data[column]
if isinstance(data, string_types):
if column == "policies":
# The policies column is shortened per comma entry
data = truncate_comma_list(data, l)
else:
data = data[:l]
self.audit_data[column] = data
@staticmethod
def _create_filter(param, timelimit=None):
"""
create a filter condition for the logentry
"""
conditions = []
param = param or {}
for search_key in param.keys():
search_value = param.get(search_key)
if search_key == "allowed_audit_realm":
# Add each realm in the allowed_audit_realm list to the
# search condition
realm_conditions = []
for realm in search_value:
realm_conditions.append(LogEntry.realm == realm)
filter_realm = or_(*realm_conditions)
conditions.append(filter_realm)
# We do not search if the search value only consists of '*'
elif search_value.strip() != '' and search_value.strip('*') != '':
try:
if search_key == "success":
# "success" is the only integer.
search_value = search_value.strip("*")
conditions.append(getattr(LogEntry, search_key) ==
int(is_true(search_value)))
else:
# All other keys are compared as strings
column = getattr(LogEntry, search_key)
if search_key in ["date", "startdate"]:
# but we cast a column with a DateTime type to an
# ISO-format string first
column = to_isodate(column)
search_value = search_value.replace('*', '%')
if '%' in search_value:
conditions.append(column.like(search_value))
else:
conditions.append(column == search_value)
except Exception as exx:
# The search_key was no search key but some
# bullshit stuff in the param
log.debug("Not a valid searchkey: {0!s}".format(exx))
if timelimit:
conditions.append(LogEntry.date >= datetime.datetime.now() -
timelimit)
        # Combine them with and_ into a single BooleanClauseList
filter_condition = and_(*conditions)
return filter_condition
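    # Illustrative example (not part of the original source): a call like
    #   Audit._create_filter({"user": "admin*", "success": "1"})
    # yields roughly and_(LogEntry.user.like('admin%'), LogEntry.success == 1)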
def get_total(self, param, AND=True, display_error=True, timelimit=None):
"""
This method returns the total number of audit entries
in the audit store
"""
count = 0
# if param contains search filters, we build the search filter
# to only return the number of those entries
filter_condition = self._create_filter(param, timelimit=timelimit)
try:
count = self.session.query(LogEntry.id) \
.filter(filter_condition) \
.count()
finally:
self.session.close()
return count
def finalize_log(self):
"""
This method is used to log the data.
It should hash the data and do a hash chain and sign the data
"""
try:
self.audit_data["policies"] = ",".join(self.audit_data.get("policies", []))
if self.config.get("PI_AUDIT_SQL_TRUNCATE"):
self._truncate_data()
if "tokentype" in self.audit_data:
log.warning("We have a wrong 'tokentype' key. This should not happen. Fix it!. "
"Error occurs in action: {0!r}.".format(self.audit_data.get("action")))
if not "token_type" in self.audit_data:
self.audit_data["token_type"] = self.audit_data.get("tokentype")
if self.audit_data.get("startdate"):
duration = datetime.datetime.now() - self.audit_data.get("startdate")
else:
duration = None
le = LogEntry(action=self.audit_data.get("action"),
success=int(self.audit_data.get("success", 0)),
serial=self.audit_data.get("serial"),
token_type=self.audit_data.get("token_type"),
user=self.audit_data.get("user"),
realm=self.audit_data.get("realm"),
resolver=self.audit_data.get("resolver"),
administrator=self.audit_data.get("administrator"),
action_detail=self.audit_data.get("action_detail"),
info=self.audit_data.get("info"),
privacyidea_server=self.audit_data.get("privacyidea_server"),
client=self.audit_data.get("client", ""),
loglevel=self.audit_data.get("log_level"),
clearance_level=self.audit_data.get("clearance_level"),
policies=self.audit_data.get("policies"),
startdate=self.audit_data.get("startdate"),
duration=duration
)
self.session.add(le)
self.session.commit()
# Add the signature
if self.sign_data and self.sign_object:
s = self._log_to_string(le)
sign = self.sign_object.sign(s)
le.signature = sign
self.session.merge(le)
self.session.commit()
except Exception as exx: # pragma: no cover
# in case of a Unicode Error in _log_to_string() we won't have
# a signature, but the log entry is available
log.error("exception {0!r}".format(exx))
log.error("DATA: {0!s}".format(self.audit_data))
log.debug("{0!s}".format(traceback.format_exc()))
self.session.rollback()
finally:
self.session.close()
# clear the audit data
self.audit_data = {}
def _check_missing(self, audit_id):
"""
Check if the audit log contains the entries before and after
the given id.
TODO: We can not check at the moment if the first or the last entries
were deleted. If we want to do this, we need to store some signed
meta information:
1. Which one was the first entry. (use initialize_log)
2. Which one was the last entry.
"""
res = False
try:
id_bef = self.session.query(LogEntry.id
).filter(LogEntry.id ==
int(audit_id) - 1).count()
id_aft = self.session.query(LogEntry.id
).filter(LogEntry.id ==
int(audit_id) + 1).count()
# We may not do a commit!
# self.session.commit()
if id_bef and id_aft:
res = True
except Exception as exx: # pragma: no cover
log.error("exception {0!r}".format(exx))
log.debug("{0!s}".format(traceback.format_exc()))
# self.session.rollback()
finally:
# self.session.close()
pass
return res
@staticmethod
def _log_to_string(le):
"""
This function creates a string from the logentry so
that this string can be signed.
Note: Not all elements of the LogEntry are used to generate the
string (the Signature is not!), otherwise we could have used pickle
:param le: LogEntry object containing the data
:type le: LogEntry
:rtype str
"""
s = u"id=%s,date=%s,action=%s,succ=%s,serial=%s,t=%s,u=%s,r=%s,adm=%s," \
u"ad=%s,i=%s,ps=%s,c=%s,l=%s,cl=%s" % (le.id,
le.date,
le.action,
le.success,
le.serial,
le.token_type,
le.user,
le.realm,
le.administrator,
le.action_detail,
le.info,
le.privacyidea_server,
le.client,
le.loglevel,
le.clearance_level)
# If we have the new log entries, we also add them for signing and verification.
if le.startdate:
s += ",{0!s}".format(le.startdate)
if le.duration:
s += ",{0!s}".format(le.duration)
return s
@staticmethod
def _get_logentry_attribute(key):
"""
This function returns the LogEntry attribute for the given key value
"""
sortname = {'number': LogEntry.id,
'action': LogEntry.action,
'success': LogEntry.success,
'serial': LogEntry.serial,
'date': LogEntry.date,
'startdate': LogEntry.startdate,
'duration': LogEntry.duration,
'token_type': LogEntry.token_type,
'user': LogEntry.user,
'realm': LogEntry.realm,
'administrator': LogEntry.administrator,
'action_detail': LogEntry.action_detail,
'info': LogEntry.info,
'privacyidea_server': LogEntry.privacyidea_server,
'client': LogEntry.client,
'log_level': LogEntry.loglevel,
'policies': LogEntry.policies,
'clearance_level': LogEntry.clearance_level}
return sortname.get(key)
def csv_generator(self, param=None, user=None, timelimit=None):
"""
Returns the audit log as csv file.
:param timelimit: Limit the number of dumped entries by time
:type timelimit: datetime.timedelta
:param param: The request parameters
:type param: dict
:param user: The user, who issued the request
:return: None. It yields results as a generator
"""
filter_condition = self._create_filter(param,
timelimit=timelimit)
logentries = self.session.query(LogEntry).filter(filter_condition).all()
for le in logentries:
audit_dict = self.audit_entry_to_dict(le)
yield u",".join([u"'{0!s}'".format(x) for x in audit_dict.values()]) + u"\n"
def get_count(self, search_dict, timedelta=None, success=None):
# create filter condition
filter_condition = self._create_filter(search_dict)
conditions = [filter_condition]
if success is not None:
conditions.append(LogEntry.success == int(is_true(success)))
if timedelta is not None:
conditions.append(LogEntry.date >= datetime.datetime.now() -
timedelta)
filter_condition = and_(*conditions)
log_count = self.session.query(LogEntry).filter(filter_condition).count()
return log_count
def search(self, search_dict, page_size=15, page=1, sortorder="asc",
timelimit=None):
"""
This function returns the audit log as a Pagination object.
:param timelimit: Only audit entries newer than this timedelta will
be searched
:type timelimit: timedelta
"""
page = int(page)
page_size = int(page_size)
paging_object = Paginate()
paging_object.page = page
paging_object.total = self.get_total(search_dict, timelimit=timelimit)
if page > 1:
paging_object.prev = page - 1
if paging_object.total > (page_size * page):
paging_object.next = page + 1
auditIter = self.search_query(search_dict, page_size=page_size,
page=page, sortorder=sortorder,
timelimit=timelimit)
while True:
try:
le = next(auditIter)
# Fill the list
paging_object.auditdata.append(self.audit_entry_to_dict(le))
except StopIteration as _e:
log.debug("Interation stopped.")
break
except UnicodeDecodeError as _e:
# Unfortunately if one of the audit entries fails, the whole
# iteration stops and we return an empty paging_object.
# TODO: Check if we can return the other entries in the auditIter
# or some meaningful error for the user.
log.warning('Could not read audit log entry! '
'Possible database encoding mismatch.')
log.debug("{0!s}".format(traceback.format_exc()))
return paging_object
def search_query(self, search_dict, page_size=15, page=1, sortorder="asc",
sortname="number", timelimit=None):
"""
This function returns the audit log as an iterator on the result
:param timelimit: Only audit entries newer than this timedelta will
be searched
:type timelimit: timedelta
"""
logentries = None
try:
limit = int(page_size)
offset = (int(page) - 1) * limit
# create filter condition
filter_condition = self._create_filter(search_dict,
timelimit=timelimit)
if sortorder == "desc":
logentries = self.session.query(LogEntry).filter(
filter_condition).order_by(
desc(self._get_logentry_attribute("number"))).limit(
limit).offset(offset)
else:
logentries = self.session.query(LogEntry).filter(
filter_condition).order_by(
asc(self._get_logentry_attribute("number"))).limit(
limit).offset(offset)
except Exception as exx: # pragma: no cover
log.error("exception {0!r}".format(exx))
log.debug("{0!s}".format(traceback.format_exc()))
self.session.rollback()
finally:
self.session.close()
if logentries is None:
return iter([])
else:
return iter(logentries)
def clear(self):
"""
Deletes all entries in the database table.
This is only used for test cases!
:return:
"""
self.session.query(LogEntry).delete()
self.session.commit()
def audit_entry_to_dict(self, audit_entry):
sig = None
if self.sign_data:
try:
sig = self.sign_object.verify(self._log_to_string(audit_entry),
audit_entry.signature,
self.verify_old_sig)
except UnicodeDecodeError as _e:
# TODO: Unless we trace and eliminate the broken unicode in the
# audit_entry, we will get issues when packing the response.
log.warning('Could not verify log entry! We get invalid values '
'from the database, please check the encoding.')
log.debug('{0!s}'.format(traceback.format_exc()))
is_not_missing = self._check_missing(int(audit_entry.id))
# is_not_missing = True
audit_dict = OrderedDict()
audit_dict['number'] = audit_entry.id
audit_dict['date'] = audit_entry.date.isoformat()
audit_dict['sig_check'] = "OK" if sig else "FAIL"
audit_dict['missing_line'] = "OK" if is_not_missing else "FAIL"
audit_dict['action'] = audit_entry.action
audit_dict['success'] = audit_entry.success
audit_dict['serial'] = audit_entry.serial
audit_dict['token_type'] = audit_entry.token_type
audit_dict['user'] = audit_entry.user
audit_dict['realm'] = audit_entry.realm
audit_dict['resolver'] = audit_entry.resolver
audit_dict['administrator'] = audit_entry.administrator
audit_dict['action_detail'] = audit_entry.action_detail
audit_dict['info'] = audit_entry.info
audit_dict['privacyidea_server'] = audit_entry.privacyidea_server
audit_dict['policies'] = audit_entry.policies
audit_dict['client'] = audit_entry.client
audit_dict['log_level'] = audit_entry.loglevel
audit_dict['clearance_level'] = audit_entry.clearance_level
audit_dict['startdate'] = audit_entry.startdate.isoformat() if audit_entry.startdate else None
audit_dict['duration'] = audit_entry.duration.total_seconds() if audit_entry.duration else None
return audit_dict
| agpl-3.0 | -2,165,985,169,104,467,700 | 42.462478 | 105 | 0.557541 | false |
jhuapl-marti/marti | crits/config/database.py | 1 | 1685 | # This is an example file. You should copy this to "database.py" and
# make your changes there.
# Modifying this example file will not change the settings that CRITs uses.
# MongoDB connection information
MONGO_HOST = 'localhost' # server to connect to
MONGO_PORT = 27017 # port MongoD is running on
MONGO_DATABASE = 'crits' # database name to connect to
# The following optional settings should only be changed if you specifically
# enabled and configured them during your MongoDB installation
# See http://docs.mongodb.org/v2.4/administration/security/ regarding implementation
MONGO_SSL = False # whether MongoD has SSL enabled
MONGO_USER = '' # mongo user with "readWrite" role in the database
MONGO_PASSWORD = '' # password for the mongo user
# Set this to a sufficiently long random string. We recommend running
# the following code from a python shell to generate the string and pasting
# the output here.
#
# from django.utils.crypto import get_random_string as grs
# print grs(50, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
SECRET_KEY = 'wM3FAP5!]u08Ooe&lBwJ&V#B72t@@q@>[kCutnYxW>V!!Ic5rc'
# DB to use for files
FILE_DB = GRIDFS # Set to S3 (NO QUOTES) to use S3. You'll also want to set
# the stuff below and create your buckets.
# Separator to use in bucket names (if needed)
#S3_SEPARATOR = '.'
# Unique ID to append to bucket names (if needed)
#S3_ID=""
# S3 credentials (if needed)
#AWS_ACCESS_KEY_ID = ""
#AWS_SECRET_ACCESS_KEY = ""
# If your S3 location is somewhere other than s3.amazonaws.com, then you
# can specify a different hostname here. (if needed)
#S3_HOSTNAME = ""
| mit | 8,853,616,078,283,250,000 | 40.097561 | 84 | 0.709792 | false |
a-holm/MachinelearningAlgorithms | Classification/KernelSVM/regularSoftMarginSVM.py | 1 | 1617 | # -*- coding: utf-8 -*-
"""Soft Margin SVM classification with kernels for machine learning.
Soft margin SVM is basically an SVM (see folder **supportVectorMachine**) which
has some 'slack' and allows features to be 'wrongly' classified to avoid
overfitting the classifier. This also includes kernels. Kernels use the inner
product to help us transform the feature space to make it possible for Support
Vector Machines to create a good hyperplane with non-linear feature sets.
This file uses sklearn and other common python libraries to solve the SVM and
includes contemporary ways to use SVMs, including how to separate more than two
classes of data with One-vs-rest (OVR) and One-vs-One (OVO) because SVMs are
binary classifiers so intially they only classify into two classes.
dataset is breast cancer data from: http://archive.ics.uci.edu/ml/datasets.html
Example:
    $ python regularSoftMarginSVM.py
Todo:
*
"""
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.model_selection import train_test_split
df = pd.read_csv('breast-cancer-wisconsin.data')
df.replace('?', -99999, inplace=True) # make missing attribute values outliers
df.drop(['id'], 1, inplace=True) # remove useless column
X = np.array(df.drop(['class'], 1)) # features
y = np.array(df['class']) # labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = svm.SVC(kernel='linear', C=100.0) # Linear kernel with soft margin
clf.fit(X_train, y_train)
# Could have saved in a pickle, but not a very large data set.
accuracy = clf.score(X_test, y_test)
print(accuracy)
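# Added sketch (not in the original script): the comment above notes the classifier
# "could have saved in a pickle"; a minimal way to do that with the standard library
# is shown below. The filename is a hypothetical example.
import pickle
with open('soft_margin_svm.pickle', 'wb') as model_file:
    pickle.dump(clf, model_file)  # serialize the trained classifier
with open('soft_margin_svm.pickle', 'rb') as model_file:
    clf_loaded = pickle.load(model_file)  # reload it later without retraining
print(clf_loaded.score(X_test, y_test))  # should match the accuracy printed above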
| mit | -7,828,566,781,055,919,000 | 35.75 | 79 | 0.746444 | false |
openafs-contrib/afspy | afs/service/VolumeService.py | 1 | 7943 | import string,json,logging
from afs.model.Volume import Volume
from afs.model.ExtendedVolumeAttributes import ExtVolAttr
from afs.service.BaseService import BaseService, task_wrapper
from afs.service.VolumeServiceError import VolumeServiceError
from afs.util import misc
import afs
class VolumeService (BaseService):
"""
    Provides services for volume management.
    The cell name is set in the methods so that this service
    can be used for more than one cell.
"""
def __init__(self, conf=None):
BaseService.__init__(self, conf, LLAList=["fs", "vol"])
def get_object(self, obj_or_param) :
"""
        return an object, regardless of whether the input was an object or a unique parameter.
        The unique parameter is name_or_id (either a volume name or a volume id).
"""
if isinstance(obj_or_param, Volume) :
this_Volume = obj_or_param
else :
this_Volume = Volume()
if misc.is_name(obj_or_param) :
this_Volume.name = obj_or_param
else :
this_Volume.vid = obj_or_param
return this_Volume
@task_wrapper
def get_volume_group(self, obj_or_param, _thread_name="", _user="", cached=True, async=False):
"""
Retrieve Volume Group.
Returns dict "RW": RWVolObj, "RO": [ROVolObj1, ROVolObj2, ...]
"""
self.Logger.debug("get_volume_group: entering with obj_or_param=%s" % obj_or_param)
this_Volume = self.get_object(obj_or_param)
volume_group = {"RW" : None, "RO" : [], "BK": None}
if cached :
if this_Volume.name != "" :
volume_list = self.DBManager.get_from_cache(Volume, name=this_Volume.name, must_be_unique=False)
else :
volume_list = self.DBManager.get_from_cache(Volume, vid=this_Volume.vid, must_be_unique=False)
self.Logger.debug("volume_list=%s" % volume_list)
if volume_list != [] and volume_list != None :
parent_id = volume_list[0].parent_id
volume_list = self.DBManager.get_from_cache(Volume, parent_id=parent_id, must_be_unique=False)
for v in volume_list :
if v.type == "RW" :
volume_group["RW"] = v
elif v.type == "RO" :
volume_group["RO"].append(v)
elif v.type == "BK" :
volume_group["BK"] = v
else :
raise VolumeServiceError("get_volume_group: invalid volume type encountered: %s" % v.type)
return self.do_return(_thread_name, volume_group)
self.Logger.info("found no VolumeGroup for obj_or_param %s in cache. Trying live-system." % obj_or_param)
vol = self._volLLA.examine(this_Volume, _cfg=self._CFG, _user=_user)
if vol == None :
self.Logger.debug("get_volume_group: returning live: None")
return self.do_return(_thread_name, None)
vol = vol[0]
self.Logger.debug("get_volume_group: got vol=%s" % vol)
# depending on what we got fill in the others
rw_vol = self.get_object(vol.parent_id)
ro_vol = self.get_object(vol.readonly_id)
bk_vol = self.get_object(vol.backup_id)
if vol.parent_id != 0 :
volume_group["RW"] = self._volLLA.examine(rw_vol, _cfg=self._CFG, _user=_user)[0]
volume_group["RW"].fileserver_uuid = afs.LOOKUP_UTIL[self._CFG.cell].get_fsuuid(volume_group["RW"].servername, self._CFG)
if vol.readonly_id != 0 :
volume_group["RO"] = self._volLLA.examine(ro_vol, _cfg=self._CFG, _user=_user)
for volume in volume_group["RO"] :
volume.fileserver_uuid = afs.LOOKUP_UTIL[self._CFG.cell].get_fsuuid(volume.servername, self._CFG)
        if vol.backup_id != 0 :
            volume_group["BK"] = self._volLLA.examine(bk_vol, _cfg=self._CFG, _user=_user)[0]
            # examine first, then resolve the fileserver UUID (the previous order dereferenced None)
            volume_group["BK"].fileserver_uuid = afs.LOOKUP_UTIL[self._CFG.cell].get_fsuuid(volume_group["BK"].servername, self._CFG)
if self._CFG.DB_CACHE :
if volume_group["RW"] != None :
self.DBManager.set_into_cache(Volume, volume_group["RW"], vid=volume_group["RW"].vid, fileserver_uuid=volume_group["RW"].fileserver_uuid)
for volume in volume_group["RO"] :
self.DBManager.set_into_cache(Volume, volume, vid=volume.vid, fileserver_uuid=volume.fileserver_uuid)
if volume_group["BK"] != None :
self.DBManager.set_into_cache(Volume, volume_group["BK"], vid=volume_group["BK"].vid, fileserver_uuid=volume_group["BK"].fileserver_uuid)
self.Logger.debug("get_volume_group: returning: %s " % (volume_group))
return self.do_return(_thread_name, volume_group)
@task_wrapper
def get_volume(self, obj_or_param, fileserver="", _thread_name="", _user="", cached=True, async=False):
"""
Retrieve Volume Information by Name or ID
Always return a list
"""
this_Volume = self.get_object(obj_or_param)
if fileserver != "" :
wanted_fileserver_uuid = afs.LOOKUP_UTIL[self._CFG.cell].get_fsuuid(fileserver, self._CFG)
else : # make it an invalid UUID
wanted_fileserver_uuid = "XXX"
self.Logger.debug("get_volume: called with obj_or_param=%s, fileserver=%s->wanted_fileserver_uuid=%s, _user=%s" % (obj_or_param, fileserver, wanted_fileserver_uuid, _user))
if cached :
if fileserver != "" :
if this_Volume.name != "" :
volume_list = self.DBManager.get_from_cache(Volume, name=this_Volume.name, fileserver_uuid=wanted_fileserver_uuid, must_be_unique=False)
else :
volume_list = self.DBManager.get_from_cache(Volume, vid=this_Volume.vid, fileserver_uuid=wanted_fileserver_uuid, must_be_unique=False)
else :
if this_Volume.name != "" :
volume_list = self.DBManager.get_from_cache(Volume, name=this_Volume.name, must_be_unique=False)
else :
volume_list = self.DBManager.get_from_cache(Volume, vid=this_Volume.vid, must_be_unique=False)
if volume_list != [] and volume_list != None :
return self.do_return(_thread_name, volume_list)
volume_list = self._volLLA.examine(this_Volume, _cfg=self._CFG, _user=_user)
self.Logger.debug("get_volume: got volume_list from LLA : %s" % volume_list)
if volume_list == None :
return self.do_return(_thread_name, None)
to_be_removed = []
for volume in volume_list :
volume.fileserver_uuid = afs.LOOKUP_UTIL[self._CFG.cell].get_fsuuid(volume.servername, self._CFG)
if volume.fileserver_uuid != wanted_fileserver_uuid and wanted_fileserver_uuid != "XXX" :
to_be_removed.append(volume)
for volume in to_be_removed :
volume_list.remove(volume)
self.Logger.debug("get_volume: v=%s" % volume)
# XXX Need a sync mechanism, in order to update Volume-entries, (since we only update if a volume has not been moved
# to another server)
if self._CFG.DB_CACHE :
for volume in volume_list :
self.DBManager.set_into_cache(Volume, volume, vid=volume.vid, fileserver_uuid=volume.fileserver_uuid)
return self.do_return(_thread_name, volume_list)
def get_extended_volume_attributes(self, vid) :
        cached_obj = self.DBManager.get_from_cache(ExtVolAttr, vid=vid)
return cached_obj
def save_extended_volume_attributes(self, Obj):
cached_obj = self.DBManager.set_into_cache(ExtVolAttr, Obj, vid=Obj.vid)
return cached_obj
| bsd-2-clause | 1,151,720,227,587,487,700 | 49.916667 | 180 | 0.590331 | false |
chunshen1987/iSS | utilities/filter_SMASH_pdg.py | 1 | 1948 | #!/usr/bin/env python
import numpy as np
import sys
if len(sys.argv) < 2:
print("{} filename".format(sys.argv[0]))
exit()
filename = str(sys.argv[1])
f = open(filename, "r")
SMASH_table = f.read().split("\n")
f.close()
pdg_list = []
mass_list = []
header_list = []
decay_list = []
iline = 0
while iline < len(SMASH_table) - 1:
pdg_list.append(SMASH_table[iline].split()[0])
header_list.append(SMASH_table[iline])
mass = float(SMASH_table[iline].split()[2])
mass_list.append(mass)
num_decay_channels = int(SMASH_table[iline].split()[-1])
decay_block = []
for i in range(num_decay_channels):
iline += 1
decay_block.append(SMASH_table[iline])
decay_list.append(decay_block)
iline += 1
mass_list = np.array(mass_list)
indx_list = np.argsort(mass_list)
pdg_list_wbbar = []
f = open("pdg-SMASH.dat", "w")
for idx in indx_list:
ipart = int(pdg_list[idx])
pdg_code_dissected = [int(d) for d in str(abs(ipart))]
# here are filters
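    # (added note) PDG Monte Carlo codes encode quark content in their digits: for mesons
    # the last digit is 2J+1 and the two digits before it are the quark flavours, for
    # baryons the three digits before the last are the quarks. Flavour codes 4, 5 and 6
    # mean charm, bottom and top, so any value above 3 in those positions marks a
    # heavy-quark hadron, which the checks below filter out.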
if len(pdg_code_dissected) < 3:
# photon
continue
if pdg_code_dissected[-2] > 3 or pdg_code_dissected[-3] > 3:
# heavy quark mesons or baryons
continue
if len(pdg_code_dissected) > 3 and pdg_code_dissected[-4] > 3:
# heavy quark baryons
continue
# passed all the filters, add it to the final pdg list
pdg_list_wbbar.append(ipart)
if len(pdg_code_dissected) > 3 and pdg_code_dissected[-4] != 0:
# it is baryons: we need to add anti-baryons in the final pdg list
if ipart < 0:
print("something wrong!")
exit(1)
pdg_list_wbbar.append(-ipart)
f.write("{0}\n".format(header_list[idx]))
for iline in range(len(decay_list[idx])):
f.write("{0}\n".format(decay_list[idx][iline]))
f.close()
f2 = open("chosen_particles_SMASH.dat", "w")
for ipart in pdg_list_wbbar:
f2.write("{0}\n".format(ipart))
f2.close()
| mit | 2,562,141,826,844,263,400 | 25.684932 | 74 | 0.61037 | false |
vntarasov/openpilot | common/kalman/tests/test_simple_kalman.py | 1 | 2237 | import unittest
import random
import timeit
import numpy as np
from common.kalman.simple_kalman import KF1D
from common.kalman.simple_kalman_old import KF1D as KF1D_old
class TestSimpleKalman(unittest.TestCase):
def setUp(self):
dt = 0.01
x0_0 = 0.0
x1_0 = 0.0
A0_0 = 1.0
A0_1 = dt
A1_0 = 0.0
A1_1 = 1.0
C0_0 = 1.0
C0_1 = 0.0
K0_0 = 0.12287673
K1_0 = 0.29666309
self.kf_old = KF1D_old(x0=np.array([[x0_0], [x1_0]]),
A=np.array([[A0_0, A0_1], [A1_0, A1_1]]),
C=np.array([C0_0, C0_1]),
K=np.array([[K0_0], [K1_0]]))
self.kf = KF1D(x0=[[x0_0], [x1_0]],
A=[[A0_0, A0_1], [A1_0, A1_1]],
C=[C0_0, C0_1],
K=[[K0_0], [K1_0]])
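    # (added note) Both filters model the same 1D constant-velocity system: the state is
    # x = [position, velocity], A integrates velocity over dt, C observes only the
    # position, and K is a fixed, precomputed (steady-state) Kalman gain, so update()
    # never solves for the covariance online.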
def test_getter_setter(self):
self.kf.x = [[1.0], [1.0]]
self.assertEqual(self.kf.x, [[1.0], [1.0]])
def update_returns_state(self):
x = self.kf.update(100)
self.assertEqual(x, self.kf.x)
def test_old_equal_new(self):
for _ in range(1000):
v_wheel = random.uniform(0, 200)
x_old = self.kf_old.update(v_wheel)
x = self.kf.update(v_wheel)
# Compare the output x, verify that the error is less than 1e-4
np.testing.assert_almost_equal(x_old[0], x[0])
np.testing.assert_almost_equal(x_old[1], x[1])
def test_new_is_faster(self):
setup = """
import numpy as np
from common.kalman.simple_kalman import KF1D
from common.kalman.simple_kalman_old import KF1D as KF1D_old
dt = 0.01
x0_0 = 0.0
x1_0 = 0.0
A0_0 = 1.0
A0_1 = dt
A1_0 = 0.0
A1_1 = 1.0
C0_0 = 1.0
C0_1 = 0.0
K0_0 = 0.12287673
K1_0 = 0.29666309
kf_old = KF1D_old(x0=np.array([[x0_0], [x1_0]]),
A=np.array([[A0_0, A0_1], [A1_0, A1_1]]),
C=np.array([C0_0, C0_1]),
K=np.array([[K0_0], [K1_0]]))
kf = KF1D(x0=[[x0_0], [x1_0]],
A=[[A0_0, A0_1], [A1_0, A1_1]],
C=[C0_0, C0_1],
K=[[K0_0], [K1_0]])
"""
kf_speed = timeit.timeit("kf.update(1234)", setup=setup, number=10000)
kf_old_speed = timeit.timeit("kf_old.update(1234)", setup=setup, number=10000)
self.assertTrue(kf_speed < kf_old_speed / 4)
| mit | -4,189,161,890,279,061,000 | 25.630952 | 82 | 0.527939 | false |
googleapis/python-analytics-data | samples/snippets/run_report_with_dimension_filter.py | 1 | 2783 | #!/usr/bin/env python
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Analytics Data API sample application demonstrating the usage of
dimension and metric filters in a report.
See https://developers.google.com/analytics/devguides/reporting/data/v1/rest/v1beta/properties/runReport#body.request_body.FIELDS.dimension_filter
for more information.
"""
# [START analyticsdata_run_report_with_dimension_filter]
from google.analytics.data_v1beta import BetaAnalyticsDataClient
from google.analytics.data_v1beta.types import DateRange
from google.analytics.data_v1beta.types import Dimension
from google.analytics.data_v1beta.types import Filter
from google.analytics.data_v1beta.types import FilterExpression
from google.analytics.data_v1beta.types import Metric
from google.analytics.data_v1beta.types import RunReportRequest
from run_report import print_run_report_response
def run_sample():
"""Runs the sample."""
# TODO(developer): Replace this variable with your Google Analytics 4
# property ID before running the sample.
property_id = "YOUR-GA4-PROPERTY-ID"
run_report_with_dimension_filter(property_id)
def run_report_with_dimension_filter(property_id="YOUR-GA4-PROPERTY-ID"):
"""Runs a report using a dimension filter. The call returns a time series
report of `eventCount` when `eventName` is `first_open` for each date.
This sample uses relative date range values. See https://developers.google.com/analytics/devguides/reporting/data/v1/rest/v1beta/DateRange
for more information.
"""
client = BetaAnalyticsDataClient()
request = RunReportRequest(
property=f"properties/{property_id}",
dimensions=[Dimension(name="date")],
metrics=[Metric(name="eventCount")],
date_ranges=[DateRange(start_date="7daysAgo", end_date="yesterday")],
dimension_filter=FilterExpression(
filter=Filter(
field_name="eventName",
string_filter=Filter.StringFilter(value="first_open"),
)
),
)
response = client.run_report(request)
print_run_report_response(response)
# [END analyticsdata_run_report_with_dimension_filter]
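# (added note) FilterExpression is composable: besides a single `filter`, the GA4 Data
# API also accepts `and_group`, `or_group` and `not_expression`, so several dimension
# conditions can be combined into one `dimension_filter` when needed.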
if __name__ == "__main__":
run_sample()
| apache-2.0 | 450,873,576,677,864,000 | 37.123288 | 146 | 0.736974 | false |
butozerca/fireplace | fireplace/cards/blackrock/collectible.py | 1 | 3004 | from ..utils import *
##
# Minions
# Flamewaker
class BRM_002:
events = OWN_SPELL_PLAY.after(Hit(RANDOM_ENEMY_MINION, 1) * 2)
# Twilight Whelp
class BRM_004:
play = HOLDING_DRAGON & Buff(SELF, "BRM_004e")
# Imp Gang Boss
class BRM_006:
events = SELF_DAMAGE.on(Summon(CONTROLLER, "BRM_006t"))
# Dark Iron Skulker
class BRM_008:
play = Hit(ENEMY_MINIONS - DAMAGED, 2)
# Volcanic Lumberer
class BRM_009:
cost = lambda self, i: i - len(self.game.minions_killed_this_turn)
# Druid of the Flame (Firecat Form)
class BRM_010a:
play = Morph(SELF, "BRM_010t")
# Druid of the Flame (Firehawk Form)
class BRM_010b:
play = Morph(SELF, "BRM_010t2")
# Fireguard Destroyer
class BRM_012:
play = Buff(SELF, "BRM_012e") * RandomNumber(1, 2, 3, 4)
# Core Rager
class BRM_014:
play = Find(CONTROLLER_HAND) | Buff(SELF, "BRM_014e")
# Axe Flinger
class BRM_016:
events = SELF_DAMAGE.on(Hit(ENEMY_HERO, 2))
# Grim Patron
class BRM_019:
events = SELF_DAMAGE.on(Dead(SELF) | Summon(CONTROLLER, "BRM_019"))
# Dragonkin Sorcerer
class BRM_020:
events = Play(CONTROLLER, SPELL, SELF).on(Buff(SELF, "BRM_020e"))
# Dragon Egg
class BRM_022:
events = SELF_DAMAGE.on(Summon(CONTROLLER, "BRM_022t"))
# Drakonid Crusher
class BRM_024:
play = (Attr(ENEMY_HERO, "health") <= 15) & Buff(SELF, "BRM_024e")
# Volcanic Drake
class BRM_025:
cost = lambda self, i: i - len(self.game.minions_killed_this_turn)
# Hungry Dragon
class BRM_026:
play = Summon(OPPONENT, RandomMinion(cost=1))
# Majordomo Executus
class BRM_027:
deathrattle = Summon(CONTROLLER, "BRM_027h"), Summon(CONTROLLER, "BRM_027p")
# DIE, INSECT!
class BRM_027p:
activate = Hit(RANDOM_ENEMY_CHARACTER, 8)
# DIE, INSECTS!
class BRM_027pH:
activate = Hit(RANDOM_ENEMY_CHARACTER, 8) * 2
# Emperor Thaurissan
class BRM_028:
events = OWN_TURN_END.on(Buff(CONTROLLER_HAND, "BRM_028e"))
# Rend Blackhand
class BRM_029:
play = HOLDING_DRAGON & Destroy(TARGET)
# Chromaggus
class BRM_031:
events = Draw(CONTROLLER).on(Give(CONTROLLER, Copy(Draw.Args.CARD)))
# Blackwing Technician
class BRM_033:
play = HOLDING_DRAGON & Buff(SELF, "BRM_033e")
# Blackwing Corruptor
class BRM_034:
play = HOLDING_DRAGON & Hit(TARGET, 3)
##
# Spells
# Solemn Vigil
class BRM_001:
play = Draw(CONTROLLER) * 2
cost = lambda self, i: i - len(self.game.minions_killed_this_turn)
# Dragon's Breath
class BRM_003:
play = Hit(TARGET, 4)
cost = lambda self, i: i - len(self.game.minions_killed_this_turn)
# Demonwrath
class BRM_005:
play = Hit(ALL_MINIONS - DEMON, 2)
# Gang Up
class BRM_007:
play = Shuffle(CONTROLLER, Copy(TARGET)) * 3
# Lava Shock
class BRM_011:
play = Hit(TARGET, 2), UnlockOverload(CONTROLLER)
# Quick Shot
class BRM_013:
play = Hit(TARGET, 3), Find(CONTROLLER_HAND) | Draw(CONTROLLER)
# Revenge
class BRM_015:
play = (Attr(FRIENDLY_HERO, "health") <= 12) & Hit(ALL_MINIONS, 3) | Hit(ALL_MINIONS, 1)
# Resurrect
class BRM_017:
play = Summon(CONTROLLER, Copy(RANDOM(FRIENDLY + KILLED + MINION)))
| agpl-3.0 | -551,028,235,361,436,000 | 17.096386 | 89 | 0.691744 | false |
centaurialpha/pireal | tests/interpreter/test_parser.py | 1 | 4782 | import pytest
# from pireal.core.interpreter import scanner
# from pireal.core.interpreter import lexer
from pireal.core.interpreter import parser
# from pireal.core.interpreter import rast as ast
# @pytest.fixture
# def fixture_parser():
# def _make_parser(text):
# sc = scanner.Scanner(text)
# lex = lexer.Lexer(sc)
# par = parser.Parser(lex)
# return par
# return _make_parser
# def test_parser_select_expression(fixture_parser):
# p = fixture_parser('q1 := select id=1 (p);')
# tree = p.parse()
# assert isinstance(tree, ast.Compound)
# for c in tree.children:
# assert isinstance(c, ast.Assignment)
# assert isinstance(c.query, ast.SelectExpr)
# def test_parser_project_expression(fixture_parser):
# p = fixture_parser('q1 := project a, b, c (p);')
# tree = p.parse()
# assert isinstance(tree, ast.Compound)
# for c in tree.children:
# assert isinstance(c, ast.Assignment)
# assert isinstance(c.query, ast.ProjectExpr)
# def test_parser_binary_expression(fixture_parser):
# p = fixture_parser('q1 := a intersect b;')
# tree = p.parse()
# assert isinstance(tree, ast.Compound)
# for c in tree.children:
# assert isinstance(c, ast.Assignment)
# assert isinstance(c.query, ast.BinaryOp)
# def test_parser_condition(fixture_parser):
# p = fixture_parser('q1 := select i<2 (p);')
# tree = p.parse()
# assert isinstance(tree, ast.Compound)
# for c in tree.children:
# assert isinstance(c, ast.Assignment)
# assert isinstance(c.query.condition, ast.Condition)
# def test_string_node(fixture_parser):
# p = fixture_parser('q1 := select name=\'gabo\' (p);')
# tree = p.parse()
# assert isinstance(tree, ast.Compound)
# for c in tree.children:
# assert isinstance(c, ast.Assignment)
# assert isinstance(c.query.condition.op2, ast.String)
# @pytest.mark.parametrize(
# 'date',
# [
# ('20/01/1991',),
# ('1991/01/20')
# ]
# )
# def test_date_node(fixture_parser, date):
# p = fixture_parser('q1 := select date=\'%s\' (p);' % date)
# tree = p.parse()
# assert isinstance(tree, ast.Compound)
# for c in tree.children:
# assert isinstance(c, ast.Assignment)
# assert isinstance(c.query.condition.op2, ast.Date)
# def test_date_failure():
# with pytest.raises(ast.DateFormatError):
# ast.parse_date('01/20/1991')
# def test_time_node(fixture_parser):
# p = fixture_parser('q1 := select hour=\'20:15\' (p);')
# tree = p.parse()
# assert isinstance(tree, ast.Compound)
# for c in tree.children:
# assert isinstance(c, ast.Assignment)
# assert isinstance(c.query.condition.op2, ast.Time)
# def test_bool_node(fixture_parser):
# p = fixture_parser('q1 := select edad < 20 or edad > 10 (p);')
# tree = p.parse()
# assert isinstance(tree, ast.Compound)
# for c in tree.children:
# assert isinstance(c, ast.Assignment)
# assert isinstance(c.query.condition, ast.BoolOp)
# assert 'or' in c.query.condition.ops
# for c, c2 in zip(['<', '>'], c.query.condition.conditions):
# assert c == c2.operator.value
# def test_expression_node(fixture_parser):
# p = fixture_parser('q1 := (project a,b (select id=1 (p)));')
# tree = p.parse()
# assert isinstance(tree, ast.Compound)
# for c in tree.children:
# assert isinstance(c, ast.Assignment)
# assert isinstance(c.query, ast.ProjectExpr)
# assert isinstance(c.query.expr, ast.SelectExpr)
# # @pytest.mark.parametrize(
# # 'query',
# # [
# # ('q1:=')
# # ]
# # )
# # FIXME: parametrizar esto
# @pytest.mark.parametrize(
# 'query, consumed',
# [
# ('q1 :=', (lexer.ID, lexer.ASSIGNMENT)),
# ('select id=', (lexer.KEYWORDS['select'], lexer.ID, lexer.EQUAL)),
# ('project a,b', (lexer.KEYWORDS['project'], lexer.ID, lexer.SEMI, lexer.ID))
# ]
# )
# def test_consume(fixture_parser, query, consumed):
# p = fixture_parser(query)
# for token_to_consume in consumed:
# p.consume(token_to_consume)
# def test_consume_error(fixture_parser):
# p = fixture_parser('q1 :=')
# p.consume(lexer.ID)
# with pytest.raises(parser.ConsumeError):
# p.consume(lexer.SEMI)
# def test_variable(fixture_parser):
# p = fixture_parser('q1 :=')
# node = p._variable()
# assert isinstance(node, parser.Variable)
# assert node.token.type == lexer.ID
# assert node.token.value == 'q1'
# def test_condition(fixture_parser):
# p = fixture_parser("name='gabo'")
# node = p._condition()
# assert isinstance(node, parser.Condition)
| gpl-3.0 | 1,665,728,800,564,844,800 | 29.458599 | 86 | 0.611251 | false |
joaomoreno/facilis | facilis/core/app.py | 1 | 4753 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Facilis
# João Moreno <http://www.joaomoreno.com/>
# GPLv3
import os, yaml, sys, clipboard
from urllib2 import urlopen, HTTPError
from re import search
from web import ServerHandler
from db import FilesDatabase
from misc import PortInUse
from threading import Event, Thread
class FacilisApp(object):
def __init__(self):
"""Load options and start app."""
# Load configuration stuff
self.configFile = self.__configFile()
self.__loadConfig()
self.updateIP()
self.ip_monitor = IPMonitor(self, 5 * 60)
self.ip_monitor.start()
self.db = FilesDatabase()
# Load server
self.server = ServerHandler(self, self.config['port'])
def updateIP(self):
"""Stores the outer IP of this network / machine."""
try:
ip = urlopen('http://checkip.dyndns.org/').read()
self.ip = search(r'(\d+.\d+.\d+.\d+)', ip).group()
except HTTPError:
import socket
self.ip = socket.gethostbyname(socket.gethostname())
def start(self):
"""Starts the application."""
# Start the server (threaded)
self.server.start()
self.server.join(0.5)
if not self.server.isAlive():
raise PortInUse, self.config['port']
def kill(self):
"""Called by the interface to kill the application."""
self.ip_monitor.kill()
exit(0)
def __loadConfig(self):
"""Loads the config file and stores it in self.config."""
try:
f = file(self.configFile, 'r')
self.config = yaml.load(f)
f.close()
except:
# Default Configuration
self.config = {
'port': 4242,
'domain': None,
'use_domain': False
}
self.__saveConfig()
def __saveConfig(self):
"""Saves the config in self.config to the config file."""
f = file(self.configFile, 'w')
yaml.dump(self.config, f)
f.close()
def __configFile(self):
"""Returns the configuration filename."""
d = self.__userDir()
if not os.path.exists(d):
os.mkdir(d)
return d + os.sep + "config.yml"
def __userDir(self):
"""Returns the user configuration directory. Adapted from http://tinyurl.com/6hk5vz."""
try:
from win32com.shell import shell, shellcon
except ImportError:
shell = None
try:
import _winreg
except ImportError:
_winreg = None
if shell:
return shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0) + os.sep + "Facilis"
if _winreg:
k = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
try:
return _winreg.QueryValueEx( k, "AppData" )[0] + os.sep + "Facilis"
finally:
_winreg.CloseKey( k )
for name in ['appdata', 'home']:
if os.environ.has_key( name ):
return os.environ[name] + os.sep + ".facilis"
possible = os.path.abspath(os.path.expanduser( '~/' ))
if os.path.exists( possible ):
return possible + os.sep + ".facilis"
raise OSError( "Unable to determine user's application-data directory")
def setPort(self,p):
self.config['port'] = p
self.__saveConfig()
def setDomain(self,d):
self.config['domain'] = d
self.__saveConfig()
def useDomain(self,u):
self.config['use_domain'] = u
self.__saveConfig()
def addFile(self, fname):
"""Add a file to share. Trows IOError in case file does not exist or is unreadable."""
h = self.db.addFile(fname)
url = self.getUrl(h)
clipboard.copy(url)
return url
def getFile(self, url):
return self.db.getFile(url)
def getUrl(self, hash=''):
return "http://%s:%i/%s\n" % (self.config['domain'] if self.config['use_domain'] else self.ip, self.config['port'], hash)
class IPMonitor(object):
def __init__(self, app, interval):
self.app = app
self.e = Event()
self.t = Thread(target=self.__repeat)
self.i = interval
def start(self):
self.t.start()
def kill(self):
self.e.set()
self.t.join()
def __repeat(self):
while True:
self.e.wait(self.i)
if self.e.isSet():
break
self.app.updateIP()
| gpl-3.0 | 7,259,759,753,931,775,000 | 29.267516 | 129 | 0.538089 | false |
aymeric-spiga/planetoplot | examples/ppclass_additional/roughness_hist.py | 1 | 1177 | #! /usr/bin/env python
from ppclass import pp
import matplotlib.pyplot as mpl
import numpy as np
import ppplot
u = pp()
#u.file = "/home/aymeric/Big_Data/ustar.101x101x201.CaseA.w30_zipbl.nc"
u.file = "BIGLES10m_wind5_USTM_9-11.nc"
u.var = "USTM"
u.x = "0,1000"
u.y = "0,1000"
tttall = "0,1e10"
for yeah in [tttall]:
#for yeah in ["0"]:
u.t = yeah
u.compute = "nothing"
ustm = u.getf()
u.compute = "max" ; zemax = u.getf()
u.compute = "min" ; zemin = u.getf()
u.compute = "mean" ; zemean = u.getf()
ppplot.figuref(x=4,y=4)
dval = 0.05
bins = np.arange(zemin,zemax,dval)
hh = mpl.hist(np.ravel(ustm),bins,log=True)
print hh
mpl.title("$\mu$=%.2f / m=%.2f / M=%.2f" % (zemean,zemin,zemax))
mpl.xlabel('Friction velocity $u_{\star}$ (m s$^{-1}$)')
ppplot.save(mode="png",filename="roughness_hist")
ppplot.close()
u.x = None
u.y = None
u.t = tttall
u.compute = "max"
u.xcoeff = 0.01
u.ycoeff = 0.01
u.xlabel = "x (km)"
u.ylabel = "y (km)"
u.title = 'maximum $u\star$'
u.vmin = 0.4
u.vmax = 1.1
u.div = 70
u.colorbar = "gist_ncar" #"CMRmap"
u.fmt = "%.3f"
u.xp = 10
u.yp = 8
u.filename = "maxustm"
u.includedate = False
u.out = "png"
u.getplot()
| gpl-2.0 | 5,804,368,510,208,841,000 | 20.4 | 71 | 0.615973 | false |
google-research/language | language/tek_representations/preprocess/prepare_mrqa_data.py | 1 | 3661 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Converts an mrqa dataset file to tf examples.
Notes:
- this program needs to be run for every mrqa training shard.
- this program only outputs the first n top level contexts for every example,
where n is set through --max_contexts.
- the restriction from --max_contexts is such that the annotated context might
not be present in the output examples. --max_contexts=8 leads to about
85% of examples containing the correct context. --max_contexts=48 leads to
about 97% of examples containing the correct context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
from language.tek_representations import run_mrqa
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_bool(
"is_training", True,
"Whether to prepare features for training or for evaluation. Eval features "
"don't include gold labels, but include wordpiece to html token maps.")
flags.DEFINE_integer(
"max_examples", 0,
"If positive, stop once these many examples have been converted.")
flags.DEFINE_string("split", "train",
"Train and dev split to read from and write to.")
flags.DEFINE_string("input_data_dir", "", "input_data_dir")
flags.DEFINE_string("output_data_dir", "", "output_data_dir")
flags.DEFINE_integer("n_shards", 50, "number of shards for this split")
def get_examples(input_jsonl_pattern):
for input_path in tf.gfile.Glob(input_jsonl_pattern):
with tf.gfile.Open(input_path) as input_file:
for line in input_file:
yield json.loads(line)
def get_shard():
return "%05d-of-%05d" % (FLAGS.task_id, FLAGS.n_shards)
def main(_):
examples_processed = 0
creator_fn = run_mrqa.CreateTFExampleFn(is_training=FLAGS.is_training)
instances = []
input_file = os.path.join(
FLAGS.input_data_dir,
"%s.jsonl-%s" % (FLAGS.split, get_shard()))
for example in get_examples(input_file):
for instance in creator_fn.process(example):
instances.append(instance)
if examples_processed % 100 == 0:
tf.logging.info("Examples processed: %d", examples_processed)
examples_processed += 1
if FLAGS.max_examples > 0 and examples_processed >= FLAGS.max_examples:
break
random.shuffle(instances)
tf.logging.info("Total no: of instances in current shard: %d", len(instances))
rec_output_file = os.path.join(FLAGS.output_data_dir,
"%s.tfrecord-%s" % (FLAGS.split, get_shard()))
with tf.python_io.TFRecordWriter(rec_output_file) as writer:
for instance, _ in instances:
writer.write(instance)
if not FLAGS.is_training:
fea_output_file = os.path.join(
FLAGS.output_data_dir,
"%s.features.jsonl-%s" % (FLAGS.split, get_shard()))
with tf.gfile.Open(fea_output_file, "w") as writer:
for _, instance in instances:
writer.write(instance)
if __name__ == "__main__":
flags.mark_flag_as_required("vocab_file")
tf.app.run()
| apache-2.0 | -3,192,057,876,285,266,400 | 33.537736 | 80 | 0.698443 | false |
CanonicalLtd/landscape-client | landscape/client/monitor/config.py | 1 | 1203 | from landscape.client.deployment import Configuration
ALL_PLUGINS = ["ActiveProcessInfo", "ComputerInfo",
"LoadAverage", "MemoryInfo", "MountInfo", "ProcessorInfo",
"Temperature", "PackageMonitor", "UserMonitor",
"RebootRequired", "AptPreferences", "NetworkActivity",
"NetworkDevice", "UpdateManager", "CPUUsage", "SwiftUsage",
"CephUsage"]
class MonitorConfiguration(Configuration):
"""Specialized configuration for the Landscape Monitor."""
def make_parser(self):
"""
Specialize L{Configuration.make_parser}, adding many
monitor-specific options.
"""
parser = super(MonitorConfiguration, self).make_parser()
parser.add_option("--monitor-plugins", metavar="PLUGIN_LIST",
help="Comma-delimited list of monitor plugins to "
"use. ALL means use all plugins.",
default="ALL")
return parser
@property
def plugin_factories(self):
if self.monitor_plugins == "ALL":
return ALL_PLUGINS
return [x.strip() for x in self.monitor_plugins.split(",")]
| gpl-2.0 | -1,557,809,933,172,004,000 | 36.59375 | 76 | 0.595179 | false |
pf4d/dolfin-adjoint | tests_dolfin/mantle_convection/timings/adjoint.py | 1 | 6534 | __author__ = "Lyudmyla Vynnytska and Marie E. Rognes"
__copyright__ = "Copyright (C) 2011 Simula Research Laboratory and %s" % __author__
__license__ = "GNU LGPL Version 3 or any later version"
# Last changed: 2012-02-14
import time
import numpy
import sys
import os
import math
sys.path.insert(0, '..')
from stokes import *
from composition import *
from temperature import *
from parameters import InitialTemperature, Ra, Rb, rho0, g
from parameters import eta0, b_val, c_val, deltaT
from dolfin import *; import dolfin
from dolfin_adjoint import *
dolfin.parameters["adjoint"]["fussy_replay"] = False
dolfin.parameters["adjoint"]["record_all"] = True
dolfin.parameters["form_compiler"]["representation"] = "quadrature"
def viscosity(T):
eta = eta0 * exp(-b_val*T/deltaT + c_val*(1.0 - triangle.x[1])/height )
return eta
def store(T, w, t):
temperature_series << (T, t)
flow_series << (w, t)
def message(t, dt):
print "\n" + "-"*60
print "t = %0.5g" % t
print "dt = %0.5g" % dt
os.system("date")
print "-"*60
def compute_timestep(w):
#(u, p) = w.split(deepcopy=True)
# maxvel = numpy.max(numpy.abs(u.vector().array()))
# mesh = u.function_space().mesh()
# hmin = mesh.hmin()
# dt = CLFnum*hmin/maxvel
dt = constant_dt
return dt
def compute_initial_conditions(T_, W, Q, bcs, annotate):
# Solve Stokes problem with given initial temperature and
# composition
eta = viscosity(T_)
(a, L, pre) = momentum(W, eta, (Ra*T_)*g)
w = Function(W)
P = PETScMatrix()
assemble(pre, tensor=P); [bc.apply(P) for bc in bcs]
solve(a == L, w, bcs=bcs, solver_parameters={"linear_solver": "tfqmr", "preconditioner": "amg"}, annotate=annotate)
return (w, P)
parameters["form_compiler"]["cpp_optimize"] = True
# Define spatial domain
height = 1.0
length = 2.0
nx = 10
ny = 10
mesh = Rectangle(0, 0, length, height, nx, ny)
# Containers for storage
flow_series = File("bin-final/flow.pvd", "compressed")
temperature_series = File("bin-final/temperature.pvd", "compressed")
# Create function spaces
W = stokes_space(mesh)
V = W.sub(0).collapse()
Q = FunctionSpace(mesh, "DG", 1)
print "Number of degrees of freedom:", (W*Q).dim()
# Define boundary conditions for the temperature
top_temperature = DirichletBC(Q, 0.0, "x[1] == %g" % height, "geometric")
bottom_temperature = DirichletBC(Q, 1.0, "x[1] == 0.0", "geometric")
T_bcs = [bottom_temperature, top_temperature]
constant_dt = 3.0e-5
finish = constant_dt * 10
def main(T_, annotate=False):
# Define initial and end time
t = 0.0
# Define boundary conditions for the velocity and pressure u
bottom = DirichletBC(W.sub(0), (0.0, 0.0), "x[1] == 0.0" )
top = DirichletBC(W.sub(0).sub(1), 0.0, "x[1] == %g" % height)
left = DirichletBC(W.sub(0).sub(0), 0.0, "x[0] == 0.0")
right = DirichletBC(W.sub(0).sub(0), 0.0, "x[0] == %g" % length)
bcs = [bottom, top, left, right]
rho = interpolate(rho0, Q)
# Functions at previous timestep (and initial conditions)
(w_, P) = compute_initial_conditions(T_, W, Q, bcs, annotate=annotate)
# Predictor functions
T_pr = Function(Q) # Tentative temperature (T)
# Functions at this timestep
T = Function(Q) # Temperature (T) at this time step
w = Function(W)
# Store initial data
if annotate:
store(T_, w_, 0.0)
# Define initial CLF and time step
CLFnum = 0.5
dt = compute_timestep(w_)
t += dt
n = 1
w_pr = Function(W)
(u_pr, p_pr) = split(w_pr)
(u_, p_) = split(w_)
# Solver for the Stokes systems
solver = AdjointPETScKrylovSolver("gmres", "amg")
solver.parameters["relative_tolerance"] = 1.0e-14
solver.parameters["monitor_convergence"] = False
while (t <= finish):
message(t, dt)
# Solve for predicted temperature in terms of previous velocity
(a, L) = energy(Q, Constant(dt), u_, T_)
solve(a == L, T_pr, T_bcs, solver_parameters={"krylov_solver": {"relative_tolerance": 1.0e-14}}, annotate=annotate)
# Solve for predicted flow
eta = viscosity(T_pr)
(a, L, precond) = momentum(W, eta, (Ra*T_pr)*g)
b = assemble(L); [bc.apply(b) for bc in bcs]
A = AdjointKrylovMatrix(a, bcs=bcs)
solver.set_operators(A, P)
solver.solve(w_pr.vector(), b, annotate=annotate)
#solve(a == L, w_pr, bcs, solver_parameters={"krylov_solver": {"relative_tolerance": 1.0e-14}}, annotate=annotate)
# Solve for corrected temperature T in terms of predicted and previous velocity
(a, L) = energy_correction(Q, Constant(dt), u_pr, u_, T_)
solve(a == L, T, T_bcs, annotate=annotate, solver_parameters={"krylov_solver": {"relative_tolerance": 1.0e-14}})
# Solve for corrected flow
eta = viscosity(T)
(a, L, precond) = momentum(W, eta, (Ra*T)*g)
b = assemble(L); [bc.apply(b) for bc in bcs]
A = AdjointKrylovMatrix(a, bcs=bcs)
solver.set_operators(A, P)
solver.solve(w.vector(), b, annotate=annotate)
#solve(a == L, w, bcs, solver_parameters={"krylov_solver": {"relative_tolerance": 1.0e-14}}, annotate=annotate)
# Store stuff
if annotate:
store(T, w, t)
# Compute time step
dt = compute_timestep(w)
# Move to new timestep and update functions
T_.assign(T)
w_.assign(w)
t += dt
n += 1
adj_inc_timestep()
return T_
def Nusselt():
"Definition of Nusselt number, cf Blankenbach et al 1989"
# Define markers (2) for top boundary, remaining facets are marked
# by 0
markers = FacetFunction("uint", mesh)
markers.set_all(0)
top = compile_subdomains("near(x[1], %s)" % height)
top.mark(markers, 2)
ds = Measure("ds")[markers]
# Compute \int_bottom T apriori:
Nu2 = deltaT*length
return (ds(2), Nu2)
# Define nusselt number
#Nu = - (1.0/Nu2)*grad(T)[1]*ds(2)
#return Nu
if __name__ == "__main__":
Tic = interpolate(InitialTemperature(Ra, length), Q)
ic_copy = Function(Tic)
another_copy = Function(Tic)
Tfinal = main(Tic, annotate=True)
(ds2, Nu2) = Nusselt()
print "Timings of forward model: "
list_timings(True)
J = FinalFunctional(-(1.0/Nu2)*grad(Tfinal)[1]*ds2)
#J = FinalFunctional(inner(Tfinal, Tfinal)*dx)
for (adjoint, var) in compute_adjoint(J, forget=False):
pass
print "Timings of adjoint model: "
list_timings(True)
| lgpl-3.0 | -7,254,148,762,428,801,000 | 28.565611 | 123 | 0.617539 | false |
pegler/pytzwhere | tests/test_locations.py | 1 | 4565 | from tzwhere import tzwhere
import datetime
import unittest
class LocationTestCase(unittest.TestCase):
TEST_LOCATIONS = (
( 35.295953, -89.662186, 'Arlington, TN', 'America/Chicago'),
( 33.58, -85.85, 'Memphis, TN', 'America/Chicago'),
( 61.17, -150.02, 'Anchorage, AK', 'America/Anchorage'),
( 44.12, -123.22, 'Eugene, OR', 'America/Los_Angeles'),
( 42.652647, -73.756371, 'Albany, NY', 'America/New_York'),
( 55.743749, 37.6207923, 'Moscow', 'Europe/Moscow'),
( 34.104255, -118.4055591, 'Los Angeles', 'America/Los_Angeles'),
( 55.743749, 37.6207923, 'Moscow', 'Europe/Moscow'),
( 39.194991, -106.8294024, 'Aspen, Colorado', 'America/Denver'),
( 50.438114, 30.5179595, 'Kiev', 'Europe/Kiev'),
( 12.936873, 77.6909136, 'Jogupalya', 'Asia/Kolkata'),
( 38.889144, -77.0398235, 'Washington DC', 'America/New_York'),
( 59.932490, 30.3164291, 'St Petersburg', 'Europe/Moscow'),
( 50.300624, 127.559166, 'Blagoveshchensk', 'Asia/Yakutsk'),
( 42.439370, -71.0700416, 'Boston', 'America/New_York'),
( 41.84937, -87.6611995, 'Chicago', 'America/Chicago'),
( 28.626873, -81.7584514, 'Orlando', 'America/New_York'),
( 47.610615, -122.3324847, 'Seattle', 'America/Los_Angeles'),
( 51.499990, -0.1353549, 'London', 'Europe/London'),
( 51.256241, -0.8186531, 'Church Crookham', 'Europe/London'),
( 51.292215, -0.8002638, 'Fleet', 'Europe/London'),
( 48.868743, 2.3237586, 'Paris', 'Europe/Paris'),
( 22.158114, 113.5504603, 'Macau', 'Asia/Macau'),
( 56.833123, 60.6097054, 'Russia', 'Asia/Yekaterinburg'),
( 60.887496, 26.6375756, 'Salo', 'Europe/Helsinki'),
( 52.799992, -1.8524408, 'Staffordshire', 'Europe/London'),
( 5.016666, 115.0666667, 'Muara', 'Asia/Brunei'),
(-41.466666, -72.95, 'Puerto Montt seaport', 'America/Santiago'),
( 34.566666, 33.0333333, 'Akrotiri seaport', 'Asia/Nicosia'),
( 37.466666, 126.6166667, 'Inchon seaport', 'Asia/Seoul'),
( 42.8, 132.8833333, 'Nakhodka seaport', 'Asia/Vladivostok'),
( 50.26, -5.051, 'Truro', 'Europe/London'),
( 50.26, -9.051, 'Sea off Cornwall', None),
( 35.82373, -110.72144, 'Hopi Nation', 'America/Phoenix'),
( 35.751956, -110.169460, 'Deni inside Hopi Nation', 'America/Denver'),
( 68.38068073677294, -133.73396065378114, 'Upper hole in America/Yellowknife', 'America/Inuvik')
)
TEST_LOCATIONS_FORCETZ = (
( 35.295953, -89.662186, 'Arlington, TN', 'America/Chicago'),
( 33.58, -85.85, 'Memphis, TN', 'America/Chicago'),
( 61.17, -150.02, 'Anchorage, AK', 'America/Anchorage'),
( 40.7271, -73.98, 'Shore Lake Michigan', 'America/New_York'),
( 50.1536, -8.051, 'Off Cornwall', 'Europe/London'),
( 49.2698, -123.1302, 'Vancouver', 'America/Vancouver'),
( 50.26, -9.051, 'Far off Cornwall', None)
)
def _test_tzwhere(self, locations, forceTZ):
start = datetime.datetime.now()
w = tzwhere.tzwhere(forceTZ=forceTZ)
end = datetime.datetime.now()
print('Initialized in: '),
print(end - start)
template = '{0:20s} | {1:20s} | {2:20s} | {3:2s}'
print(template.format('LOCATION', 'EXPECTED', 'COMPUTED', '=='))
for (lat, lon, loc, expected) in locations:
computed = w.tzNameAt(float(lat), float(lon), forceTZ=forceTZ)
ok = 'OK' if computed == expected else 'XX'
print(template.format(loc, str(expected), str(computed), ok))
assert computed == expected
def test_lookup(self):
self._test_tzwhere(self.TEST_LOCATIONS,forceTZ=False)
def test_forceTZ(self):
self._test_tzwhere(self.TEST_LOCATIONS_FORCETZ,forceTZ=True)
| mit | 8,654,051,610,969,396,000 | 59.065789 | 108 | 0.4977 | false |
hugovk/congress-legislators | scripts/committee_membership.py | 1 | 12853 | #!/usr/bin/env python
# Scrape house.gov and senate.gov for current committee membership,
# and updates the committees-current.yaml file with metadata including
# name, url, address, and phone number.
import re, lxml.html, lxml.etree, io, datetime
from collections import OrderedDict
import utils
from utils import download, load_data, save_data, parse_date
def run():
committee_membership = { }
committees_current = load_data("committees-current.yaml")
memberships_current = load_data("committee-membership-current.yaml")
# default to not caching
cache = utils.flags().get('cache', False)
force = not cache
# map house/senate committee IDs to their dicts
house_ref = { }
for cx in committees_current:
if "house_committee_id" in cx:
house_ref[cx["house_committee_id"]] = cx
senate_ref = { }
for cx in committees_current:
if "senate_committee_id" in cx:
senate_ref[cx["senate_committee_id"]] = cx
# map state/district to current representatives and state/lastname to current senators
# since the House/Senate pages do not provide IDs for Members of Congress
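  # (added note) keys look like congressmen["TN09"] (state + zero-padded district) and
  # senators[("TN", "<last name>")], matching the lookups in the scrapers below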
today = datetime.datetime.now().date()
legislators_current = load_data("legislators-current.yaml")
congressmen = { }
senators = { }
for moc in legislators_current:
term = moc["terms"][-1]
if today < parse_date(term["start"]) or today > parse_date(term["end"]):
raise ValueError("Member's last listed term is not current: " + repr(moc) + " / " + term["start"])
if term["type"] == "rep":
congressmen["%s%02d" % (term["state"], term["district"])] = moc
elif term["type"] == "sen":
for n in [moc["name"]] + moc.get("other_names", []):
senators[(term["state"], n["last"])] = moc
# Scrape clerk.house.gov...
def scrape_house_alt():
for id, cx in list(house_ref.items()):
scrape_house_committee(cx, cx["thomas_id"], id + "00")
def scrape_house():
"""The old way of scraping House committees was to start with the committee list
at the URL below, but this page no longer has links to the committee info pages
even though those pages exist. Preserving this function in case we need it later."""
url = "http://clerk.house.gov/committee_info/index.aspx"
body = download(url, "committees/membership/house.html", force)
for id, name in re.findall(r'<a href="/committee_info/index.aspx\?comcode=(..)00">(.*)</a>', body, re.I):
if id not in house_ref:
print("Unrecognized committee:", id, name)
continue
cx = house_ref[id]
scrape_house_committee(cx, cx["thomas_id"], id + "00")
def scrape_house_committee(cx, output_code, house_code):
# load the House Clerk's committee membership page for the committee
# (it is encoded in utf-8 even though the page indicates otherwise, and
# while we don't really care, it helps our sanity check that compares
# names)
url = "http://clerk.house.gov/committee_info/index.aspx?%s=%s" % ('comcode' if house_code[-2:] == '00' else 'subcomcode', house_code)
body = download(url, "committees/membership/house/%s.html" % house_code, force)
dom = lxml.html.parse(io.StringIO(body)).getroot()
# update official name metadata
if house_code[-2:] == "00":
cx["name"] = "House " + str(dom.cssselect("#com_display h3")[0].text_content())
else:
cx["name"] = str(dom.cssselect("#subcom_title h4")[0].text_content())
# update address/phone metadata
address_info = re.search(r"""Mailing Address:\s*(.*\S)\s*Telephone:\s*(\(202\) .*\S)""", dom.cssselect("#address")[0].text_content(), re.I | re.S)
if not address_info: raise Exception("Failed to parse address info in %s." % house_code)
cx["address"] = address_info.group(1)
cx["address"] = re.sub(r"\s+", " ", cx["address"])
cx["address"] = re.sub(r"(.*\S)(Washington, DC \d+)\s*(-\d+)?", lambda m : m.group(1) + "; " + m.group(2) + (m.group(3) if m.group(3) else ""), cx["address"])
cx["phone"] = address_info.group(2)
# get the ratio line to use in a sanity check later
ratio = dom.cssselect("#ratio")
if len(ratio): # some committees are missing
ratio = re.search(r"Ratio (\d+)/(\d+)", ratio[0].text_content())
else:
ratio = None
# scan the membership, which is listed by party
for i, party, nodename in ((1, 'majority', 'primary'), (2, 'minority', 'secondary')):
ctr = 0
for rank, node in enumerate(dom.cssselect("#%s_group li" % nodename)):
ctr += 1
lnk = node.cssselect('a')
if len(lnk) == 0:
if node.text_content() == "Vacancy": continue
raise ValueError("Failed to parse a <li> node.")
moc = lnk[0].get('href')
m = re.search(r"statdis=([A-Z][A-Z]\d\d)", moc)
if not m: raise ValueError("Failed to parse member link: " + moc)
if not m.group(1) in congressmen:
print("Vacancy discrepancy? " + m.group(1))
continue
moc = congressmen[m.group(1)]
found_name = node.cssselect('a')[0].text_content().replace(", ", "")
if moc['name'].get("official_full", None) is None:
print("No official_full field for %s" % found_name)
continue
if found_name != moc['name']['official_full']:
print(("Name mismatch: %s (in our file) vs %s (on the Clerk page)" % (moc['name']['official_full'], node.cssselect('a')[0].text_content())).encode("utf8"))
entry = OrderedDict()
entry["name"] = moc['name']['official_full']
entry["party"] = party
entry["rank"] = rank+1
if rank == 0:
entry["title"] = "Chair" if entry["party"] == "majority" else "Ranking Member" # not explicit, frown
entry.update(ids_from(moc["id"]))
committee_membership.setdefault(output_code, []).append(entry)
# the .tail attribute has the text to the right of the link
m = re.match(r", [A-Z][A-Z](,\s*)?(.*\S)?", lnk[0].tail)
if m.group(2):
# Chairman, Vice Chair, etc. (all but Ex Officio) started appearing on subcommittees around Feb 2014.
# For the chair, this should overwrite the implicit title given for the rank 0 majority party member.
if m.group(2) in ("Chair", "Chairman", "Chairwoman"):
entry["title"] = "Chair"
elif m.group(2) in ("Vice Chair", "Vice Chairman"):
entry["title"] = "Vice Chair"
elif m.group(2) == "Ex Officio":
entry["title"] = m.group(2)
else:
raise ValueError("Unrecognized title information '%s' in %s." % (m.group(2), url))
# sanity check we got the right number of nodes
if ratio and ctr != int(ratio.group(i)): raise ValueError("Parsing didn't get the right count of members.")
# scan for subcommittees
for subcom in dom.cssselect("#subcom_list li a"):
m = re.search("subcomcode=(..(\d\d))", subcom.get('href'))
if not m: raise ValueError("Failed to parse subcommittee link.")
for sx in cx['subcommittees']:
if sx["thomas_id"] == m.group(2):
break
else:
print("Subcommittee not found, creating it", output_code, m.group(1))
sx = OrderedDict()
sx['name'] = "[not initialized]" # will be set inside of scrape_house_committee
sx['thomas_id'] = m.group(2)
cx['subcommittees'].append(sx)
scrape_house_committee(sx, cx["thomas_id"] + sx["thomas_id"], m.group(1))
# Scrape senate.gov....
def scrape_senate():
url = "https://www.senate.gov/pagelayout/committees/b_three_sections_with_teasers/membership.htm"
body = download(url, "committees/membership/senate.html", force)
for id, name in re.findall(r'value="/general/committee_membership/committee_memberships_(....).htm">(.*?)</option>', body, re.I | re.S):
if id not in senate_ref:
print("Unrecognized committee:", id, name)
continue
cx = senate_ref[id]
is_joint = (id[0] == "J")
# Scrape some metadata on the HTML page first.
committee_url = "https://www.senate.gov/general/committee_membership/committee_memberships_%s.htm" % id
print("[%s] Fetching members for %s (%s)" % (id, name, committee_url))
body2 = download(committee_url, "committees/membership/senate/%s.html" % id, force)
if not body2:
print("\tcommittee page not good:", committee_url)
continue
m = re.search(r'<span class="contenttext"><a href="(http://(.*?).senate.gov/)">', body2, re.I)
if m:
cx["url"] = m.group(1)
# Use the XML for the rest.
print("\tDownloading XML...")
committee_url = "https://www.senate.gov/general/committee_membership/committee_memberships_%s.xml" % id
body3 = download(committee_url, "committees/membership/senate/%s.xml" % id, force)
dom = lxml.etree.fromstring(body3.encode("utf8")) # must be bytes to parse if there is an encoding declaration inside the string
cx["name"] = dom.xpath("committees/committee_name")[0].text
if id[0] != "J" and id[0:2] != 'SC':
cx["name"] = "Senate " + cx["name"]
majority_party = dom.xpath("committees/majority_party")[0].text
# update full committee members
committee_membership[id] = []
for member in dom.xpath("committees/members/member"):
scrape_senate_member(committee_membership[id], member, majority_party, is_joint)
# update subcommittees
for subcom in dom.xpath("committees/subcommittee"):
scid = subcom.xpath("committee_code")[0].text[4:]
for sx in cx.get('subcommittees', []):
if sx["thomas_id"] == scid:
break
else:
print("Subcommittee not found, creating it", scid, name)
sx = OrderedDict()
sx['thomas_id'] = scid
cx.setdefault('subcommittees', []).append(sx)
# update metadata
name = subcom.xpath("subcommittee_name")[0].text
sx["name"] = name.strip()
sx["name"] = re.sub(r"^\s*Subcommittee on\s*", "", sx["name"])
sx["name"] = re.sub(r"\s+", " ", sx["name"])
committee_membership[id + scid] = []
for member in subcom.xpath("members/member"):
scrape_senate_member(committee_membership[id + scid], member, majority_party, is_joint)
def scrape_senate_member(output_list, membernode, majority_party, is_joint):
last_name = membernode.xpath("name/last")[0].text
state = membernode.xpath("state")[0].text
party = "majority" if membernode.xpath("party")[0].text == majority_party else "minority"
title = membernode.xpath("position")[0].text
if title == "Member": title = None
if title == "Ranking": title = "Ranking Member"
# look up senator by state and last name
if (state, last_name) not in senators:
print("\t[%s] Unknown member: %s" % (state, last_name))
return None
moc = senators[(state, last_name)]
entry = OrderedDict()
if 'official_full' in moc['name']:
entry["name"] = moc['name']['official_full']
else:
print("missing name->official_full field for", moc['id']['bioguide'])
entry["party"] = party
entry["rank"] = len([e for e in output_list if e["party"] == entry["party"]]) + 1 # how many have we seen so far in this party, +1
if title: entry["title"] = title
entry.update(ids_from(moc["id"]))
if is_joint: entry["chamber"] = "senate"
output_list.append(entry)
# sort by party, then by rank, since we get the nodes in the XML in a rough seniority order that ignores party
# should be done once at the end, but cleaner to do it here
output_list.sort(key = lambda e : (e["party"] != "majority", e["rank"]))
# stick to a specific small set of official IDs to cross-link members
# this limits the IDs from going out of control in this file, while
  # preserving our flexibility to be inclusive of IDs in the main leg files
def ids_from(moc):
ids = {}
for id in ["bioguide", "thomas"]:
if id in moc:
ids[id] = moc[id]
if len(ids) == 0:
raise ValueError("Missing an official ID for this legislator, won't be able to link back")
return ids
def restore_house_members_on_joint_committees():
  # The House doesn't publish joint committee members, but we're manually gathering
# that. Add them back into the output from whatever we have on disk. Put them after
# Senate members.
for c, mbrs in list(memberships_current.items()):
if c[0] != "J": continue
for m in mbrs:
if m["chamber"] != "house": continue
committee_membership[c].append(m)
# MAIN
scrape_house()
scrape_senate()
restore_house_members_on_joint_committees()
save_data(committee_membership, "committee-membership-current.yaml")
save_data(committees_current, "committees-current.yaml")
if __name__ == '__main__':
run() | cc0-1.0 | -2,418,726,465,510,119,000 | 41.282895 | 165 | 0.622734 | false |
thomasgurry/amplicon_sequencing_pipeline | scripts/QualityControl.py | 1 | 5687 | """
OVERVIEW:
Python module for quality control of datasets prior to preprocessing.
"""
from __future__ import print_function
import os
import util
import numpy as np
import matplotlib
import sys
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import Formatting as frmt
def check_length_stats(fastq_in):
# Returns 25th, 50th, 75th and 95th percentile of read lengths
iter_seq = util.iter_fsq
x = []
counter = 0
for record in iter_seq(fastq_in):
sid, seq = record[:2]
counter = counter + 1
if(counter > 100000):
break
x.append(len(seq))
x = np.array(x)
return [np.percentile(x,25), np.percentile(x,50), np.percentile(x,75), np.percentile(x,95)]
def read_length_histogram(raw_sequences_file, path, raw_sequences_filetype='FASTQ'):
# Creates a histogram of read lengths
if raw_sequences_filetype == "FASTQ":
iter_seq = util.iter_fsq
else:
iter_seq = util.iter_fst
x = []
counter = 0
for record in iter_seq(raw_sequences_file):
[sid, seq] = record[:2]
counter = counter + 1
if(counter > 100000):
break
x.append(len(seq))
x = np.array(x)
plt.figure()
plt.hist(x, 50)
plt.title('Distribution of amplicon read lengths')
plt.xlabel('Read length')
plt.ylabel('Freq')
plt.savefig(os.path.join(path, 'read_lengths_distribution.png'))
def sample_read_counts(OTU_table, path):
# Takes as input an OTU table in classic dense format, and the folder to write to, and creates a barchart for the number of sequence reads for each sample.
OTU_IDs, sample_IDs, OTU_table = frmt.load_OTU_table_classic(OTU_table)
readcounts = np.sum(OTU_table, axis=0)
plt.figure()
plt.bar(range(len(readcounts)) , readcounts)
plt.xticks(range(len(readcounts)), sample_IDs, rotation='vertical')
plt.title('Mean counts per sample = ' + str(int(np.mean(readcounts))))
plt.xlabel('Sample ID')
plt.ylabel('Read counts')
plt.savefig(os.path.join(path, 'sample_read_counts.png'))
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def reads_thrown_out_at_each_step(raw_split_filenames, output_file):
# Creates a file with % of reads retained after each processing step
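    # (added note) suffix meanings, inferred from the report strings below:
    # .sb = demultiplexed (split by barcode), .pt = primer-trimmed, .qt = quality-trimmed,
    # .lt = length-trimmed, .fasta = final FASTA reads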
raw_counts = 0
sb_counts = 0
pt_counts = 0
qt_counts = 0
lt_counts = 0
fasta_counts = 0
for filename in raw_split_filenames:
raw_counts += file_len(filename)
try:
sb_counts += file_len(filename + '.sb')
except:
pass
try:
pt_counts += file_len(filename + '.sb.pt')
except:
pass
try:
qt_counts += file_len(filename + '.sb.pt.qt')
except:
pass
try:
lt_counts += file_len(filename + '.sb.pt.qt.lt')
except:
pass
try:
fasta_counts += file_len(filename + '.sb.pt.qt.lt.fasta')*2
except:
pass
with open(output_file, 'a+') as fid:
line = 'Number of raw reads = ' + str(raw_counts)
fid.write(line + '\n')
line = 'Percent of reads left: ' + str(100 - 100*float(raw_counts-raw_counts)/float(raw_counts)) + '%'
fid.write(line + '\n')
fid.write('\n')
try:
line = 'Number of demultiplexed reads = ' + str(sb_counts)
fid.write(line + '\n')
line = 'Percent of reads left: ' + str(100 - 100*float(raw_counts-sb_counts)/float(raw_counts)) + '%'
fid.write(line + '\n')
fid.write('\n')
except:
pass
try:
line = 'Number of primer-trimmed reads = ' + str(pt_counts)
fid.write(line + '\n')
line = 'Percent of reads left: ' + str(100 - 100*float(raw_counts-pt_counts)/float(raw_counts)) + '%'
fid.write(line + '\n')
fid.write('\n')
except:
pass
try:
line = 'Number of quality-trimmed reads = ' + str(qt_counts)
fid.write(line + '\n')
line = 'Percent of reads left: ' + str(100 - 100*float(raw_counts-qt_counts)/float(raw_counts)) + '%'
fid.write(line + '\n')
fid.write('\n')
except:
pass
try:
line = 'Number of length-trimmed reads = ' + str(lt_counts)
fid.write(line + '\n')
line = 'Percent of reads left: ' + str(100 - 100*float(raw_counts-lt_counts)/float(raw_counts)) + '%'
fid.write(line + '\n')
fid.write('\n')
except:
pass
try:
line = 'Number of FASTA reads left = ' + str(fasta_counts)
fid.write(line + '\n')
line = 'Percent of reads left: ' + str(100 - 100*float(raw_counts-fasta_counts)/float(raw_counts)) + '%'
fid.write(line + '\n')
fid.write('\n')
except:
pass
def warning(*objs):
print("WARNING: ", *objs, file=sys.stderr)
def remove_empty_files(filenames, step=''):
# Reads size of each file in filenames and returns a list without any empty files
# Edit added by Claire Duvalet on 6/6/2016
# step is a string that is passed to the stderr statement indicating point at which
# empty files were found. E.g. "barcodes trimming"
keepfiles = []
for f in filenames:
if os.stat(f).st_size != 0:
keepfiles.append(f)
if len(keepfiles) != len(filenames):
warning("found {} empty files after {} step".format(len(filenames) - len(keepfiles), step))
return keepfiles
| mit | -4,760,766,381,229,880,000 | 31.683908 | 161 | 0.569017 | false |
sonaht/es-backup-scripts | restore.py | 1 | 3217 | #!/usr/bin/python
# Dependencies
import sys
import os
import time
import json
import tarfile
import shutil
import requests
# Help text
if len(sys.argv) < 2:
print "Usage:"
print " python restore.py (indexname)"
print " python restore.py (indexname) (elasticsearch host)"
print " python restore.py (indexname) (elasticsearch host) (elasticsearch port)"
exit(0)
# Get the elasticsearch server
if len(sys.argv) > 2:
host = sys.argv[2]
if len(sys.argv) > 3:
port = sys.argv[3]
else:
port = "9200"
else:
host = "localhost"
port = "9200"
url = "http://%s:%s" % (host, port)
print "Using ElasticSearch at %s" % url
try:
r = requests.get(url)
if r.status_code != 200:
print "Error hitting ElasticSearch on %s, response code was %i" % (url, r.status_code)
exit(1)
else:
print "Verified ElasticSearch server"
except:
print "Unable to hit ElasticSearch on %s" % url
exit(1)
# Check with the user
index = sys.argv[1]
print "Restoring index '%s'" % index
print "Ctrl+C now to abort..."
time.sleep(3)
# Check the index doesn't already exist
r = requests.get("%s/%s/_mapping" % (url, index))
if r.status_code != 404:
print "The index already exists. Please ensure it does not exist first."
print "This command can be executed to do this:"
print "curl -XDELETE %s/%s" % (url, index)
exit(1)
# Unzip the backup file
filename = "%s.esbackup" % index
tar = tarfile.open(filename)
tar.extractall()
tar.close()
# Read the settings
settings_file = open("%s/settings" % index, "r")
settings = json.loads(settings_file.read())
settings_file.close()
main_index = settings.keys()[0]
settings = settings[main_index]
if 'settings' in settings:
settings = settings["settings"]
# Read the schema
schema_file = open("%s/schema" % index, "r")
schema = json.loads(schema_file.read())
schema_file.close()
schema = schema[main_index]
if 'mappings' in schema:
schema = schema['mappings']
# Create the index on the server
data={}
data["mappings"] = schema
data["settings"] = settings
r = requests.put("%s/%s" % (url, main_index), data=json.dumps(data))
if r.status_code != 200:
print "Unable to put the index to the server (%i), aborting" % r.status_code
print r.content
exit(1)
# Load up the data files and put them all in
data_files = os.listdir("%s/data" % index)
for dfile in data_files:
data_file = open("%s/data/%s" % (index, dfile))
items = json.loads(data_file.read())
data_file.close()
bulk = ""
for item in items:
source = item["_source"]
del item["_source"]
command = {}
command["index"] = item
bulk = bulk + json.dumps(command) + "\n" + json.dumps(source) + "\n"
print "Putting %i items" % len(items)
r = requests.post("%s/_bulk" % url, data=bulk)
if r.status_code != 200:
print "Failed with code %i" % r.status_code
exit(1)
# Create index alias if needed
if main_index != index:
alias = {}
alias["actions"] = [{"add": {"index": main_index, "alias": index}}]
r = requests.post("%s/_aliases" % url, data = json.dumps(alias))
if r.status_code != 200:
print "Unable to create the alias of the index (%s), aborting" % main_index
print r.content
exit(1)
# Clean up the directory
shutil.rmtree(index)
print "Finished"
| mit | 6,981,108,509,704,897,000 | 24.330709 | 88 | 0.667392 | false |
EliotBryant/ShadDetector | shadDetector_testing/Gradient Based Methods/sharpen_lowgrad.py | 1 | 5324 | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 20 12:55:55 2017
@author: Eliot
sharpen_lowgrad.py
"""
import os
import numpy as np
import cv2
import skimage.io
from skimage.exposure import rescale_intensity
def ensure_dir(file_path):
''' check for directory and create if necessary'''
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
def eliotmask(sobelb, sobelg, sobelr):
'''
Take Sobel/Scharr gradients for the (b, g, r) channels and generate a mask of regions
that are deemed candidate shadow pixels, keeping only gradual gradients whose
magnitude lies roughly between 30 and 65 (as implemented by the thresholds below).
'''
sobels = [sobelb, sobelg, sobelr]
for sobel in sobels:
masks = [sobel<-65, sobel>=-65, sobel<30, sobel==0, sobel>30, sobel<=65, sobel>65, sobel>=-30, sobel<=30]
pos_mask = masks[4] & masks[5]
neg_mask = masks[1] & masks[2]
zero_mask = masks[7] & masks[8]
sobel[masks[0]] = 0 # big negative gradient
sobel[masks[6]] = 0 # big positive gradient
sobel[pos_mask] = 255 # small positive gradient
sobel[neg_mask] = -255 # small negative gradient
sobel[masks[3]] = 0 # zero gradient
sobel[zero_mask] = 0 # tiny gradient
comparebg = np.bitwise_and(np.int16(sobelb), np.int16(sobelg), dtype=np.int32)
comparegr = np.bitwise_and(np.int16(sobelg), np.int16(sobelr), dtype=np.int32)
outbool = np.logical_and(comparebg, comparegr)
candarr = 255*np.uint8(outbool) # candidate shadow pixels
return candarr
def sobel_xydict(src):
'''
Generate x and y sobel dictionaries of each colour in BGR space.
Input: source image (BGR).
Output: sobelxdict{blue,green,red}
sobelydict{blue,green,red}
'''
# split image to 3 colours
b,g,r = src[...,0], src[...,1], src[...,2]
cols = {"blue":b, "green":g, "red":r}
# initialise dictionaries to place sobels into
sobelxdict, sobelydict = {}, {}
sobeldicts = [sobelxdict, sobelydict]
for num, dicts in enumerate(sobeldicts):
for key, value in cols.items():
sobel = cv2.Sobel(value,cv2.CV_64F,(1-num),num,ksize=3)
dicts[key] = sobel
return sobelxdict, sobelydict
def convolve(image, kernel):
# grab the spatial dimensions of the image, along with
# the spatial dimensions of the kernel
(iH, iW) = image.shape[:2]
(kH, kW) = kernel.shape[:2]
# allocate memory for the output image, taking care to
# "pad" the borders of the input image so the spatial
# size (i.e., width and height) are not reduced
pad = int((kW - 1) / 2)
image = cv2.copyMakeBorder(image, pad, pad, pad, pad,
cv2.BORDER_REPLICATE)
output = np.zeros((iH, iW), dtype="float32")
# loop over the input image, "sliding" the kernel across
# each (x, y)-coordinate from left-to-right and top to
# bottom
for y in np.arange(pad, iH + pad):
for x in np.arange(pad, iW + pad):
# extract the ROI of the image by extracting the
# *center* region of the current (x, y)-coordinates
# dimensions
roi = image[y - pad:y + pad + 1, x - pad:x + pad + 1]
# perform the actual convolution by taking the
# element-wise multiplicate between the ROI and
# the kernel, then summing the matrix
k = (roi * kernel).sum()
# store the convolved value in the output (x,y)-
# coordinate of the output image
output[y - pad, x - pad] = k
# rescale the output image to be in the range [0, 255]
output = rescale_intensity(output, in_range=(0, 255))
output = (output * 255).astype("uint8")
# return the output image
return output
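# --- Hedged note (illustration only) ---
# The pure-Python loop in convolve() is easy to follow but slow on large
# images; an equivalent result (up to the border handling and the [0, 255]
# clipping applied above) can usually be obtained with OpenCV's optimised
# filter, using the sharpening kernel defined below, e.g.:
#
#     sharpened_channel = cv2.filter2D(image[..., 0], -1, sharpen)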
# construct a sharpening filter
sharpen = np.array((
[0, -1, 0],
[-1, 5, -1],
[0, -1, 0]), dtype="int")
# GLOBALS
# files & directories
thisfilepath = os.path.dirname(__file__)
loaddirpath = os.path.abspath(os.path.join(thisfilepath, "../Salvador/test_files/orig"))
savedirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/sharp"))
image = cv2.imread(loaddirpath + "/001CROP11-17-59.jpg")
print(image.shape[2])
sharpen_im = np.zeros_like(image)
for colour in range(image.shape[2]):
sharp = convolve(image[..., colour], sharpen)
sharpen_im[..., colour] = sharp
cv2.namedWindow("test", cv2.WINDOW_KEEPRATIO)
cv2.resizeWindow("test", 600, 900)
cv2.imshow("test", sharpen_im)
cv2.imwrite(savedirpath + "/sharp_small.png", sharpen_im)
cv2.waitKey(0)
cv2.destroyAllWindows()
print(sharpen_im.shape)
x, y = sobel_xydict(sharpen_im)
print("max = ", np.max(x["blue"]), "\nmin = ", np.min(x["blue"]))
xblu = x["blue"]
xblu = rescale_intensity(xblu, in_range=(0, 255))
xblu = (xblu * 255).astype("uint8")
cv2.namedWindow("xblue", cv2.WINDOW_KEEPRATIO)
cv2.resizeWindow("xblue", 600, 900)
cv2.imshow("xblue", xblu)
cv2.waitKey(0)
cv2.destroyAllWindows()
xelmask = eliotmask(x["blue"],x["green"],x["red"])
yelmask = eliotmask(y["blue"],y["green"],y["red"])
for num, dicts in enumerate([xelmask, yelmask]):
cv2.namedWindow(str(num), cv2.WINDOW_KEEPRATIO)
cv2.resizeWindow(str(num), 600, 900)
cv2.imshow(str(num), dicts)
cv2.waitKey(0)
cv2.destroyAllWindows()
| gpl-3.0 | 6,797,915,883,095,718,000 | 31.864198 | 113 | 0.634673 | false |
vmax77/Poppy | pypot/primitive/manager.py | 1 | 2816 | import threading
import numpy
import time
from collections import defaultdict
from functools import partial
class PrimitiveManager(threading.Thread):
""" Combines all :class:`~pypot.primitive.primitive.Primitive` orders and affect them to the real motors.
At a predefined frequency, the manager gathers all the orders sent by the primitive to the "fake" motors, combined them thanks to the filter function and affect them to the "real" motors.
.. note:: The primitives are automatically added (resp. removed) to the manager when they are started (resp. stopped).
"""
def __init__(self, motors, freq=50, filter=partial(numpy.mean, axis=0)):
""" :param motors: list of real motors used by the attached primitives
:type motors: list of :class:`~pypot.dynamixel.motor.DxlMotor`
:param int freq: update frequency
:param func filter: function used to combine the different request (default mean)
"""
threading.Thread.__init__(self, name='Primitive Manager')
self.daemon = True
self._prim = []
self._period = 1.0 / freq
self._motors = motors
self._filter = filter
self._running = threading.Event()
self._running.set()
def add(self, p):
""" Add a primitive to the manager. The primitive automatically attached itself when started. """
self._prim.append(p)
def remove(self, p):
""" Remove a primitive from the manager. The primitive automatically remove itself when stopped. """
self._prim.remove(p)
@property
def primitives(self):
""" List of all attached :class:`~pypot.primitive.primitive.Primitive`. """
return self._prim
def run(self):
""" Combined at a predefined frequency the request orders and affect them to the real motors.
.. note:: Should not be called directly but launch through the thread start method.
"""
while self._running.is_set():
start = time.time()
for m in self._motors:
to_set = defaultdict(list)
for p in self._prim:
for key, val in getattr(p.robot, m.name)._to_set.iteritems():
to_set[key].append(val)
for key, val in to_set.iteritems():
filtred_val = self._filter(val)
setattr(m, key, filtred_val)
end = time.time()
dt = self._period - (end - start)
if dt > 0:
time.sleep(dt)
def stop(self):
""" Stop the primitive manager. """
self._running.clear() | gpl-3.0 | 2,473,339,752,827,600,400 | 36.56 | 195 | 0.571023 | false |
openmaraude/fab_taxi | fabfile/api.py | 1 | 8833 | #coding: utf-8
from fabtools import require, git, python, nginx, supervisor, service, files
from fabric.context_managers import cd, shell_env
from fabric.api import put, run, task, env
from os import environ, path
import time, re
from .dash import restart_stats_workers
@task
def test_uwsgi_is_started(now):
for i in range(1, 30):
status = supervisor.process_status('uwsgi_{}'.format(now))
if status == 'RUNNING':
break
time.sleep(1)
testing_file = '/tmp/test_uwsgi.py'
if files.is_file(testing_file):
files.remove(testing_file)
put('files/test_uwsgi.py', '/tmp/')
require.python.package('six', use_sudo=True)
output = run('python {} {} {} aa'.format(testing_file, env.uwsgi_socket_api(now),
'{}/ads/'.format(env.conf_api.SERVER_NAME)))
assert '"message"' in output
from test_api import test_api
test_api(testing_file, env.uwsgi_socket_api(now), env.conf_api.SERVER_NAME)
def install_swagger_ui():
with cd('~'):
if not files.exists('APITaxi_swagger'):
git.clone('https://github.com/openmaraude/APITaxi_swagger')
git.checkout('APITaxi_swagger')
git.pull('APITaxi_swagger')
return path.join(run('pwd'), 'APITaxi_swagger')
def install_zupc_cache():
with cd('~'):
p = path.join(run('pwd'), 'zupc', 'zupc')
require.files.directory(p, use_sudo=True)
require.files.file(path.join(p, "index.html"),
source="files/zupc.html", use_sudo=True)
return p
def deploy_nginx_api_site(now):
files.upload_template('templates/uwsgi.ini', env.uwsgi_api_config_path(now),
context={
'config_path': env.apitaxi_config_path(now),
'api_path': env.apitaxi_dir(now),
'venv_path': env.apitaxi_venv_path(now),
'uwsgi_file': env.uwsgi_api_file(now),
'uwsgi_pid_file': env.uwsgi_api_pid_file(now),
'uwsgi_log_file1': env.uwsgi_logdir + '/api_launcher.log',
'uwsgi_log_file2': env.uwsgi_logdir + '/api_uwsgi.log',
'uwsgi_launcher_logdir': env.uwsgi_launcher_logdir,
'socket': env.uwsgi_socket_api(now),
'processes': env.wsgi_processes,
'threads': env.wsgi_threads,
'now': now
}
)
files.upload_template('templates/uwsgi.ini', env.uwsgi_front_config_path(now),
context={
'config_path': env.fronttaxi_config_path(now),
'api_path': env.fronttaxi_dir(now),
'venv_path': env.apitaxi_venv_path(now),
'uwsgi_file': env.uwsgi_front_file(now),
'uwsgi_pid_file': env.uwsgi_front_pid_file(now),
'uwsgi_log_file1': env.uwsgi_logdir + '/front_launcher.log',
'uwsgi_log_file2': env.uwsgi_logdir + '/front_uwsgi.log',
'socket': env.uwsgi_socket_front(now),
'processes': env.wsgi_processes,
'threads': env.wsgi_threads,
'now': now
}
)
uwsgi = path.join(env.apitaxi_venv_path(now), 'bin', 'uwsgi')
require.supervisor.process('uwsgi_api_{}'.format(now),
command='{} --ini {}'.format(uwsgi, env.uwsgi_api_config_path(now)),
directory=env.apitaxi_venv_path(now),
stdout_logfile = '/var/log/nginx/apitaxi.log',
user='www-data'
)
require.supervisor.process('uwsgi_front_{}'.format(now),
command='{} --ini {}'.format(uwsgi, env.uwsgi_front_config_path(now)),
directory=env.apitaxi_venv_path(now),
stdout_logfile = '/var/log/nginx/fronttaxi.log',
user='www-data'
)
test_uwsgi_is_started(now)
celery = path.join(env.apitaxi_venv_path(now), 'bin', 'celery')
worker_name = 'send_hail_{}'.format(now)
command = '{} worker --app=celery_worker.celery -Q {} -n {} --workdir={}'
require.supervisor.process(worker_name,
command=command.format(celery, worker_name, worker_name, env.apitaxi_dir(now)),
directory=env.apitaxi_dir(now),
stdout_logfile='/var/log/celery/send_hail.log',
user='www-data',
environment='APITAXI_CONFIG_FILE=prod_settings.py'
)
swagger_dir = install_swagger_ui()
zupc_dir = install_zupc_cache()
require.nginx.site('apitaxi',
template_source='templates/nginx_site.conf',
domain_name=getattr(env.conf_api, 'HOST', 'localhost'),
env='NOW={}'.format(now),
port=getattr(env.conf_api, 'PORT', 80),
socket_api=env.uwsgi_socket_api(now),
socket_front=env.uwsgi_socket_front(now),
doc_dir=swagger_dir,
zupc_cache_dir=zupc_dir
)
path_redis = '{}/redis.sh'.format(env.deployment_dir(now))
require.files.template_file(path=path_redis,
template_source='templates/redis.sh',
context={'deployment_dir':env.deployment_dir(now)},
mode='770')
require.supervisor.process('redis',
command=path_redis,
stdout_logfile='/var/log/redis/error.log'
)
def clean_directories(now):
l = run('for i in {}/deployment_*; do echo $i; done'.format(env.deploy_dir)).split("\n")
for d in [d.replace('\r', '') for d in l]:
if not files.is_dir(d):
continue
if d == env.deployment_dir(now):
continue
files.remove(d, recursive=True)
l = run('for i in {}/apitaxi_*; do echo $i; done'.format(env.uwsgi_socket_dir)).split("\n")
for f in [f.replace('\r', '') for f in l]:
if f == env.uwsgi_socket_api(now):
continue
files.remove(f, use_sudo=True)
# The pid file should be removed when the process stops
def stop_old_processes(now):
def stop_process(name, visitor):
l = run('for i in /etc/supervisor/conf.d/{}_*; do echo $i; done'.format(name)).split("\n")
for f in [f.replace('\r', '') for f in l]:
print 'To remove: {}'.format(f)
if str(now) in f:
continue
file_ = f.split('/')[-1]
process = file_[:-len('.conf')]
visitor(process)
files.remove(f, use_sudo=True)
stop_process('uwsgi', lambda p:supervisor.stop_process(p))
def stop_queues(process):
# A request's status becomes 'failure' after 15 seconds in the 'received' state,
# so even if the queue is not empty we can shut down the process
for i in range(1, 17):
res = run('python manage.py active_tasks {}'.format(process))
if res == '':
break
time.sleep(1)
supervisor.stop_process(process)
with cd(env.apitaxi_dir(now)):
with python.virtualenv(env.apitaxi_venv_path(now)),\
shell_env(APITAXI_CONFIG_FILE=env.apitaxi_config_path(now)):
stop_process('send_hail', stop_queues)
def deploy_front(now):
with cd(env.deployment_dir(now)):
run(u'wget {} -O front.zip'.format(env.fronttaxi_archive))
run('unzip front.zip')
with cd(env.fronttaxi_dir(now)), python.virtualenv(env.apitaxi_venv_path(now)):
python.install_requirements('requirements.txt')
put(environ['APITAXI_CONFIG_FILE'], env.fronttaxi_config_path(now))
def get_admin_key():
return run(
"""psql {} -tAc 'SELECT apikey FROM "user" where email='"'"'admin'"'"';'"""\
.format(env.conf_api.SQLALCHEMY_DATABASE_URI))
def install_admin_user():
if len(get_admin_key()) > 0:
return
run('python manage.py create_admin admin')
@task
def deploy_api(commit='master'):
now = int(time.time())
require.files.directory(env.deployment_dir(now))
with cd(env.deployment_dir(now)):
run(u'wget {}'.format(env.apitaxi_archive.format(commit)))
run('unzip {}.zip'.format(commit))
if commit != 'master':
run('mv APITaxi-{} APITaxi-master'.format(commit))
with cd(env.apitaxi_dir(now)):
require.python.virtualenv(env.apitaxi_venv_path(now), venv_python="python3")
with python.virtualenv(env.apitaxi_venv_path(now)):
python.install_pip(use_sudo=False)
require.python.package('uwsgi')
python.install_requirements('requirements.txt')
put(environ['APITAXI_CONFIG_FILE'], env.apitaxi_config_path(now))
with shell_env(APITAXI_CONFIG_FILE=env.apitaxi_config_path(now)):
for i in range(1, 30):
if service.is_running('supervisor'):
break
time.sleep(1)
run('python manage.py db upgrade')
install_admin_user()
deploy_front(now)
deploy_nginx_api_site(now)
if not service.is_running('nginx'):
service.start('nginx')
clean_directories(now)
stop_old_processes(now)
restart_stats_workers(now)
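# --- Hedged usage note (illustration only; assumes Fabric 1.x style task invocation) ---
# The tasks above would typically be run from the command line, e.g.:
#
#     APITAXI_CONFIG_FILE=prod_settings.py fab -H deploy@api.example.org deploy_api:commit=master
#
# The host is a placeholder; APITAXI_CONFIG_FILE must point at the production
# settings file because deploy_api() uploads it to the new deployment.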
| mit | -1,103,326,207,926,974,200 | 37.404348 | 98 | 0.590852 | false |
XeCycle/indico | indico/cli/shell.py | 2 | 7727 | # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import datetime
import itertools
import os
import re
import sys
from functools import partial
from operator import itemgetter, attrgetter
import transaction
from flask import current_app
from flask_script import Shell, Option
from werkzeug.local import LocalProxy
import MaKaC
from indico.core import signals
from indico.core.celery import celery
from indico.core.config import Config
from indico.core.db import DBMgr, db
from indico.core.index import Catalog
from indico.core.plugins import plugin_engine
from indico.modules.events import Event
from indico.util.console import strip_ansi, cformat
from indico.util.fossilize import clearCache
from indico.web.flask.util import IndicoConfigWrapper
from MaKaC.common import HelperMaKaCInfo
from MaKaC.common.indexes import IndexesHolder
from MaKaC.conference import Conference, ConferenceHolder, CategoryManager
def _add_to_context(namespace, info, element, name=None, doc=None, color='green'):
if not name:
name = element.__name__
namespace[name] = element
if doc:
info.append(cformat('+ %%{%s}{}%%{white!} ({})' % color).format(name, doc))
else:
info.append(cformat('+ %%{%s}{}' % color).format(name))
def _add_to_context_multi(namespace, info, elements, names=None, doc=None, color='green'):
if not names:
names = [x.__name__ for x in elements]
for name, element in zip(names, elements):
namespace[name] = element
if doc:
info.append(cformat('+ %%{white!}{}:%%{reset} %%{%s}{}' % color).format(doc, ', '.join(names)))
else:
info.append(cformat('+ %%{%s}{}' % color).format(', '.join(names)))
def _add_to_context_smart(namespace, info, objects, get_name=attrgetter('__name__'), color='cyan'):
def _get_module(obj):
segments = tuple(obj.__module__.split('.'))
if segments[0].startswith('indico_'): # plugin
return 'plugin:{}'.format(segments[0])
elif segments[:2] == ('indico', 'modules'):
return 'module:{}'.format(segments[2])
elif segments[:2] == ('indico', 'core'):
return 'core:{}'.format(segments[2])
else:
return '.'.join(segments[:-1] if len(segments) > 1 else segments)
items = [(_get_module(obj), get_name(obj), obj) for obj in objects]
for module, items in itertools.groupby(sorted(items, key=itemgetter(0, 1)), key=itemgetter(0)):
names, elements = zip(*((x[1], x[2]) for x in items))
_add_to_context_multi(namespace, info, elements, names, doc=module, color=color)
class IndicoShell(Shell):
def __init__(self):
banner = cformat('%{yellow!}Indico v{} is ready for your commands!').format(MaKaC.__version__)
super(IndicoShell, self).__init__(banner=banner, use_bpython=False)
self._context = None
self._info = None
self._quiet = False
def __call__(self, app, *args, **kwargs):
with app.test_request_context(base_url=Config.getInstance().getBaseURL()):
return self.run(*args, **kwargs)
def run(self, no_ipython, use_bpython, quiet):
context = self.get_context()
if not quiet:
self.banner = '\n'.join(self._info + ['', self.banner])
if use_bpython:
# bpython does not support escape sequences :(
# https://github.com/bpython/bpython/issues/396
self.banner = strip_ansi(self.banner)
clearCache()
with context['dbi'].global_connection():
super(IndicoShell, self).run(no_ipython or use_bpython, not use_bpython)
def get_options(self):
return (
Option('--no-ipython', action='store_true', dest='no_ipython', default=False,
help="Do not use the IPython shell"),
Option('--use-bpython', action='store_true', dest='use_bpython', default=False,
help="Use the BPython shell"),
Option('--quiet', '-q', action='store_true', dest='quiet', default=False,
help="Do not print the shell context")
)
def get_context(self):
if self._context is None:
self._context = context = {}
self._info = []
add_to_context = partial(_add_to_context, context, self._info)
add_to_context_multi = partial(_add_to_context_multi, context, self._info)
add_to_context_smart = partial(_add_to_context_smart, context, self._info)
# Common stdlib modules
self._info.append(cformat('*** %{magenta!}stdlib%{reset} ***'))
add_to_context_multi([getattr(datetime, attr) for attr in ('date', 'time', 'datetime', 'timedelta')] +
[itertools, re, sys, os],
color='yellow')
# Legacy Indico
self._info.append(cformat('*** %{magenta!}Legacy%{reset} ***'))
add_to_context_multi([Conference, ConferenceHolder, CategoryManager, Catalog, IndexesHolder], color='green')
add_to_context(LocalProxy(HelperMaKaCInfo.getMaKaCInfoInstance), 'minfo', color='green')
# Models
self._info.append(cformat('*** %{magenta!}Models%{reset} ***'))
models = [cls for name, cls in sorted(db.Model._decl_class_registry.items(), key=itemgetter(0))
if hasattr(cls, '__table__')]
add_to_context_smart(models)
# Tasks
self._info.append(cformat('*** %{magenta!}Tasks%{reset} ***'))
tasks = [task for task in sorted(celery.tasks.values()) if not task.name.startswith('celery.')]
add_to_context_smart(tasks, get_name=lambda x: x.name.replace('.', '_'), color='blue!')
# Plugins
self._info.append(cformat('*** %{magenta!}Plugins%{reset} ***'))
plugins = [type(plugin) for plugin in sorted(plugin_engine.get_active_plugins().values(),
key=attrgetter('name'))]
add_to_context_multi(plugins, color='yellow!')
# Utils
self._info.append(cformat('*** %{magenta!}Misc%{reset} ***'))
add_to_context(celery, 'celery', doc='celery app', color='blue!')
add_to_context(DBMgr.getInstance(), 'dbi', doc='zodb db interface', color='cyan!')
add_to_context(db, 'db', doc='sqlalchemy db interface', color='cyan!')
add_to_context(transaction, doc='transaction module', color='cyan!')
add_to_context(IndicoConfigWrapper(Config.getInstance()), 'config', doc='indico config')
add_to_context(current_app, 'app', doc='flask app')
add_to_context(lambda x: ConferenceHolder().getById(x, True), 'E', doc='get event by id (Conference)')
add_to_context(Event.get, 'EE', doc='get event by id (Event)')
# Stuff from plugins
signals.plugin.shell_context.send(add_to_context=add_to_context, add_to_context_multi=add_to_context_multi)
return self._context
| gpl-3.0 | -9,028,294,536,051,959,000 | 46.404908 | 120 | 0.619387 | false |
seehuhn/wisent | doc/web/conf.py | 1 | 2043 | import sys, os, re
extensions = [ ]
templates_path = [ 'templates' ]
source_suffix = '.txt'
master_doc = 'index'
# General information about the project.
project = 'wisent'
copyright = '2012, Jochen Voss'
m = re.search(r'AC_INIT\(wisent, *(([0-9]+\.[0-9]+)[^, ]*),',
open("../../configure.ac").read(),
re.MULTILINE)
version = m.group(2)
release = m.group(1)
del m
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = [ 'html', 'web', 'latex' ]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
html_title = "Wisent Users' Manual"
html_use_modindex = False
html_use_index = True
html_copy_source = False
html_theme = 'default'
html_theme_options = {
"rightsidebar": "true",
"footerbgcolor": "#B4C981", # Background color for the footer line.
"footertextcolor": "black", # Text color for the footer line.
"sidebarbgcolor": "#D4D991", # Background color for the sidebar.
"sidebartextcolor": "black", # Text color for the sidebar.
"sidebarlinkcolor": "#C00000", # Link color for the sidebar.
"relbarbgcolor": "#B4C981", # Background color for the relation bar.
"relbartextcolor": "black", # Text color for the relation bar.
"relbarlinkcolor": "#C00000", # Link color for the relation bar.
"bgcolor": "#DBDEB7", # Body background color.
"textcolor": "black", # Body text color.
"linkcolor": "#C00000", # Body link color.
"headbgcolor": "#DBDEB7", # Background color for headings.
"headtextcolor": "black", # Text color for headings.
"headlinkcolor": "black", # Link color for headings.
"codebgcolor": "white", # Background color for code blocks.
"codetextcolor": "black", # Default text color for code blocks
# "bodyfont": "", # (CSS font-family): Font for normal text.
# "headfont": "", # (CSS font-family): Font for headings.
}
| gpl-2.0 | -5,163,218,314,409,122,000 | 35.482143 | 79 | 0.633382 | false |
kumina/django-powerdns-manager | src/powerdns_manager/admin.py | 1 | 13091 | # -*- coding: utf-8 -*-
#
# This file is part of django-powerdns-manager.
#
# django-powerdns-manager is a web based PowerDNS administration panel.
#
# Development Web Site:
# - http://www.codetrax.org/projects/django-powerdns-manager
# Public Source Code Repository:
# - https://source.codetrax.org/hgroot/django-powerdns-manager
#
# Copyright 2012 George Notaras <gnot [at] g-loaded.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.contrib import admin
from django.db.models.loading import cache
from django.contrib import messages
from django.contrib.admin import SimpleListFilter
from django.utils.translation import ugettext_lazy as _
from django.utils.crypto import get_random_string
from powerdns_manager import settings
from powerdns_manager.forms import DomainModelForm
from powerdns_manager.forms import SoaRecordModelForm
from powerdns_manager.forms import NsRecordModelForm
from powerdns_manager.forms import MxRecordModelForm
from powerdns_manager.forms import SrvRecordModelForm
from powerdns_manager.forms import ARecordModelForm
from powerdns_manager.forms import AaaaRecordModelForm
from powerdns_manager.forms import CnameRecordModelForm
from powerdns_manager.forms import PtrRecordModelForm
from powerdns_manager.forms import TxtRecordModelForm
from powerdns_manager.forms import DsRecordModelForm
from powerdns_manager.forms import CertRecordModelForm
from powerdns_manager.forms import HinfoRecordModelForm
from powerdns_manager.forms import LocRecordModelForm
from powerdns_manager.forms import SpfRecordModelForm
from powerdns_manager.forms import SshfpRecordModelForm
from powerdns_manager.forms import RpRecordModelForm
from powerdns_manager.forms import NaptrRecordModelForm
from powerdns_manager.forms import AfsdbRecordModelForm
from powerdns_manager.forms import DnskeyRecordModelForm
from powerdns_manager.forms import KeyRecordModelForm
from powerdns_manager.forms import NsecRecordModelForm
from powerdns_manager.forms import RrsigRecordModelForm
from powerdns_manager.signal_cb import zone_saved
from powerdns_manager.actions import set_domain_type_bulk
from powerdns_manager.actions import set_ttl_bulk
from powerdns_manager.actions import force_serial_update
from powerdns_manager.actions import reset_api_key
from powerdns_manager.actions import clone_zone
from powerdns_manager.utils import generate_api_key
class DynamicZoneInline(admin.StackedInline):
model = cache.get_model('powerdns_manager', 'DynamicZone')
fields = ('is_dynamic', 'api_key')
readonly_fields = ('api_key', )
search_fields = ('domain', )
verbose_name = 'Dynamic Zone'
verbose_name_plural = 'Dynamic Zone' # Only one dynamic zone per domain
can_delete = False
# Show exactly one form
extra = 1
max_num = 1
class BaseTabularRecordInline(admin.TabularInline):
RR_TYPE = '__OVERRIDE__'
form = '__OVERRIDE__'
model = cache.get_model('powerdns_manager', 'Record')
extra = 0
fields = ('name', 'ttl', 'content')
def __init__(self, *args, **kwargs):
self.verbose_name = '%s Resource Record' % self.RR_TYPE
self.verbose_name_plural = '%s Resource Records' % self.RR_TYPE
super(BaseTabularRecordInline, self).__init__(*args, **kwargs)
def queryset(self, request):
"""Return only RR_TYPE records"""
qs = super(BaseTabularRecordInline, self).queryset(request)
return qs.filter(type=self.RR_TYPE)
class SoaRecordInline(admin.StackedInline):
model = cache.get_model('powerdns_manager', 'Record')
form = SoaRecordModelForm
# Show exactly one form
extra = 1
max_num = 1
verbose_name = 'SOA Resource Record'
verbose_name_plural = 'SOA Resource Record' # Only one SOA RR per zone
# The ``name`` field is not available for editing. It is always set to the
# name of the domain in ``forms.SoaRecordModelForm.save()`` method.
fields = ('ttl', 'primary', 'hostmaster', 'serial', 'refresh', 'retry', 'expire', 'default_ttl')
can_delete = False
def queryset(self, request):
"""Return only SOA records"""
qs = super(SoaRecordInline, self).queryset(request)
return qs.filter(type='SOA')
class NsRecordInline(BaseTabularRecordInline):
RR_TYPE = 'NS'
form = NsRecordModelForm
class MxRecordInline(BaseTabularRecordInline):
RR_TYPE = 'MX'
form = MxRecordModelForm
fields = ('name', 'ttl', 'prio', 'content')
class SrvRecordInline(BaseTabularRecordInline):
RR_TYPE = 'SRV'
form = SrvRecordModelForm
fields = ('name', 'ttl', 'prio', 'weight', 'port', 'target')
class ARecordInline(BaseTabularRecordInline):
RR_TYPE = 'A'
form = ARecordModelForm
class AaaaRecordInline(BaseTabularRecordInline):
RR_TYPE = 'AAAA'
form = AaaaRecordModelForm
class CnameRecordInline(BaseTabularRecordInline):
RR_TYPE = 'CNAME'
form = CnameRecordModelForm
class PtrRecordInline(BaseTabularRecordInline):
RR_TYPE = 'PTR'
form = PtrRecordModelForm
class TxtRecordInline(BaseTabularRecordInline):
RR_TYPE = 'TXT'
form = TxtRecordModelForm
class DsRecordInline(BaseTabularRecordInline):
RR_TYPE = 'DS'
form = DsRecordModelForm
class CertRecordInline(BaseTabularRecordInline):
RR_TYPE = 'CERT'
form = CertRecordModelForm
class HinfoRecordInline(BaseTabularRecordInline):
RR_TYPE = 'HINFO'
form = HinfoRecordModelForm
class LocRecordInline(BaseTabularRecordInline):
RR_TYPE = 'LOC'
form = LocRecordModelForm
class SpfRecordInline(BaseTabularRecordInline):
RR_TYPE = 'SPF'
form = SpfRecordModelForm
class SshfpRecordInline(BaseTabularRecordInline):
RR_TYPE = 'SSHFP'
form = SshfpRecordModelForm
class RpRecordInline(BaseTabularRecordInline):
RR_TYPE = 'RP'
form = RpRecordModelForm
class NaptrRecordInline(BaseTabularRecordInline):
RR_TYPE = 'NAPTR'
form = NaptrRecordModelForm
class AfsdbRecordInline(BaseTabularRecordInline):
RR_TYPE = 'AFSDB'
form = AfsdbRecordModelForm
class DnskeyRecordInline(BaseTabularRecordInline):
RR_TYPE = 'DNSKEY'
form = DnskeyRecordModelForm
class KeyRecordInline(BaseTabularRecordInline):
RR_TYPE = 'KEY'
form = KeyRecordModelForm
class NsecRecordInline(BaseTabularRecordInline):
RR_TYPE = 'NSEC'
form = NsecRecordModelForm
class RrsigRecordInline(BaseTabularRecordInline):
RR_TYPE = 'RRSIG'
form = RrsigRecordModelForm
class EmptyNonTerminalRecordInline(admin.TabularInline):
"""Special inline for empty non-terminals supported by PowerDNS 3.2.
See: http://doc.powerdns.com/dnssec-modes.html#dnssec-direct-database
"""
model = cache.get_model('powerdns_manager', 'Record')
extra = 0
verbose_name = 'Empty Non-Terminal Resource Record'
verbose_name_plural = 'Empty Non-Terminal Resource Record' # Empty non-terminal records (PowerDNS 3.2+)
fields = ('name', 'ttl', 'content')
readonly_fields = ('name', 'ttl', 'content')
can_delete = False
def queryset(self, request):
"""Return only Empty Non-Terminal records"""
qs = super(EmptyNonTerminalRecordInline, self).queryset(request)
return qs.filter(type__isnull=True)
class DomainMetadataInline(admin.TabularInline):
model = cache.get_model('powerdns_manager', 'DomainMetadata')
fields = ('kind', 'content', )
extra = 0
verbose_name_plural = 'Domain Metadata'
class CryptoKeyInline(admin.TabularInline):
model = cache.get_model('powerdns_manager', 'CryptoKey')
fields = ('flags', 'active', 'content')
extra = 0
verbose_name_plural = 'Crypto Keys'
class DomainAdmin(admin.ModelAdmin):
form = DomainModelForm
fields = ('date_modified', 'name', 'type', 'master')
readonly_fields = ('date_modified', )
list_display = ('name', 'export_zone_html_link', 'type', 'master', 'date_modified')
list_filter = ('type', )
search_fields = ('name', 'master')
verbose_name = 'zone'
verbose_name_plural = 'zones'
save_on_top = True
actions = [reset_api_key, set_domain_type_bulk, set_ttl_bulk, force_serial_update, clone_zone]
change_list_template = 'powerdns_manager/domain_changelist.html'
#
# Build the ``inlines`` list. Only inlines for enabled RR types are included.
#
inlines = [DynamicZoneInline]
# Resource Record type to Resource Record Inline Map
RR_INLINE_MAP = {
'A': ARecordInline,
'AAAA': AaaaRecordInline,
'AFSDB': AfsdbRecordInline,
'CERT': CertRecordInline,
'CNAME': CnameRecordInline,
'DNSKEY': DnskeyRecordInline,
'DS': DsRecordInline,
'HINFO': HinfoRecordInline,
'KEY': KeyRecordInline,
'LOC': LocRecordInline,
'MX': MxRecordInline,
'NAPTR': NaptrRecordInline,
'NS': NsRecordInline,
'NSEC': NsecRecordInline,
'PTR': PtrRecordInline,
'RP': RpRecordInline,
'RRSIG': RrsigRecordInline,
'SOA': SoaRecordInline,
'SPF': SpfRecordInline,
'SSHFP': SshfpRecordInline,
'SRV': SrvRecordInline,
'TXT': TxtRecordInline,
}
# Add RR inlines
for RR_TYPE in settings.PDNS_ENABLED_RR_TYPES:
inlines.append(RR_INLINE_MAP[RR_TYPE])
# Add other inlines
#inlines.append(EmptyNonTerminalRecordInline) # TODO: empty non-terminal record support is for the future
inlines.append(DomainMetadataInline)
inlines.append(CryptoKeyInline)
def queryset(self, request):
qs = super(DomainAdmin, self).queryset(request)
if not request.user.is_superuser:
# Non-superusers see the domains they have created
qs = qs.filter(created_by=request.user)
return qs
def save_model(self, request, obj, form, change):
if not change:
obj.created_by = request.user
obj.save()
# The zone serial is updated after all RRs have been saved.
# This is accomplished by sending the ``zone_saved`` signal in ``save_related()``
def save_related(self, request, form, formsets, change):
"""Calls the signal that rectifies the zone.
In ModelAdmin.add_view() and ModelAdmin.change_view() the method
save_model() is normally called before save_related().
Using a post_save signal on the Domain or Record models is not
efficient. In case of the Domain model, rectify_zone() would not process
any new data in the associated records. In case of the Record model,
rectify_zone() would be called multiple times and only the last call
would be the effective one.
rectify_zone() must be called after all the records and the domain have
been saved to the database.
Here we execute the parent save_related() and then we call rectify_zone()
through a custom signal.
"""
super(DomainAdmin, self).save_related(request, form, formsets, change)
# Send the zone_saved signal
zone_saved.send(sender=self.model, instance=form.instance)
admin.site.register(cache.get_model('powerdns_manager', 'Domain'), DomainAdmin)
class TsigKeyAdmin(admin.ModelAdmin):
fields = ('name', 'algorithm', 'secret', 'date_modified')
readonly_fields = ('date_modified', )
list_display = ('name', 'algorithm', 'date_modified')
list_filter = ('algorithm', )
search_fields = ('name', )
verbose_name = 'TSIG Key'
verbose_name_plural = 'TSIG Keys'
def queryset(self, request):
qs = super(TsigKeyAdmin, self).queryset(request)
if not request.user.is_superuser:
# Non-superusers see the records they have created
qs = qs.filter(created_by=request.user)
return qs
def save_model(self, request, obj, form, change):
if not change:
obj.created_by = request.user
obj.save()
admin.site.register(cache.get_model('powerdns_manager', 'TsigKey'), TsigKeyAdmin)
class SuperMasterAdmin(admin.ModelAdmin):
fields = ('ip', 'nameserver', 'account', 'date_modified')
readonly_fields = ('date_modified', )
list_display = ('ip', 'nameserver', 'account', 'date_modified')
search_fields = ('nameserver', 'account')
verbose_name = 'SuperMaster'
verbose_name_plural = 'SuperMasters'
admin.site.register(cache.get_model('powerdns_manager', 'SuperMaster'), SuperMasterAdmin)
| apache-2.0 | -4,980,581,423,078,971,000 | 34.285714 | 112 | 0.693301 | false |
openstack/networking-hyperv | networking_hyperv/neutron/trunk_driver.py | 1 | 5778 | # Copyright 2017 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.handlers import resources_rpc
from neutron.services.trunk.rpc import agent as trunk_rpc
from neutron_lib.services.trunk import constants as t_const
from os_win import constants as os_win_const
from os_win import utilsfactory
from oslo_log import log as logging
import oslo_messaging
from networking_hyperv.common.i18n import _LI, _LE # noqa
LOG = logging.getLogger(__name__)
class HyperVTrunkDriver(trunk_rpc.TrunkSkeleton):
"""Driver responsible for handling trunk/subport/port events.
Receives data model events from the neutron server and uses them to set up
VLAN trunks for Hyper-V vSwitch ports.
"""
def __init__(self, context):
super(HyperVTrunkDriver, self).__init__()
self._context = context
self._utils = utilsfactory.get_networkutils()
self._trunk_rpc = trunk_rpc.TrunkStub()
# Map between trunk.id and trunk.
self._trunks = {}
def handle_trunks(self, trunks, event_type):
"""Trunk data model change from the server."""
LOG.debug("Trunks event received: %(event_type)s. Trunks: %(trunks)s",
{'event_type': event_type, 'trunks': trunks})
if event_type == events.DELETED:
# The port trunks have been deleted. Remove them from cache.
for trunk in trunks:
self._trunks.pop(trunk.id, None)
else:
for trunk in trunks:
self._trunks[trunk.id] = trunk
self._setup_trunk(trunk)
def handle_subports(self, subports, event_type):
"""Subport data model change from the server."""
LOG.debug("Subports event received: %(event_type)s. "
"Subports: %(subports)s",
{'event_type': event_type, 'subports': subports})
# update the cache.
if event_type == events.CREATED:
for subport in subports:
trunk = self._trunks.get(subport['trunk_id'])
if trunk:
trunk.sub_ports.append(subport)
elif event_type == events.DELETED:
for subport in subports:
trunk = self._trunks.get(subport['trunk_id'])
if trunk and subport in trunk.sub_ports:
trunk.sub_ports.remove(subport)
# update the bound trunks.
affected_trunk_ids = set([s['trunk_id'] for s in subports])
for trunk_id in affected_trunk_ids:
trunk = self._trunks.get(trunk_id)
if trunk:
self._setup_trunk(trunk)
def bind_vlan_port(self, port_id, segmentation_id):
trunk = self._fetch_trunk(port_id)
if not trunk:
# No trunk found. No VLAN IDs to set in trunk mode.
self._set_port_vlan(port_id, segmentation_id)
return
self._setup_trunk(trunk, segmentation_id)
def _fetch_trunk(self, port_id, context=None):
context = context or self._context
try:
trunk = self._trunk_rpc.get_trunk_details(context, port_id)
LOG.debug("Found trunk for port_id %(port_id)s: %(trunk)s",
{'port_id': port_id, 'trunk': trunk})
# cache it.
self._trunks[trunk.id] = trunk
return trunk
except resources_rpc.ResourceNotFound:
return None
except oslo_messaging.RemoteError as ex:
if 'CallbackNotFound' not in str(ex):
raise
LOG.debug("Trunk plugin disabled on server. Assuming port %s is "
"not a trunk.", port_id)
return None
def _setup_trunk(self, trunk, vlan_id=None):
"""Sets up VLAN trunk and updates the trunk status."""
LOG.info('Binding trunk port: %s.', trunk)
try:
# bind sub_ports to host.
self._trunk_rpc.update_subport_bindings(self._context,
trunk.sub_ports)
vlan_trunk = [s.segmentation_id for s in trunk.sub_ports]
self._set_port_vlan(trunk.port_id, vlan_id, vlan_trunk)
self._trunk_rpc.update_trunk_status(self._context, trunk.id,
t_const.TRUNK_ACTIVE_STATUS)
except Exception:
# something broke
LOG.exception("Failure setting up subports for %s", trunk.port_id)
self._trunk_rpc.update_trunk_status(self._context, trunk.id,
t_const.TRUNK_DEGRADED_STATUS)
def _set_port_vlan(self, port_id, vlan_id, vlan_trunk=None):
LOG.info('Binding VLAN ID: %(vlan_id)s, VLAN trunk: '
'%(vlan_trunk)s to switch port %(port_id)s',
dict(vlan_id=vlan_id, vlan_trunk=vlan_trunk, port_id=port_id))
op_mode = (os_win_const.VLAN_MODE_TRUNK if vlan_trunk else
os_win_const.VLAN_MODE_ACCESS)
self._utils.set_vswitch_port_vlan_id(
vlan_id,
port_id,
operation_mode=op_mode,
trunk_vlans=vlan_trunk)
| apache-2.0 | -6,793,835,711,361,600,000 | 38.575342 | 79 | 0.59242 | false |
m1lhaus/woofer | forms/library_form.py | 1 | 2753 | # -*- coding: utf-8 -*-
#
# Woofer - free open-source cross-platform music player
# Copyright (C) 2015 Milan Herbig <milanherbig[at]gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""
All GUI components from library dialog initialized here.
"""
import logging
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from components.translator import tr
logger = logging.getLogger(__name__)
class Ui_libraryDialog(object):
def setupUi(self, libraryDialog):
libraryDialog.setObjectName("libraryDialog")
libraryDialog.resize(480, 256)
libraryDialog.setWindowFlags(libraryDialog.windowFlags() ^ Qt.WindowContextHelpButtonHint)
self.gridLayout = QGridLayout(libraryDialog)
self.label = QLabel(libraryDialog)
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.folderList = QListWidget(libraryDialog)
self.gridLayout.addWidget(self.folderList, 1, 0, 1, 1)
# ADD and REMOVE buttons
self.verticalLayout = QVBoxLayout()
self.addBtn = QPushButton(libraryDialog)
self.verticalLayout.addWidget(self.addBtn)
self.removeBtn = QPushButton(libraryDialog)
self.verticalLayout.addWidget(self.removeBtn)
spacerItem = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.gridLayout.addLayout(self.verticalLayout, 1, 1, 1, 1)
self.buttonBox = QDialogButtonBox(libraryDialog)
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Save)
self.buttonBox.button(QDialogButtonBox.Save).setText(tr['BUTTON_SAVE'])
self.buttonBox.button(QDialogButtonBox.Cancel).setText(tr['BUTTON_CANCEL'])
self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 2)
self.retranslateUi(libraryDialog)
def retranslateUi(self, libraryDialog):
libraryDialog.setWindowTitle(tr['MEDIA_LIBRARY'])
self.label.setText(tr['ADD_MEDIALIB_FOLDER_LBL'])
self.addBtn.setText(tr['ADD'])
self.removeBtn.setText(tr['REMOVE']) | gpl-3.0 | 5,352,283,054,877,990,000 | 37.788732 | 98 | 0.723938 | false |
jinzekid/codehub | python/基本操作/控制语句.py | 1 | 1247 | # Author: Jason Lu
# Guess the number
age_of_oldboy = 56
guess_age = int(input("guess age:"))
if guess_age == age_of_oldboy:
print("Yes, you got it.")
elif guess_age > age_of_oldboy:
print("think smaller...")
else:
print("think bigger...")
print("===============for语句================")
count = 0
age_of_oldboy = 56
for i in range(3):
guess_age = int(input("guess age:"))
if guess_age == age_of_oldboy:
print("Yes, you got it.")
break
elif guess_age > age_of_oldboy:
print("think smaller...")
else:
print("think bigger...")
count += 1
else:
print("you have tried too many times...fuck off")
for i in range(10):
print('i:', i)
for i in range(0, 10, 2):
print('i: ', i)
for i in range(0, 100):
if i < 50:
print('i < 50')
else:
continue
print("===============while语句================")
count = 0
age_of_oldboy = 100
while count < 3:
int_guess_age = int(input(">>guess age:"))
if int_guess_age == age_of_oldboy:
break
elif int_guess_age < age_of_oldboy:
print('think bigger')
else:
print('think smaller')
count += 1
else:
print('You have tried too many times...fuck off')
| gpl-3.0 | -6,562,586,918,179,522,000 | 14.4125 | 53 | 0.529603 | false |
0x0all/nupic | tests/integration/py2/nupic/swarming/experiments/field_contrib_temporal/description.py | 1 | 15607 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nta/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [ (u'timestamp', 'first'),
(u'gym', 'first'),
(u'consumption', 'mean'),
(u'address', 'first')],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalNextStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { 'address': { 'fieldname': u'address',
'n': 300,
'name': u'address',
'type': 'SDRCategoryEncoder',
'w': 21},
'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'maxval': 200,
'minval': 0,
'n': 1500,
'name': u'consumption',
'type': 'ScalarEncoder',
'w': 21},
'gym': { 'fieldname': u'gym',
'n': 300,
'name': u'gym',
'type': 'SDRCategoryEncoder',
'w': 21},
'timestamp_dayOfWeek': { 'dayOfWeek': (7, 3),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 8),
'type': 'DateEncoder'}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActivePerInhArea': 40,
'seed': 1956,
# coincInputPoolPct
# What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose coincInputPoolPct * (2*coincInputRadius+1)^2
'coincInputPoolPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
################################################################################
control = {
# The environment that the current model is being run in
"environment": 'grok',
# Input stream specification per py/grokengine/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'test_NoProviders',
u'streams': [ { u'columns': [u'*'],
u'info': u'test data',
u'source': u'file://test_data.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
#'iterationCount' : ITERATION_COUNT,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'consumption',
inferenceElement=InferenceElement.prediction,
metric='rmse'),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*grokScore.*'],
}
################################################################################
################################################################################
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| gpl-3.0 | -5,162,983,504,750,848,000 | 38.0175 | 108 | 0.578587 | false |
mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/tests/python/unittest/test_pass_inject_copy_intrin.py | 1 | 4280 | import tvm
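# Tests for the tvm.ir_pass.InjectCopyIntrin pass: each schedule below tags an axis
# with a "memcpy" pragma, and the callback passed to the pass checks the buffer
# shapes, strides and padding it receives.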
def test_copy2d():
m = tvm.var('m')
l = tvm.var('l')
A = tvm.placeholder((m, l), name='A')
B = tvm.compute((m, l), lambda i, j: A[i, j], name='B')
s = tvm.create_schedule(B.op)
s[B].pragma(B.op.axis[0], "memcpy")
bounds = tvm.schedule.InferBound(s)
stmt = tvm.schedule.ScheduleOps(s, bounds)
Ab = tvm.decl_buffer(A.shape, A.dtype, name='A')
Bb = tvm.decl_buffer(B.shape, B.dtype, name='B')
stmt = tvm.ir_pass.StorageFlatten(stmt, {A: Ab, B: Bb}, 64)
def cb(src, dst, pad_before, pad_after, pad_value):
assert dst.strides[0] == l
assert dst.strides[1].value == 1
assert src.strides[0] == l
assert tuple(src.shape) == (m, l)
return tvm.make.Evaluate(0)
stmt = tvm.ir_pass.InjectCopyIntrin(stmt, "memcpy", cb)
def test_copy_pad():
m = tvm.var('m')
l = tvm.var('l')
A = tvm.placeholder((m, l), name='A')
B = tvm.compute((m + 2, l), lambda i, j:
tvm.select(tvm.all(i >= 1, i < m + 1),
A[i - 1, j], 1.0), name='B')
s = tvm.create_schedule(B.op)
s[B].pragma(B.op.axis[0], "memcpy")
bounds = tvm.schedule.InferBound(s)
stmt = tvm.schedule.ScheduleOps(s, bounds)
Ab = tvm.decl_buffer(A.shape, A.dtype, name='A')
Bb = tvm.decl_buffer(B.shape, B.dtype, name='B')
stmt = tvm.ir_pass.StorageFlatten(stmt, {A: Ab, B: Bb}, 64)
def cb(src, dst, pad_before, pad_after, pad_value):
assert tvm.ir_pass.Simplify(src.elem_offset).value == 0
assert pad_before[0].value == 1
assert pad_before[1].value == 0
assert pad_after[0].value == 1
assert pad_after[1].value == 0
assert pad_value.value == 1.0
return tvm.make.Evaluate(0)
stmt = tvm.ir_pass.InjectCopyIntrin(stmt, "memcpy", cb)
def test_single_point_test():
A = tvm.placeholder((1,), name='A')
B = tvm.compute((1,), lambda i:
A[i], name='B')
s = tvm.create_schedule(B.op)
s[B].pragma(B.op.axis[0], "memcpy")
bounds = tvm.schedule.InferBound(s)
stmt = tvm.schedule.ScheduleOps(s, bounds)
Ab = tvm.decl_buffer(A.shape, A.dtype, name='A')
Bb = tvm.decl_buffer(B.shape, B.dtype, name='B')
stmt = tvm.ir_pass.StorageFlatten(stmt, {A: Ab, B: Bb}, 64)
def cb(src, dst, pad_before, pad_after, pad_value):
assert tvm.ir_pass.Simplify(src.elem_offset).value == 0
assert tvm.ir_pass.Simplify(dst.elem_offset).value == 0
assert tvm.ir_pass.Simplify(src.strides[0]).value == 1
assert tvm.ir_pass.Simplify(dst.strides[0]).value == 1
return tvm.make.Evaluate(0)
stmt = tvm.ir_pass.InjectCopyIntrin(stmt, "memcpy", cb)
def assert_expr_equal(a, b):
print(a, b)
assert tvm.ir_pass.Simplify(a - b).value == 0
def test_copy_pad_split():
m = 4 * 3
A = tvm.placeholder((m, ), name="A")
Apad = tvm.compute((m + 2,), lambda i:
tvm.select(tvm.all(i >= 1, i <= m),
A[i - 1], 0.0), "Apad")
B = tvm.compute((m,), lambda i: Apad[i] + Apad[i + 1] + Apad[i + 2])
s = tvm.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=4)
s[Apad].compute_at(s[B], xo)
s[Apad].pragma(s[Apad].op.axis[0], "memcpy")
bounds = tvm.schedule.InferBound(s)
stmt = tvm.schedule.ScheduleOps(s, bounds)
Ab = tvm.decl_buffer(A.shape, A.dtype, name='A')
Bb = tvm.decl_buffer(B.shape, B.dtype, name='B')
stmt = tvm.ir_pass.StorageFlatten(stmt, {A: Ab, B: Bb}, 64)
stmt = tvm.ir_pass.Simplify(stmt)
stmt = tvm.ir_pass.CanonicalSimplify(stmt)
def cb(src, dst, pad_before, pad_after, pad_value):
assert(dst.elem_offset.value == 0)
assert_expr_equal(src.elem_offset, tvm.max(xo * 4, 1) - 1)
rpad_before = tvm.max(1 - xo * 4, 0)
rpad_after = tvm.max(xo * 4 - 7, 0)
assert_expr_equal(pad_before[0], rpad_before)
assert_expr_equal(pad_after[0], rpad_after)
assert_expr_equal(src.shape[0], 6 - rpad_before - rpad_after)
return tvm.make.Evaluate(0)
stmt = tvm.ir_pass.InjectCopyIntrin(stmt, "memcpy", cb)
if __name__ == "__main__":
test_copy2d()
test_copy_pad()
test_copy_pad_split()
test_single_point_test()
| apache-2.0 | -1,771,202,018,025,274,000 | 39.761905 | 72 | 0.575 | false |
harshatech2012/dual-raspi-stereo-vision | cameraCalibration.py | 1 | 6862 | import os
import numpy as np
import cv2
# Custom modules
from common import cameraTrigger as ct
from common import constantSource as cs
import cameraRectify as cr
from common import miscellaneous as msc
TOTAL_PICS = cs.getCalibReq()
while True:
q = input("Do you want to perform camera caliberation? (y/n): ")
if q.lower() == 'y':
calibDir = cs.getCalibDataDir(cs.camera)
if not os.path.exists(calibDir):
print("Directory doesn't exist. Creating directory...")
os.makedirs(calibDir)
print("Starting Camera Caliberation...")
print(str(TOTAL_PICS) + " pictures are needed to configure the camera.\n")
while True:
camType = input("Enter the camera that you want to caliberate (1/2): ")
if camType == "1" or camType == "2":
camType = cs.getCamera(camType)
break
else:
print(cs.getMessage(cs.invalid_binary, AB="12"))
checkerBoard = (9, 6)
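        # number of inner chessboard corners per row and column,
        # as expected by cv2.findChessboardCorners below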
squareSize = None # square edge length in cm
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((np.product(checkerBoard), 3), np.float32)
objp[:, :2] = np.indices(checkerBoard).T.reshape(-1, 2)
# objp *= squareSize
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
n = 1
while n <= TOTAL_PICS:
path = calibDir + camType + str(format(n, '04')) + ".png"
print("\nPicture No: " + str(n))
input("Press Return/Enter key when ready: ")
# If you remove it also remove the camera number
# & pi label mapping from constants.py
if camType == cs.getCamera(2):
img = ct.takeRemotePic()
else:
img = ct.takePic()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
h, w = gray.shape[:2]
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, checkerBoard, None)
# If found, add object points, image points (after refining them)
if ret is True:
print("Good shoot...")
cv2.imwrite(path, img)
cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)
imgpoints.append(corners.reshape(-1, 2))
objpoints.append(objp)
# Draw and display the corners
cv2.drawChessboardCorners(img, checkerBoard, corners, ret)
cv2.imshow('Image', img)
cv2.waitKey(500)
n += 1
else:
print("Image not useful.!! Use a different orientation/position.")
cv2.destroyAllWindows()
# Performing camera calibration
result = cv2.calibrateCamera(objectPoints=objpoints, imagePoints=imgpoints,
imageSize=(w, h), cameraMatrix=None,
distCoeffs=None)
ret, cameraMatrix, distCoeffs, rvecs, tvecs = result
# Final camera specific dataSet
dataSet = (cameraMatrix, distCoeffs, rvecs, tvecs)
while True:
q = input("Would you like to test the camera calibration " +
"parameters before proceeding? (y/n): ")
if q.lower() == 'y':
if camType == cs.getCamera(1):
srcImage = ct.takePic()
else:
srcImage = ct.takeRemotePic()
rectImage = cr.rectifyImage((dataSet[0], dataSet[1]), srcImage, cs.stream_mode)
cv2.imshow("Rectified Image", rectImage)
cv2.waitKey()
cv2.destroyAllWindows()
print("Saving rectified image...")
source = calibDir + camType + "_skewedImage.png"
target = calibDir + camType + "_undistortImage.png"
cv2.imwrite(source, srcImage)
cv2.imwrite(target, rectImage)
break
elif q.lower() == 'n':
print("Canceling calibration parameters test...")
break
else:
print(cs.getMessage(cs.invalid_binary, AB="YN"))
while True:
q = input("Would you like to calculate re-projection error? (y/n): ")
if q.lower() == 'y':
print("Starting error calculation...")
mean_error = 0
tot_error = 0
for i, objpoint in enumerate(objpoints):
imgpoints2 = cv2.projectPoints(objpoint, rvecs[i], tvecs[i],
cameraMatrix, distCoeffs)[0]
                    # TODO: Problem caused by the difference in sizes of imgpoints2 and imgpoints
error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)
tot_error += error
print("Total Error: {total}\nMean Error: {mean}".\
format(total=tot_error, mean=tot_error/len(objpoints)))
break
elif q.lower() == 'n':
print("Canceling error calculation...")
break
else:
print(cs.getMessage(cs.invalid_binary, AB="YN"))
while True:
q = input("Would you like to store the calibration data? (y/n): ")
if q.lower() == 'y':
# Storing the calibration data in .data file
print("\nStoring the following caliberation data: ")
print(" - Camera Matrix\n - Distrotion Coefficients\n - " +
"Rotation Vector\n - Translation Vector\n")
fileDir = cs.getCalibDataDir(cs.root)
fileName = cs.getFileName(cs.camera, prefix=camType)
file = fileDir + fileName
msc.writeData(file, dataSet)
break
elif q.lower() == 'n':
print("Cancelling this will require you to perform the" +
" entire calibration again.!")
q = input("Confirm cancellation? (y/n): ")
if q.lower() == 'y':
print("Data not stored.!")
break
else:
pass
else:
print(cs.getMessage(cs.invalid_binary, AB="YN"))
print("Process completed successfully...")
break
elif q.lower() == 'n':
print("Canceling Caliberation...")
break
else:
print(cs.getMessage(cs.invalid_binary, AB="YN"))
| gpl-3.0 | -6,111,740,889,220,514,000 | 39.60355 | 97 | 0.523463 | false |
mirestrepo/voxels-at-lems | bvpl/bvpl_octree/tests.py | 1 | 2159 | # Temporary tests.
#In this test
#1. Create Scene
#2. Create kernel vector
#3. Apply kernels to the scene
#4. Display results as disks
#import bvpl_octree_batch;
#bvpl_octree_batch.register_processes();
#bvpl_octree_batch.register_datatypes();
#
#class dbvalue:
# def __init__(self, index, type):
# self.id = index # unsigned integer
# self.type = type # string
#
#
#model_dir = "/Users/isa/Experiments/Synthetic";
#output_dir = "/Users/isa/Experiments/Synthetic/edge3d";
#
#
#print("Creating a Scene");
#bvpl_octree_batch.init_process("boxmCreateSceneProcess");
#bvpl_octree_batch.set_input_string(0, model_dir +"/gaussf1_scene.xml");
#bvpl_octree_batch.run_process();
#(scene_id, scene_type) = bvpl_octree_batch.commit_output(0);
#scene= dbvalue(scene_id, scene_type);
#
#print("*************************************");
#print("Creating 3D edge kernel");
#bvpl_octree_batch.init_process("bvplCreateEdge3dKernelVectorProcess");
#bvpl_octree_batch.set_input_unsigned(0,3);
#bvpl_octree_batch.set_input_unsigned(1,3);
#bvpl_octree_batch.set_input_unsigned(2,3);
#bvpl_octree_batch.run_process();
#(kernel_id,kernel_type)= bvpl_octree_batch.commit_output(0);
#kernel_vector = dbvalue(kernel_id,kernel_type);
#
#print("*************************************");
#print("Running Operator");
#bvpl_octree_batch.init_process("bvplSceneVectorOperatorProcess");
#bvpl_octree_batch.set_input_from_db(0,scene);
#bvpl_octree_batch.set_input_from_db(1,kernel_vector);
#bvpl_octree_batch.set_input_string(2,"bsta_gauss_f1");
#bvpl_octree_batch.set_input_string(3,"max_vector_operator");
#bvpl_octree_batch.set_input_string(4,"gauss_convolution");
#bvpl_octree_batch.set_input_string(5, output_dir);
#bvpl_octree_batch.run_process();
#(out_scene_id,out_scene_type)= bvpl_octree_batch.commit_output(0);
#result_scene = dbvalue(out_scene_id,out_scene_type);
#
#
#print("Writing World");
#bvpl_octree_batch.init_process("bvplSaveVrmlProcess");
#bvpl_octree_batch.set_input_from_db(0,result_scene);
#bvpl_octree_batch.set_input_from_db(1,kernel_vector);
#bvpl_octree_batch.set_input_string(2, output_dir +"/result_scene.wrl");
#bvpl_octree_batch.run_process();
| bsd-2-clause | 3,629,797,651,155,927,000 | 34.393443 | 73 | 0.705882 | false |
osantana/correios | documentation/process_correios_status.py | 1 | 1409 | #!/usr/bin/env python3.5
import csv
import re
import sys
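# Reads a Correios status CSV, merges wrapped continuation rows into the
# preceding record, then writes one normalized row per status type to stdout.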
result = []
with open(sys.argv[1]) as csvfile:
reader = csv.reader(csvfile)
for raw_row in reader:
tipo, status, descr, detalhe, cliente = raw_row
tipo = tipo.strip().replace("\n", " ")
status = status.strip().replace("\n", " ")
descr = descr.strip().replace("\n", " ")
detalhe = detalhe.strip().replace("\n", " ")
cliente = cliente.strip().replace("\n", " ")
if status:
row = {
'tipo': tipo.split(),
'status': status,
'descr': descr,
'detalhe': detalhe,
'cliente': cliente,
}
result.append(row)
else:
if tipo:
row['tipo'].append(tipo)
row['descr'] = "{} {}".format(row['descr'], descr).strip()
row['detalhe'] = "{} {}".format(row['detalhe'], detalhe).strip()
row['cliente'] = "{} {}".format(row['cliente'], cliente).strip()
writer = csv.writer(sys.stdout)
for res in result:
for tipo in res["tipo"]:
detalhe = res["detalhe"].replace('F avor', 'Favor')
detalhe = re.sub("<.*?>", "", detalhe).strip()
row = [
tipo,
res["status"],
res["descr"],
detalhe,
res["cliente"],
]
writer.writerow(row)
| apache-2.0 | 676,500,828,654,824,400 | 28.978723 | 76 | 0.471966 | false |
pombredanne/anitya | anitya/tests/lib/test_exceptions.py | 1 | 1651 | # -*- coding: utf-8 -*-
#
# Copyright © 2017 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
import unittest
from anitya.lib import exceptions
class InvalidVersionTests(unittest.TestCase):
"""Tests for :class:`exceptions.InvalidVersion`."""
def test_str(self):
"""Assert the __str__ method provides a human-readable value."""
e = exceptions.InvalidVersion('notaversion')
self.assertEqual('Invalid version "notaversion"', str(e))
def test_str_with_wrapped_exception(self):
"""Assert the __str__ method provides a human-readable value including the exception."""
e = exceptions.InvalidVersion('notaversion', IOError('womp womp'))
self.assertEqual('Invalid version "notaversion": womp womp', str(e))
| gpl-2.0 | -8,365,337,821,436,643,000 | 43.594595 | 96 | 0.732121 | false |
xenserver/win-xenguestagent | src/branding/branding.py | 1 | 7258 |
branding = {
"toolsName" : "Citrix XenServer Tools",
"installerProductName" : "Citrix XenServer Tools Installer",
"manufacturer" : "Citrix",
"installerKeyWords" : "Citrix XenServer Windows Installer",
"shortTools" : "XenTools",
"installerServiceName" : "Citrix Xen Installer",
"shortInstallerServiceName": "XenPVInstall",
"installWizardName" : "Citrix Xen Install Wizard",
"pvtoolsLong" : "XenServer Windows PV Tools",
"pvTools" : "Citrix Xen PV Tools",
"hypervisorAndOs" : "Citrix Xen Windows",
"pvDrivers" : "PV Drivers",
"driverKeyWords" : "Citrix Xen Drivers",
"driverDescription" : "Citrix Xen Windows Drivers",
"driverComments" : "Paravitualized Windows Drivers For Citrix XenServer",
"pvDriversLong" : "Citrix Xen PV Drivers for Windows",
"hypervisor" : "Xen",
"hypervisorProduct" : "XenServer",
"guestAgentLong" : "Citrix XenServer Windows Guest Agent",
"guestAgent" : "Citrix Xen Guest Agent",
"guestServiceShort" : "XenSvc",
"guestServiceDesc" : "Monitors and provides various metrics to XenStore",
"vssLong" : "Citrix XenServer VSS Provider",
"managementName" : "Windows Management Agent",
"managementDesc" : "Installation and Update Agent",
"installAgentShort" : "InstallAgent",
"installAgentDesc" : "Installs and updates management agent",
"installAgentRegKey" : R"SOFTWARE\\Citrix\\InstallAgent",
"language" : "1033",
"languages" : "1033",
"manufacturerLong" : "Citrix Systems, Inc.",
"toolsForVMs" : "Citrix Tools For Virtual Machines",
"guestLibDesc" : "Citrix Xen Windows Guest Agent Support Library",
"copyrightGuestLib" : "Copyright 2012-2016 Citrix Systems, Inc.",
"copyrightGuestAgent" : "Copyright 2012-2016 Citrix Systems, Inc.",
"copyrightXenDpriv" : "Copyright 2012-2016 Citrix Systems, Inc.",
"xenDprivDesc" : "Citrix XenServer Windows Deprivileged Client",
"setComputerName" : "Set Computer Name",
"errNoWMI" : "Citrix XenServer guest Agent cannotfix XenIface WMI interface",
"GuestAgentLogName" : "XenGuestAgentLog",
"GuestAgentLogSource" : "XenGuestAgent",
"setupErr" : "XenServer Setup.exe error",
"processFail" : "Failed to create process %s %x", #commandline #windows error code
"setupHelp" : "Valid arguments are:\\n /TEST\\n/passive\\n/quiet\\n/norestart\\n/forcerestart",
"noSystemDir" : "Unable to read system directory",
"setupLogDir" : "XSToolSetup",
"copyrightInstallAgent" : "Copyright 2015-2016 Citrix Systems, Inc.",
"copyrightBrandSupport" : "Copyright 2016 Citrix Systems, Inc.",
"copyrightHelperFunctions" : "Copyright 2016 Citrix Systems, Inc.",
"copyrightHardwareDevice" : "Copyright 2016 Citrix Systems, Inc.",
"copyrightPInvokeWrap" : "Copyright 2016 Citrix Systems, Inc.",
"copyrightPVDriversRemoval" : "Copyright 2016 Citrix Systems, Inc.",
"copyrightUninstall" : "Copyright 2016 Citrix Systems, Inc.",
"errMSINoMem":"Insufficient memory to allocate msiexec string",
"errFilePathNoMem":"Insufficient memory to get file path",
"errNoLogPath":"Can't get logging path",
"errCmdLineNoMem":"Insufficient memory to allocate cmdline string",
"errMSIInstallFail":"The MSI Install failed with exit code %d\\nSee %s for more details", #MSI exit code, #Log File Location
"errDotNetNeeded":"Microsoft .Net Framework 3.5 or higher is required",
"twoCharBrand":"XS",
"updater" : "ManagementAgentUpdater",
"copyrightUpdater" : "Copyright 2016 Citrix Systems, Inc.",
"updaterURL" : "https://pvupdates.vmd.citrix.com/updates.tsv",
"updaterLong" : "Citrix Management Agent Auto-Updater",
"updaterDesc" : "Automatically checks and updates XenServer tools",
"laterVersion" : "A later version of Windows Management Agent is already installed. Setup will now exit",
"windowsRequired" : "This version of the XenServer Windows Management Agent requires Windows Vista, Windows Server 2008 or Later. For Windows XP and 2003 try installing XenLegacy.exe",
"evtServiceStarting" : "Service Starting",
"evtException" : "Exception: ",
"evtServiceStopping" : "Service Stopping",
"evtStopLock" : "Service Stopping (locked)",
"evtStopShutdown" : "Service Stopping (shutdown)",
"evtStopNothing" : "Service Stopping (nothing running)",
"evtStopJoin" : "Service Stopping (joining thread)",
"evtStopped" : "Service Stopping (done)",
"32BitNotOn64" : "Please install the 64 bit version of this package on 64 bit systems",
"allowAutoUpdate" : "YES",
"allowDriverUpdate" : "NO",
"allowDriverInstall" : "YES",
"installAndUpdateTitle" : "Installation and Update Settings",
"installAndUpdateDesc" : "Click Next to accept recommended settings",
"ioDesc" : "I/O drivers improve performance, functionality and reliability",
"ioInstall" : "Install I/O Drivers Now",
"mgmtDesc" : "The management agent automatically updates itself when new versions are available",
"mgmtAllow" : "Allow automatic management agent updates",
"mgmtDisallow" : "Disallow automatic management agent updates",
"ioUpdDesc" : "The management agent can install I/O drivers when new versions are available",
"ioUpdAllow" : "Allow automatic I/O driver updates by the management agent",
"ioUpdDisallow" : "Disallow automatic I/O driver updates by the management agent",
"updDisclaim" : "Automatic updates may be overridden by pool policies",
"whqlWarn" : "Customers using Windows Update for I/O driver updates should not select this option",
"userAgent" : "Citrix XenServer AutoUpdate",
}
filenames = {
"legacy" : "XenLegacy.Exe",
"legacyuninstallerfix" : "xluninstallerfix.exe",
"driversmsix86" : "CitrixXenDriversX86.msi",
"driversmsix64" : "CitrixXenDriversX64.msi",
"vssmsix86" : "CitrixVssX86.msi",
"vssmsix64" : "CitrixVssX64.msi",
"guestagentmsix86" : "CitrixGuestAgentX86.msi",
"guestagentmsix64" : "CitrixGuestAgentX64.msi",
"installwizard" : "InstallWizard.msi",
"managementx64" : "managementagentx64.msi",
"managementx86" : "managementagentx86.msi",
"setup" : "setup.exe",
"dpriv" : "XenDpriv.exe",
"dprivcfg" : "XenDpriv.exe.config",
"agent" : "XenGuestAgent.exe",
"agentcfg" : "XenGuestAgent.exe.config",
"installVSS" : "install-XenProvider.cmd",
"uninstallVSS" : "uninstall-XenProvider.cmd",
}
resources = {
"icon" : "xen.ico",
}
bitmaps = "..\\..\\src\\bitmaps"
languagecode = {
"culture" : "enus",
"language" : "0x09",
"sublang" : "0x04",
}
cultures = {
"default" : "en-us",
"others" : [],
}
| bsd-2-clause | 2,561,739,394,468,852,000 | 50.978102 | 193 | 0.639019 | false |
ainur-fa/python_training_1 | fixture/db.py | 1 | 2123 | import pymysql.cursors
from model.group import Group
from model.contact import Contact
class DbFixture:
def __init__(self, host, name, user, password):
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        self.connection = pymysql.connect(host=host, database=name, user=user, password=password)
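        # enable autocommit so each statement is applied immediately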
self.connection.autocommit(True)
def get_group_list(self):
list=[]
cursor = self.connection.cursor()
try:
cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
for row in cursor:
(id, name, header, footer) = row
list.append(Group(id=str(id), name=name, header=header, footer=footer))
finally:
cursor.close()
return list
def get_contact_list(self):
list=[]
cursor = self.connection.cursor()
try:
cursor.execute("select id, firstname, lastname from addressbook where deprecated='0000-00-00 00:00:00'")
for row in cursor:
(id, firstname, lastname) = row
list.append(Contact(id=str(id), firstname=firstname, lastname=lastname))
finally:
cursor.close()
return list
def get_contact_fields(self):
list=[]
cursor = self.connection.cursor()
try:
cursor.execute("select id, firstname, lastname, address, home, mobile, work, email, email2, email3, phone2 from addressbook where deprecated='0000-00-00 00:00:00'")
for row in cursor:
(id, firstname, lastname, address, home, mobile, work, email, email2, email3, phone2) = row
list.append(Contact(id=str(id), firstname=firstname, lastname=lastname, address=address,
homephone=home, mobilephone=mobile, workphone=work,
email=email, email2=email2, email3=email3, secondaryphone=phone2))
finally:
cursor.close()
return list
def destroy(self):
self.connection.close() | apache-2.0 | 5,126,857,193,983,882,000 | 37.618182 | 177 | 0.595384 | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Mac/BuildScript/build-installer.py | 1 | 62264 | #!/usr/bin/env python
"""
This script is used to build "official" universal installers on Mac OS X.
It requires at least Mac OS X 10.5, Xcode 3, and the 10.4u SDK for
32-bit builds. 64-bit or four-way universal builds require at least
OS X 10.5 and the 10.5 SDK.
Please ensure that this script keeps working with Python 2.5, to avoid
bootstrap issues (/usr/bin/python is Python 2.5 on OSX 10.5). Sphinx,
which is used to build the documentation, currently requires at least
Python 2.4. However, as of Python 3.4.1, Doc builds require an external
sphinx-build and the current versions of Sphinx now require at least
Python 2.6.
In addition to what is supplied with OS X 10.5+ and Xcode 3+, the script
requires an installed third-party version of
Tcl/Tk 8.4 (for OS X 10.4 and 10.5 deployment targets) or Tcl/TK 8.5
(for 10.6 or later) installed in /Library/Frameworks. When installed,
the Python built by this script will attempt to dynamically link first to
Tcl and Tk frameworks in /Library/Frameworks if available otherwise fall
back to the ones in /System/Library/Frameworks. For the build, we recommend
installing the most recent ActiveTcl 8.4 or 8.5 version.
32-bit-only installer builds are still possible on OS X 10.4 with Xcode 2.5
and the installation of additional components, such as a newer Python
(2.5 is needed for Python parser updates) and for the documentation
build either svn (pre-3.4.1) or sphinx-build (3.4.1 and later).
Usage: see USAGE variable in the script.
"""
import platform, os, sys, getopt, textwrap, shutil, stat, time, pwd, grp
try:
import urllib2 as urllib_request
except ImportError:
import urllib.request as urllib_request
STAT_0o755 = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
| stat.S_IRGRP | stat.S_IXGRP
| stat.S_IROTH | stat.S_IXOTH )
STAT_0o775 = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
| stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
| stat.S_IROTH | stat.S_IXOTH )
INCLUDE_TIMESTAMP = 1
VERBOSE = 1
from plistlib import Plist
try:
from plistlib import writePlist
except ImportError:
    # We're being run with Python 2.3
def writePlist(plist, path):
plist.write(path)
def shellQuote(value):
"""
Return the string value in a form that can safely be inserted into
a shell command.
"""
return "'%s'"%(value.replace("'", "'\"'\"'"))
def grepValue(fn, variable):
"""
    Return the unquoted value of a variable from a file.
QUOTED_VALUE='quotes' -> str('quotes')
UNQUOTED_VALUE=noquotes -> str('noquotes')
"""
variable = variable + '='
for ln in open(fn, 'r'):
if ln.startswith(variable):
value = ln[len(variable):].strip()
return value.strip("\"'")
raise RuntimeError("Cannot find variable %s" % variable[:-1])
_cache_getVersion = None
def getVersion():
global _cache_getVersion
if _cache_getVersion is None:
_cache_getVersion = grepValue(
os.path.join(SRCDIR, 'configure'), 'PACKAGE_VERSION')
return _cache_getVersion
def getVersionMajorMinor():
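    # e.g. a PACKAGE_VERSION of "3.5" becomes (3, 5)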
return tuple([int(n) for n in getVersion().split('.', 2)])
_cache_getFullVersion = None
def getFullVersion():
global _cache_getFullVersion
if _cache_getFullVersion is not None:
return _cache_getFullVersion
fn = os.path.join(SRCDIR, 'Include', 'patchlevel.h')
for ln in open(fn):
if 'PY_VERSION' in ln:
_cache_getFullVersion = ln.split()[-1][1:-1]
return _cache_getFullVersion
raise RuntimeError("Cannot find full version??")
FW_PREFIX = ["Library", "Frameworks", "Python.framework"]
FW_VERSION_PREFIX = "--undefined--" # initialized in parseOptions
# The directory we'll use to create the build (will be erased and recreated)
WORKDIR = "/tmp/_py"
# The directory we'll use to store third-party sources. Set this to something
# else if you don't want to re-fetch required libraries every time.
DEPSRC = os.path.join(WORKDIR, 'third-party')
DEPSRC = os.path.expanduser('~/Universal/other-sources')
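# (the expanduser() assignment above overrides the WORKDIR-based default;
#  the --third-party option overrides both)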
# Location of the preferred SDK
### There are some issues with the SDK selection below here,
### The resulting binary doesn't work on all platforms that
### it should. Always default to the 10.4u SDK until that
### issue is resolved.
###
##if int(os.uname()[2].split('.')[0]) == 8:
## # Explicitly use the 10.4u (universal) SDK when
## # building on 10.4, the system headers are not
## # useable for a universal build
## SDKPATH = "/Developer/SDKs/MacOSX10.4u.sdk"
##else:
## SDKPATH = "/"
SDKPATH = "/Developer/SDKs/MacOSX10.4u.sdk"
universal_opts_map = { '32-bit': ('i386', 'ppc',),
'64-bit': ('x86_64', 'ppc64',),
'intel': ('i386', 'x86_64'),
'3-way': ('ppc', 'i386', 'x86_64'),
'all': ('i386', 'ppc', 'x86_64', 'ppc64',) }
default_target_map = {
'64-bit': '10.5',
'3-way': '10.5',
'intel': '10.5',
'all': '10.5',
}
UNIVERSALOPTS = tuple(universal_opts_map.keys())
UNIVERSALARCHS = '32-bit'
ARCHLIST = universal_opts_map[UNIVERSALARCHS]
# Source directory (assume we're in Mac/BuildScript)
SRCDIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__
))))
# $MACOSX_DEPLOYMENT_TARGET -> minimum OS X level
DEPTARGET = '10.3'
def getDeptargetTuple():
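    # e.g. a DEPTARGET of "10.5" becomes (10, 5)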
return tuple([int(n) for n in DEPTARGET.split('.')[0:2]])
def getTargetCompilers():
target_cc_map = {
'10.3': ('gcc-4.0', 'g++-4.0'),
'10.4': ('gcc-4.0', 'g++-4.0'),
'10.5': ('gcc-4.2', 'g++-4.2'),
'10.6': ('gcc-4.2', 'g++-4.2'),
}
return target_cc_map.get(DEPTARGET, ('clang', 'clang++') )
CC, CXX = getTargetCompilers()
PYTHON_3 = getVersionMajorMinor() >= (3, 0)
USAGE = textwrap.dedent("""\
Usage: build_python [options]
Options:
-? or -h: Show this message
-b DIR
--build-dir=DIR: Create build here (default: %(WORKDIR)r)
--third-party=DIR: Store third-party sources here (default: %(DEPSRC)r)
--sdk-path=DIR: Location of the SDK (default: %(SDKPATH)r)
--src-dir=DIR: Location of the Python sources (default: %(SRCDIR)r)
--dep-target=10.n OS X deployment target (default: %(DEPTARGET)r)
--universal-archs=x universal architectures (options: %(UNIVERSALOPTS)r, default: %(UNIVERSALARCHS)r)
""")% globals()
# Dict of object file names with shared library names to check after building.
# This is to ensure that we ended up dynamically linking with the shared
# library paths and versions we expected. For example:
# EXPECTED_SHARED_LIBS['_tkinter.so'] = [
# '/Library/Frameworks/Tcl.framework/Versions/8.5/Tcl',
# '/Library/Frameworks/Tk.framework/Versions/8.5/Tk']
EXPECTED_SHARED_LIBS = {}
# List of names of third party software built with this installer.
# The names will be inserted into the rtf version of the License.
THIRD_PARTY_LIBS = []
# Instructions for building libraries that are necessary for building a
# batteries included python.
# [The recipes are defined here for convenience but instantiated later after
# command line options have been processed.]
def library_recipes():
result = []
LT_10_5 = bool(getDeptargetTuple() < (10, 5))
if not (10, 5) < getDeptargetTuple() < (10, 10):
# The OpenSSL libs shipped with OS X 10.5 and earlier are
# hopelessly out-of-date and do not include Apple's tie-in to
# the root certificates in the user and system keychains via TEA
# that was introduced in OS X 10.6. Note that this applies to
# programs built and linked with a 10.5 SDK even when run on
# newer versions of OS X.
#
# Dealing with CAs is messy. For now, just supply a
# local libssl and libcrypto for the older installer variants
# (e.g. the python.org 10.5+ 32-bit-only installer) that use the
# same default ssl certfile location as the system libs do:
# /System/Library/OpenSSL/cert.pem
# Then at least TLS connections can be negotiated with sites that
# use sha-256 certs like python.org, assuming the proper CA certs
# have been supplied. The default CA cert management issues for
# 10.5 and earlier builds are the same as before, other than it is
# now more obvious with cert checking enabled by default in the
# standard library.
#
# For builds with 10.6 through 10.9 SDKs,
# continue to use the deprecated but
# less out-of-date Apple 0.9.8 libs for now. While they are less
# secure than using an up-to-date 1.0.1 version, doing so
# avoids the big problems of forcing users to have to manage
# default CAs themselves, thanks to the Apple libs using private TEA
# APIs for cert validation from keychains if validation using the
# standard OpenSSL locations (/System/Library/OpenSSL, normally empty)
# fails.
#
# Since Apple removed the header files for the deprecated system
# OpenSSL as of the Xcode 7 release (for OS X 10.10+), we do not
# have much choice but to build our own copy here, too.
result.extend([
dict(
name="OpenSSL 1.0.2k",
url="https://www.openssl.org/source/openssl-1.0.2k.tar.gz",
checksum='f965fc0bf01bf882b31314b61391ae65',
patches=[
"openssl_sdk_makedepend.patch",
],
buildrecipe=build_universal_openssl,
configure=None,
install=None,
),
])
# Disable for now
if False: # if getDeptargetTuple() > (10, 5):
result.extend([
dict(
name="Tcl 8.5.15",
url="ftp://ftp.tcl.tk/pub/tcl//tcl8_5/tcl8.5.15-src.tar.gz",
checksum='f3df162f92c69b254079c4d0af7a690f',
buildDir="unix",
configure_pre=[
'--enable-shared',
'--enable-threads',
'--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib'%(getVersion(),),
],
useLDFlags=False,
install='make TCL_LIBRARY=%(TCL_LIBRARY)s && make install TCL_LIBRARY=%(TCL_LIBRARY)s DESTDIR=%(DESTDIR)s'%{
"DESTDIR": shellQuote(os.path.join(WORKDIR, 'libraries')),
"TCL_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tcl8.5'%(getVersion())),
},
),
dict(
name="Tk 8.5.15",
url="ftp://ftp.tcl.tk/pub/tcl//tcl8_5/tk8.5.15-src.tar.gz",
checksum='55b8e33f903210a4e1c8bce0f820657f',
patches=[
"issue19373_tk_8_5_15_source.patch",
],
buildDir="unix",
configure_pre=[
'--enable-aqua',
'--enable-shared',
'--enable-threads',
'--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib'%(getVersion(),),
],
useLDFlags=False,
install='make TCL_LIBRARY=%(TCL_LIBRARY)s TK_LIBRARY=%(TK_LIBRARY)s && make install TCL_LIBRARY=%(TCL_LIBRARY)s TK_LIBRARY=%(TK_LIBRARY)s DESTDIR=%(DESTDIR)s'%{
"DESTDIR": shellQuote(os.path.join(WORKDIR, 'libraries')),
"TCL_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tcl8.5'%(getVersion())),
"TK_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tk8.5'%(getVersion())),
},
),
])
if PYTHON_3:
result.extend([
dict(
name="XZ 5.0.5",
url="http://tukaani.org/xz/xz-5.0.5.tar.gz",
checksum='19d924e066b6fff0bc9d1981b4e53196',
configure_pre=[
'--disable-dependency-tracking',
]
),
])
result.extend([
dict(
name="NCurses 5.9",
url="http://ftp.gnu.org/pub/gnu/ncurses/ncurses-5.9.tar.gz",
checksum='8cb9c412e5f2d96bc6f459aa8c6282a1',
configure_pre=[
"--enable-widec",
"--without-cxx",
"--without-cxx-binding",
"--without-ada",
"--without-curses-h",
"--enable-shared",
"--with-shared",
"--without-debug",
"--without-normal",
"--without-tests",
"--without-manpages",
"--datadir=/usr/share",
"--sysconfdir=/etc",
"--sharedstatedir=/usr/com",
"--with-terminfo-dirs=/usr/share/terminfo",
"--with-default-terminfo-dir=/usr/share/terminfo",
"--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib"%(getVersion(),),
],
patchscripts=[
("ftp://invisible-island.net/ncurses//5.9/ncurses-5.9-20120616-patch.sh.bz2",
"f54bf02a349f96a7c4f0d00922f3a0d4"),
],
useLDFlags=False,
install='make && make install DESTDIR=%s && cd %s/usr/local/lib && ln -fs ../../../Library/Frameworks/Python.framework/Versions/%s/lib/lib* .'%(
shellQuote(os.path.join(WORKDIR, 'libraries')),
shellQuote(os.path.join(WORKDIR, 'libraries')),
getVersion(),
),
),
dict(
name="SQLite 3.8.11",
url="https://www.sqlite.org/2015/sqlite-autoconf-3081100.tar.gz",
checksum='77b451925121028befbddbf45ea2bc49',
extra_cflags=('-Os '
'-DSQLITE_ENABLE_FTS4 '
'-DSQLITE_ENABLE_FTS3_PARENTHESIS '
'-DSQLITE_ENABLE_RTREE '
'-DSQLITE_TCL=0 '
'%s' % ('','-DSQLITE_WITHOUT_ZONEMALLOC ')[LT_10_5]),
configure_pre=[
'--enable-threadsafe',
'--enable-shared=no',
'--enable-static=yes',
'--disable-readline',
'--disable-dependency-tracking',
]
),
])
if getDeptargetTuple() < (10, 5):
result.extend([
dict(
name="Bzip2 1.0.6",
url="http://bzip.org/1.0.6/bzip2-1.0.6.tar.gz",
checksum='00b516f4704d4a7cb50a1d97e6e8e15b',
configure=None,
install='make install CC=%s CXX=%s, PREFIX=%s/usr/local/ CFLAGS="-arch %s -isysroot %s"'%(
CC, CXX,
shellQuote(os.path.join(WORKDIR, 'libraries')),
' -arch '.join(ARCHLIST),
SDKPATH,
),
),
dict(
name="ZLib 1.2.3",
url="http://www.gzip.org/zlib/zlib-1.2.3.tar.gz",
checksum='debc62758716a169df9f62e6ab2bc634',
configure=None,
install='make install CC=%s CXX=%s, prefix=%s/usr/local/ CFLAGS="-arch %s -isysroot %s"'%(
CC, CXX,
shellQuote(os.path.join(WORKDIR, 'libraries')),
' -arch '.join(ARCHLIST),
SDKPATH,
),
),
dict(
# Note that GNU readline is GPL'd software
name="GNU Readline 6.1.2",
url="http://ftp.gnu.org/pub/gnu/readline/readline-6.1.tar.gz" ,
checksum='fc2f7e714fe792db1ce6ddc4c9fb4ef3',
patchlevel='0',
patches=[
# The readline maintainers don't do actual micro releases, but
# just ship a set of patches.
('http://ftp.gnu.org/pub/gnu/readline/readline-6.1-patches/readline61-001',
'c642f2e84d820884b0bf9fd176bc6c3f'),
('http://ftp.gnu.org/pub/gnu/readline/readline-6.1-patches/readline61-002',
'1a76781a1ea734e831588285db7ec9b1'),
]
),
])
if not PYTHON_3:
result.extend([
dict(
name="Sleepycat DB 4.7.25",
url="http://download.oracle.com/berkeley-db/db-4.7.25.tar.gz",
checksum='ec2b87e833779681a0c3a814aa71359e',
buildDir="build_unix",
configure="../dist/configure",
configure_pre=[
'--includedir=/usr/local/include/db4',
]
),
])
return result
# Instructions for building packages inside the .mpkg.
def pkg_recipes():
unselected_for_python3 = ('selected', 'unselected')[PYTHON_3]
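    # bool-indexed tuple: 'unselected' when building Python 3, 'selected' for Python 2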
result = [
dict(
name="PythonFramework",
long_name="Python Framework",
source="/Library/Frameworks/Python.framework",
readme="""\
This package installs Python.framework, that is the python
interpreter and the standard library. This also includes Python
wrappers for lots of Mac OS X API's.
""",
postflight="scripts/postflight.framework",
selected='selected',
),
dict(
name="PythonApplications",
long_name="GUI Applications",
source="/Applications/Python %(VER)s",
readme="""\
This package installs IDLE (an interactive Python IDE),
Python Launcher and Build Applet (create application bundles
from python scripts).
It also installs a number of examples and demos.
""",
required=False,
selected='selected',
),
dict(
name="PythonUnixTools",
long_name="UNIX command-line tools",
source="/usr/local/bin",
readme="""\
This package installs the unix tools in /usr/local/bin for
compatibility with older releases of Python. This package
is not necessary to use Python.
""",
required=False,
selected='selected',
),
dict(
name="PythonDocumentation",
long_name="Python Documentation",
topdir="/Library/Frameworks/Python.framework/Versions/%(VER)s/Resources/English.lproj/Documentation",
source="/pydocs",
readme="""\
This package installs the python documentation at a location
that is useable for pydoc and IDLE.
""",
postflight="scripts/postflight.documentation",
required=False,
selected='selected',
),
dict(
name="PythonProfileChanges",
long_name="Shell profile updater",
readme="""\
            This package updates your shell profile to make sure that
the Python tools are found by your shell in preference of
the system provided Python tools.
If you don't install this package you'll have to add
"/Library/Frameworks/Python.framework/Versions/%(VER)s/bin"
to your PATH by hand.
""",
postflight="scripts/postflight.patch-profile",
topdir="/Library/Frameworks/Python.framework",
source="/empty-dir",
required=False,
selected='selected',
),
dict(
name="PythonInstallPip",
long_name="Install or upgrade pip",
readme="""\
This package installs (or upgrades from an earlier version)
pip, a tool for installing and managing Python packages.
""",
postflight="scripts/postflight.ensurepip",
topdir="/Library/Frameworks/Python.framework",
source="/empty-dir",
required=False,
selected='selected',
),
]
if getDeptargetTuple() < (10, 4) and not PYTHON_3:
result.append(
dict(
name="PythonSystemFixes",
long_name="Fix system Python",
readme="""\
This package updates the system python installation on
Mac OS X 10.3 to ensure that you can build new python extensions
using that copy of python after installing this version.
""",
postflight="../Tools/fixapplepython23.py",
topdir="/Library/Frameworks/Python.framework",
source="/empty-dir",
required=False,
selected=unselected_for_python3,
)
)
return result
def fatal(msg):
"""
A fatal error, bail out.
"""
sys.stderr.write('FATAL: ')
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.exit(1)
def fileContents(fn):
"""
Return the contents of the named file
"""
return open(fn, 'r').read()
def runCommand(commandline):
"""
Run a command and raise RuntimeError if it fails. Output is suppressed
unless the command fails.
"""
fd = os.popen(commandline, 'r')
data = fd.read()
xit = fd.close()
if xit is not None:
sys.stdout.write(data)
raise RuntimeError("command failed: %s"%(commandline,))
if VERBOSE:
sys.stdout.write(data); sys.stdout.flush()
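# Like runCommand, but returns the command's output instead of echoing it.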
def captureCommand(commandline):
fd = os.popen(commandline, 'r')
data = fd.read()
xit = fd.close()
if xit is not None:
sys.stdout.write(data)
raise RuntimeError("command failed: %s"%(commandline,))
return data
def getTclTkVersion(configfile, versionline):
"""
search Tcl or Tk configuration file for version line
"""
try:
f = open(configfile, "r")
except OSError:
fatal("Framework configuration file not found: %s" % configfile)
for l in f:
if l.startswith(versionline):
f.close()
return l
fatal("Version variable %s not found in framework configuration file: %s"
% (versionline, configfile))
def checkEnvironment():
"""
Check that we're running on a supported system.
"""
if sys.version_info[0:2] < (2, 4):
fatal("This script must be run with Python 2.4 or later")
if platform.system() != 'Darwin':
fatal("This script should be run on a Mac OS X 10.4 (or later) system")
if int(platform.release().split('.')[0]) < 8:
fatal("This script should be run on a Mac OS X 10.4 (or later) system")
if not os.path.exists(SDKPATH):
fatal("Please install the latest version of Xcode and the %s SDK"%(
os.path.basename(SDKPATH[:-4])))
# Because we only support dynamic load of only one major/minor version of
# Tcl/Tk, ensure:
# 1. there are no user-installed frameworks of Tcl/Tk with version
# higher than the Apple-supplied system version in
# SDKROOT/System/Library/Frameworks
# 2. there is a user-installed framework (usually ActiveTcl) in (or linked
# in) SDKROOT/Library/Frameworks with the same version as the system
# version. This allows users to choose to install a newer patch level.
frameworks = {}
for framework in ['Tcl', 'Tk']:
fwpth = 'Library/Frameworks/%s.framework/Versions/Current' % framework
sysfw = os.path.join(SDKPATH, 'System', fwpth)
libfw = os.path.join(SDKPATH, fwpth)
usrfw = os.path.join(os.getenv('HOME'), fwpth)
frameworks[framework] = os.readlink(sysfw)
if not os.path.exists(libfw):
fatal("Please install a link to a current %s %s as %s so "
"the user can override the system framework."
% (framework, frameworks[framework], libfw))
if os.readlink(libfw) != os.readlink(sysfw):
fatal("Version of %s must match %s" % (libfw, sysfw) )
if os.path.exists(usrfw):
fatal("Please rename %s to avoid possible dynamic load issues."
% usrfw)
if frameworks['Tcl'] != frameworks['Tk']:
fatal("The Tcl and Tk frameworks are not the same version.")
# add files to check after build
EXPECTED_SHARED_LIBS['_tkinter.so'] = [
"/Library/Frameworks/Tcl.framework/Versions/%s/Tcl"
% frameworks['Tcl'],
"/Library/Frameworks/Tk.framework/Versions/%s/Tk"
% frameworks['Tk'],
]
# Remove inherited environment variables which might influence build
environ_var_prefixes = ['CPATH', 'C_INCLUDE_', 'DYLD_', 'LANG', 'LC_',
'LD_', 'LIBRARY_', 'PATH', 'PYTHON']
for ev in list(os.environ):
for prefix in environ_var_prefixes:
if ev.startswith(prefix) :
print("INFO: deleting environment variable %s=%s" % (
ev, os.environ[ev]))
del os.environ[ev]
base_path = '/bin:/sbin:/usr/bin:/usr/sbin'
if 'SDK_TOOLS_BIN' in os.environ:
base_path = os.environ['SDK_TOOLS_BIN'] + ':' + base_path
# Xcode 2.5 on OS X 10.4 does not include SetFile in its usr/bin;
# add its fixed location here if it exists
OLD_DEVELOPER_TOOLS = '/Developer/Tools'
if os.path.isdir(OLD_DEVELOPER_TOOLS):
base_path = base_path + ':' + OLD_DEVELOPER_TOOLS
os.environ['PATH'] = base_path
print("Setting default PATH: %s"%(os.environ['PATH']))
# Ensure we have access to sphinx-build.
# You may have to create a link in /usr/bin for it.
runCommand('sphinx-build --version')
def parseOptions(args=None):
"""
Parse arguments and update global settings.
"""
global WORKDIR, DEPSRC, SDKPATH, SRCDIR, DEPTARGET
global UNIVERSALOPTS, UNIVERSALARCHS, ARCHLIST, CC, CXX
global FW_VERSION_PREFIX
if args is None:
args = sys.argv[1:]
try:
options, args = getopt.getopt(args, '?hb',
[ 'build-dir=', 'third-party=', 'sdk-path=' , 'src-dir=',
'dep-target=', 'universal-archs=', 'help' ])
except getopt.GetoptError:
print(sys.exc_info()[1])
sys.exit(1)
if args:
print("Additional arguments")
sys.exit(1)
deptarget = None
for k, v in options:
if k in ('-h', '-?', '--help'):
print(USAGE)
sys.exit(0)
elif k in ('-d', '--build-dir'):
WORKDIR=v
elif k in ('--third-party',):
DEPSRC=v
elif k in ('--sdk-path',):
SDKPATH=v
elif k in ('--src-dir',):
SRCDIR=v
elif k in ('--dep-target', ):
DEPTARGET=v
deptarget=v
elif k in ('--universal-archs', ):
if v in UNIVERSALOPTS:
UNIVERSALARCHS = v
ARCHLIST = universal_opts_map[UNIVERSALARCHS]
if deptarget is None:
# Select alternate default deployment
# target
DEPTARGET = default_target_map.get(v, '10.3')
else:
raise NotImplementedError(v)
else:
raise NotImplementedError(k)
SRCDIR=os.path.abspath(SRCDIR)
WORKDIR=os.path.abspath(WORKDIR)
SDKPATH=os.path.abspath(SDKPATH)
DEPSRC=os.path.abspath(DEPSRC)
CC, CXX = getTargetCompilers()
FW_VERSION_PREFIX = FW_PREFIX[:] + ["Versions", getVersion()]
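    # e.g. ["Library", "Frameworks", "Python.framework", "Versions", "3.5"]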
print("-- Settings:")
print(" * Source directory: %s" % SRCDIR)
print(" * Build directory: %s" % WORKDIR)
print(" * SDK location: %s" % SDKPATH)
print(" * Third-party source: %s" % DEPSRC)
print(" * Deployment target: %s" % DEPTARGET)
print(" * Universal archs: %s" % str(ARCHLIST))
print(" * C compiler: %s" % CC)
print(" * C++ compiler: %s" % CXX)
print("")
print(" -- Building a Python %s framework at patch level %s"
% (getVersion(), getFullVersion()))
print("")
def extractArchive(builddir, archiveName):
"""
Extract a source archive into 'builddir'. Returns the path of the
extracted archive.
XXX: This function assumes that archives contain a toplevel directory
    that has the same name as the basename of the archive. This is
safe enough for almost anything we use. Unfortunately, it does not
work for current Tcl and Tk source releases where the basename of
the archive ends with "-src" but the uncompressed directory does not.
For now, just special case Tcl and Tk tar.gz downloads.
"""
curdir = os.getcwd()
try:
os.chdir(builddir)
if archiveName.endswith('.tar.gz'):
retval = os.path.basename(archiveName[:-7])
if ((retval.startswith('tcl') or retval.startswith('tk'))
and retval.endswith('-src')):
retval = retval[:-4]
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("tar zxf %s 2>&1"%(shellQuote(archiveName),), 'r')
elif archiveName.endswith('.tar.bz2'):
retval = os.path.basename(archiveName[:-8])
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("tar jxf %s 2>&1"%(shellQuote(archiveName),), 'r')
elif archiveName.endswith('.tar'):
retval = os.path.basename(archiveName[:-4])
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("tar xf %s 2>&1"%(shellQuote(archiveName),), 'r')
elif archiveName.endswith('.zip'):
retval = os.path.basename(archiveName[:-4])
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("unzip %s 2>&1"%(shellQuote(archiveName),), 'r')
data = fp.read()
xit = fp.close()
if xit is not None:
sys.stdout.write(data)
raise RuntimeError("Cannot extract %s"%(archiveName,))
return os.path.join(builddir, retval)
finally:
os.chdir(curdir)
def downloadURL(url, fname):
"""
Download the contents of the url into the file.
"""
fpIn = urllib_request.urlopen(url)
fpOut = open(fname, 'wb')
block = fpIn.read(10240)
try:
while block:
fpOut.write(block)
block = fpIn.read(10240)
fpIn.close()
fpOut.close()
except:
try:
os.unlink(fname)
except OSError:
pass
def verifyThirdPartyFile(url, checksum, fname):
"""
Download file from url to filename fname if it does not already exist.
Abort if file contents does not match supplied md5 checksum.
"""
name = os.path.basename(fname)
if os.path.exists(fname):
print("Using local copy of %s"%(name,))
else:
print("Did not find local copy of %s"%(name,))
print("Downloading %s"%(name,))
downloadURL(url, fname)
print("Archive for %s stored as %s"%(name, fname))
if os.system(
'MD5=$(openssl md5 %s) ; test "${MD5##*= }" = "%s"'
% (shellQuote(fname), checksum) ):
fatal('MD5 checksum mismatch for file %s' % fname)
def build_universal_openssl(basedir, archList):
"""
Special case build recipe for universal build of openssl.
The upstream OpenSSL build system does not directly support
OS X universal builds. We need to build each architecture
separately then lipo them together into fat libraries.
"""
# OpenSSL fails to build with Xcode 2.5 (on OS X 10.4).
# If we are building on a 10.4.x or earlier system,
# unilaterally disable assembly code building to avoid the problem.
no_asm = int(platform.release().split(".")[0]) < 9
def build_openssl_arch(archbase, arch):
"Build one architecture of openssl"
arch_opts = {
"i386": ["darwin-i386-cc"],
"x86_64": ["darwin64-x86_64-cc", "enable-ec_nistp_64_gcc_128"],
"ppc": ["darwin-ppc-cc"],
"ppc64": ["darwin64-ppc-cc"],
}
configure_opts = [
"no-krb5",
"no-idea",
"no-mdc2",
"no-rc5",
"no-zlib",
"enable-tlsext",
"no-ssl2",
"no-ssl3",
"no-ssl3-method",
# "enable-unit-test",
"shared",
"--install_prefix=%s"%shellQuote(archbase),
"--prefix=%s"%os.path.join("/", *FW_VERSION_PREFIX),
"--openssldir=/System/Library/OpenSSL",
]
if no_asm:
configure_opts.append("no-asm")
runCommand(" ".join(["perl", "Configure"]
+ arch_opts[arch] + configure_opts))
runCommand("make depend OSX_SDK=%s" % SDKPATH)
runCommand("make all OSX_SDK=%s" % SDKPATH)
runCommand("make install_sw OSX_SDK=%s" % SDKPATH)
# runCommand("make test")
return
srcdir = os.getcwd()
universalbase = os.path.join(srcdir, "..",
os.path.basename(srcdir) + "-universal")
os.mkdir(universalbase)
archbasefws = []
for arch in archList:
# fresh copy of the source tree
archsrc = os.path.join(universalbase, arch, "src")
shutil.copytree(srcdir, archsrc, symlinks=True)
# install base for this arch
archbase = os.path.join(universalbase, arch, "root")
os.mkdir(archbase)
# Python framework base within install_prefix:
# the build will install into this framework..
# This is to ensure that the resulting shared libs have
# the desired real install paths built into them.
archbasefw = os.path.join(archbase, *FW_VERSION_PREFIX)
# build one architecture
os.chdir(archsrc)
build_openssl_arch(archbase, arch)
os.chdir(srcdir)
archbasefws.append(archbasefw)
# copy arch-independent files from last build into the basedir framework
basefw = os.path.join(basedir, *FW_VERSION_PREFIX)
shutil.copytree(
os.path.join(archbasefw, "include", "openssl"),
os.path.join(basefw, "include", "openssl")
)
shlib_version_number = grepValue(os.path.join(archsrc, "Makefile"),
"SHLIB_VERSION_NUMBER")
# e.g. -> "1.0.0"
libcrypto = "libcrypto.dylib"
libcrypto_versioned = libcrypto.replace(".", "."+shlib_version_number+".")
# e.g. -> "libcrypto.1.0.0.dylib"
libssl = "libssl.dylib"
libssl_versioned = libssl.replace(".", "."+shlib_version_number+".")
# e.g. -> "libssl.1.0.0.dylib"
try:
os.mkdir(os.path.join(basefw, "lib"))
except OSError:
pass
# merge the individual arch-dependent shared libs into a fat shared lib
archbasefws.insert(0, basefw)
for (lib_unversioned, lib_versioned) in [
(libcrypto, libcrypto_versioned),
(libssl, libssl_versioned)
]:
runCommand("lipo -create -output " +
" ".join(shellQuote(
os.path.join(fw, "lib", lib_versioned))
for fw in archbasefws))
# and create an unversioned symlink of it
os.symlink(lib_versioned, os.path.join(basefw, "lib", lib_unversioned))
# Create links in the temp include and lib dirs that will be injected
# into the Python build so that setup.py can find them while building
# and the versioned links so that the setup.py post-build import test
# does not fail.
relative_path = os.path.join("..", "..", "..", *FW_VERSION_PREFIX)
for fn in [
["include", "openssl"],
["lib", libcrypto],
["lib", libssl],
["lib", libcrypto_versioned],
["lib", libssl_versioned],
]:
os.symlink(
os.path.join(relative_path, *fn),
os.path.join(basedir, "usr", "local", *fn)
)
return
def buildRecipe(recipe, basedir, archList):
"""
Build software using a recipe. This function does the
'configure;make;make install' dance for C software, with a possibility
    to customize this process, basically a poor-man's DarwinPorts.
"""
curdir = os.getcwd()
name = recipe['name']
THIRD_PARTY_LIBS.append(name)
url = recipe['url']
configure = recipe.get('configure', './configure')
buildrecipe = recipe.get('buildrecipe', None)
install = recipe.get('install', 'make && make install DESTDIR=%s'%(
shellQuote(basedir)))
archiveName = os.path.split(url)[-1]
sourceArchive = os.path.join(DEPSRC, archiveName)
if not os.path.exists(DEPSRC):
os.mkdir(DEPSRC)
verifyThirdPartyFile(url, recipe['checksum'], sourceArchive)
print("Extracting archive for %s"%(name,))
buildDir=os.path.join(WORKDIR, '_bld')
if not os.path.exists(buildDir):
os.mkdir(buildDir)
workDir = extractArchive(buildDir, sourceArchive)
os.chdir(workDir)
for patch in recipe.get('patches', ()):
if isinstance(patch, tuple):
url, checksum = patch
fn = os.path.join(DEPSRC, os.path.basename(url))
verifyThirdPartyFile(url, checksum, fn)
else:
# patch is a file in the source directory
fn = os.path.join(curdir, patch)
runCommand('patch -p%s < %s'%(recipe.get('patchlevel', 1),
shellQuote(fn),))
for patchscript in recipe.get('patchscripts', ()):
if isinstance(patchscript, tuple):
url, checksum = patchscript
fn = os.path.join(DEPSRC, os.path.basename(url))
verifyThirdPartyFile(url, checksum, fn)
else:
            # patch script is a file in the source directory
fn = os.path.join(curdir, patchscript)
if fn.endswith('.bz2'):
runCommand('bunzip2 -fk %s' % shellQuote(fn))
fn = fn[:-4]
runCommand('sh %s' % shellQuote(fn))
os.unlink(fn)
if 'buildDir' in recipe:
os.chdir(recipe['buildDir'])
if configure is not None:
configure_args = [
"--prefix=/usr/local",
"--enable-static",
"--disable-shared",
#"CPP=gcc -arch %s -E"%(' -arch '.join(archList,),),
]
if 'configure_pre' in recipe:
args = list(recipe['configure_pre'])
if '--disable-static' in args:
configure_args.remove('--enable-static')
if '--enable-shared' in args:
configure_args.remove('--disable-shared')
configure_args.extend(args)
if recipe.get('useLDFlags', 1):
configure_args.extend([
"CFLAGS=%s-mmacosx-version-min=%s -arch %s -isysroot %s "
"-I%s/usr/local/include"%(
recipe.get('extra_cflags', ''),
DEPTARGET,
' -arch '.join(archList),
shellQuote(SDKPATH)[1:-1],
shellQuote(basedir)[1:-1],),
"LDFLAGS=-mmacosx-version-min=%s -isysroot %s -L%s/usr/local/lib -arch %s"%(
DEPTARGET,
shellQuote(SDKPATH)[1:-1],
shellQuote(basedir)[1:-1],
' -arch '.join(archList)),
])
else:
configure_args.extend([
"CFLAGS=%s-mmacosx-version-min=%s -arch %s -isysroot %s "
"-I%s/usr/local/include"%(
recipe.get('extra_cflags', ''),
DEPTARGET,
' -arch '.join(archList),
shellQuote(SDKPATH)[1:-1],
shellQuote(basedir)[1:-1],),
])
if 'configure_post' in recipe:
configure_args = configure_args + list(recipe['configure_post'])
configure_args.insert(0, configure)
configure_args = [ shellQuote(a) for a in configure_args ]
print("Running configure for %s"%(name,))
runCommand(' '.join(configure_args) + ' 2>&1')
if buildrecipe is not None:
# call special-case build recipe, e.g. for openssl
buildrecipe(basedir, archList)
if install is not None:
print("Running install for %s"%(name,))
runCommand('{ ' + install + ' ;} 2>&1')
print("Done %s"%(name,))
print("")
os.chdir(curdir)
def buildLibraries():
"""
Build our dependencies into $WORKDIR/libraries/usr/local
"""
print("")
print("Building required libraries")
print("")
universal = os.path.join(WORKDIR, 'libraries')
os.mkdir(universal)
os.makedirs(os.path.join(universal, 'usr', 'local', 'lib'))
os.makedirs(os.path.join(universal, 'usr', 'local', 'include'))
for recipe in library_recipes():
buildRecipe(recipe, universal, ARCHLIST)
def buildPythonDocs():
# This stores the documentation as Resources/English.lproj/Documentation
    # inside the framework. pydoc and IDLE will pick it up there.
print("Install python documentation")
rootDir = os.path.join(WORKDIR, '_root')
buildDir = os.path.join('../../Doc')
docdir = os.path.join(rootDir, 'pydocs')
curDir = os.getcwd()
os.chdir(buildDir)
runCommand('make clean')
# Create virtual environment for docs builds with blurb and sphinx
runCommand('make venv')
runCommand('make html PYTHON=venv/bin/python')
os.chdir(curDir)
if not os.path.exists(docdir):
os.mkdir(docdir)
os.rename(os.path.join(buildDir, 'build', 'html'), docdir)
def buildPython():
print("Building a universal python for %s architectures" % UNIVERSALARCHS)
buildDir = os.path.join(WORKDIR, '_bld', 'python')
rootDir = os.path.join(WORKDIR, '_root')
if os.path.exists(buildDir):
shutil.rmtree(buildDir)
if os.path.exists(rootDir):
shutil.rmtree(rootDir)
os.makedirs(buildDir)
os.makedirs(rootDir)
os.makedirs(os.path.join(rootDir, 'empty-dir'))
curdir = os.getcwd()
os.chdir(buildDir)
# Not sure if this is still needed, the original build script
# claims that parts of the install assume python.exe exists.
os.symlink('python', os.path.join(buildDir, 'python.exe'))
# Extract the version from the configure file, needed to calculate
# several paths.
version = getVersion()
# Since the extra libs are not in their installed framework location
# during the build, augment the library path so that the interpreter
# will find them during its extension import sanity checks.
os.environ['DYLD_LIBRARY_PATH'] = os.path.join(WORKDIR,
'libraries', 'usr', 'local', 'lib')
print("Running configure...")
runCommand("%s -C --enable-framework --enable-universalsdk=%s "
"--with-universal-archs=%s "
"%s "
"%s "
"LDFLAGS='-g -L%s/libraries/usr/local/lib' "
"CFLAGS='-g -I%s/libraries/usr/local/include' 2>&1"%(
shellQuote(os.path.join(SRCDIR, 'configure')), shellQuote(SDKPATH),
UNIVERSALARCHS,
(' ', '--with-computed-gotos ')[PYTHON_3],
(' ', '--without-ensurepip ')[PYTHON_3],
shellQuote(WORKDIR)[1:-1],
shellQuote(WORKDIR)[1:-1]))
# Look for environment value BUILDINSTALLER_BUILDPYTHON_MAKE_EXTRAS
# and, if defined, append its value to the make command. This allows
# us to pass in version control tags, like GITTAG, to a build from a
# tarball rather than from a vcs checkout, thus eliminating the need
# to have a working copy of the vcs program on the build machine.
#
# A typical use might be:
# export BUILDINSTALLER_BUILDPYTHON_MAKE_EXTRAS=" \
# GITVERSION='echo 123456789a' \
# GITTAG='echo v3.6.0' \
# GITBRANCH='echo 3.6'"
make_extras = os.getenv("BUILDINSTALLER_BUILDPYTHON_MAKE_EXTRAS")
if make_extras:
make_cmd = "make " + make_extras
else:
make_cmd = "make"
print("Running " + make_cmd)
runCommand(make_cmd)
print("Running make install")
runCommand("make install DESTDIR=%s"%(
shellQuote(rootDir)))
print("Running make frameworkinstallextras")
runCommand("make frameworkinstallextras DESTDIR=%s"%(
shellQuote(rootDir)))
del os.environ['DYLD_LIBRARY_PATH']
print("Copying required shared libraries")
if os.path.exists(os.path.join(WORKDIR, 'libraries', 'Library')):
runCommand("mv %s/* %s"%(
shellQuote(os.path.join(
WORKDIR, 'libraries', 'Library', 'Frameworks',
'Python.framework', 'Versions', getVersion(),
'lib')),
shellQuote(os.path.join(WORKDIR, '_root', 'Library', 'Frameworks',
'Python.framework', 'Versions', getVersion(),
'lib'))))
path_to_lib = os.path.join(rootDir, 'Library', 'Frameworks',
'Python.framework', 'Versions',
version, 'lib', 'python%s'%(version,))
print("Fix file modes")
frmDir = os.path.join(rootDir, 'Library', 'Frameworks', 'Python.framework')
gid = grp.getgrnam('admin').gr_gid
shared_lib_error = False
for dirpath, dirnames, filenames in os.walk(frmDir):
for dn in dirnames:
os.chmod(os.path.join(dirpath, dn), STAT_0o775)
os.chown(os.path.join(dirpath, dn), -1, gid)
for fn in filenames:
if os.path.islink(fn):
continue
# "chmod g+w $fn"
p = os.path.join(dirpath, fn)
st = os.stat(p)
os.chmod(p, stat.S_IMODE(st.st_mode) | stat.S_IWGRP)
os.chown(p, -1, gid)
if fn in EXPECTED_SHARED_LIBS:
# check to see that this file was linked with the
# expected library path and version
data = captureCommand("otool -L %s" % shellQuote(p))
for sl in EXPECTED_SHARED_LIBS[fn]:
if ("\t%s " % sl) not in data:
print("Expected shared lib %s was not linked with %s"
% (sl, p))
shared_lib_error = True
if shared_lib_error:
fatal("Unexpected shared library errors.")
if PYTHON_3:
LDVERSION=None
VERSION=None
ABIFLAGS=None
fp = open(os.path.join(buildDir, 'Makefile'), 'r')
for ln in fp:
if ln.startswith('VERSION='):
VERSION=ln.split()[1]
if ln.startswith('ABIFLAGS='):
ABIFLAGS=ln.split()[1]
if ln.startswith('LDVERSION='):
LDVERSION=ln.split()[1]
fp.close()
LDVERSION = LDVERSION.replace('$(VERSION)', VERSION)
LDVERSION = LDVERSION.replace('$(ABIFLAGS)', ABIFLAGS)
config_suffix = '-' + LDVERSION
else:
config_suffix = '' # Python 2.x
# We added some directories to the search path during the configure
# phase. Remove those because those directories won't be there on
# the end-users system. Also remove the directories from _sysconfigdata.py
# (added in 3.3) if it exists.
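    # Concretely, the substrings stripped from the build flags are the two
    # WORKDIR-relative search paths defined just below, i.e.
    # "-I<WORKDIR>/libraries/usr/local/include" and "-L<WORKDIR>/libraries/usr/local/lib".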
include_path = '-I%s/libraries/usr/local/include' % (WORKDIR,)
lib_path = '-L%s/libraries/usr/local/lib' % (WORKDIR,)
# fix Makefile
path = os.path.join(path_to_lib, 'config' + config_suffix, 'Makefile')
fp = open(path, 'r')
data = fp.read()
fp.close()
for p in (include_path, lib_path):
data = data.replace(" " + p, '')
data = data.replace(p + " ", '')
fp = open(path, 'w')
fp.write(data)
fp.close()
# fix _sysconfigdata if it exists
#
# TODO: make this more robust! test_sysconfig_module of
# distutils.tests.test_sysconfig.SysconfigTestCase tests that
# the output from get_config_var in both sysconfig and
# distutils.sysconfig is exactly the same for both CFLAGS and
# LDFLAGS. The fixing up is now complicated by the pretty
# printing in _sysconfigdata.py. Also, we are using the
# pprint from the Python running the installer build which
# may not cosmetically format the same as the pprint in the Python
# being built (and which is used to originally generate
# _sysconfigdata.py).
import pprint
path = os.path.join(path_to_lib, '_sysconfigdata.py')
if os.path.exists(path):
fp = open(path, 'r')
data = fp.read()
fp.close()
# create build_time_vars dict
exec(data)
vars = {}
for k, v in build_time_vars.items():
if type(v) == type(''):
for p in (include_path, lib_path):
v = v.replace(' ' + p, '')
v = v.replace(p + ' ', '')
vars[k] = v
fp = open(path, 'w')
# duplicated from sysconfig._generate_posix_vars()
fp.write('# system configuration generated and used by'
' the sysconfig module\n')
fp.write('build_time_vars = ')
pprint.pprint(vars, stream=fp)
fp.close()
# Add symlinks in /usr/local/bin, using relative links
usr_local_bin = os.path.join(rootDir, 'usr', 'local', 'bin')
to_framework = os.path.join('..', '..', '..', 'Library', 'Frameworks',
'Python.framework', 'Versions', version, 'bin')
if os.path.exists(usr_local_bin):
shutil.rmtree(usr_local_bin)
os.makedirs(usr_local_bin)
for fn in os.listdir(
os.path.join(frmDir, 'Versions', version, 'bin')):
os.symlink(os.path.join(to_framework, fn),
os.path.join(usr_local_bin, fn))
os.chdir(curdir)
if PYTHON_3:
# Remove the 'Current' link, that way we don't accidentally mess
# with an already installed version of python 2
os.unlink(os.path.join(rootDir, 'Library', 'Frameworks',
'Python.framework', 'Versions', 'Current'))
def patchFile(inPath, outPath):
data = fileContents(inPath)
data = data.replace('$FULL_VERSION', getFullVersion())
data = data.replace('$VERSION', getVersion())
data = data.replace('$MACOSX_DEPLOYMENT_TARGET', ''.join((DEPTARGET, ' or later')))
data = data.replace('$ARCHITECTURES', ", ".join(universal_opts_map[UNIVERSALARCHS]))
data = data.replace('$INSTALL_SIZE', installSize())
data = data.replace('$THIRD_PARTY_LIBS', "\\\n".join(THIRD_PARTY_LIBS))
# This one is not handy as a template variable
data = data.replace('$PYTHONFRAMEWORKINSTALLDIR', '/Library/Frameworks/Python.framework')
fp = open(outPath, 'w')
fp.write(data)
fp.close()
def patchScript(inPath, outPath):
major, minor = getVersionMajorMinor()
data = fileContents(inPath)
data = data.replace('@PYMAJOR@', str(major))
data = data.replace('@PYVER@', getVersion())
fp = open(outPath, 'w')
fp.write(data)
fp.close()
os.chmod(outPath, STAT_0o755)
def packageFromRecipe(targetDir, recipe):
curdir = os.getcwd()
try:
# The major version (such as 2.5) is included in the package name
        # because having two versions of python installed at the same time is
# common.
pkgname = '%s-%s'%(recipe['name'], getVersion())
srcdir = recipe.get('source')
pkgroot = recipe.get('topdir', srcdir)
postflight = recipe.get('postflight')
readme = textwrap.dedent(recipe['readme'])
isRequired = recipe.get('required', True)
print("- building package %s"%(pkgname,))
# Substitute some variables
textvars = dict(
VER=getVersion(),
FULLVER=getFullVersion(),
)
readme = readme % textvars
if pkgroot is not None:
pkgroot = pkgroot % textvars
else:
pkgroot = '/'
if srcdir is not None:
srcdir = os.path.join(WORKDIR, '_root', srcdir[1:])
srcdir = srcdir % textvars
if postflight is not None:
postflight = os.path.abspath(postflight)
packageContents = os.path.join(targetDir, pkgname + '.pkg', 'Contents')
os.makedirs(packageContents)
if srcdir is not None:
os.chdir(srcdir)
runCommand("pax -wf %s . 2>&1"%(shellQuote(os.path.join(packageContents, 'Archive.pax')),))
runCommand("gzip -9 %s 2>&1"%(shellQuote(os.path.join(packageContents, 'Archive.pax')),))
runCommand("mkbom . %s 2>&1"%(shellQuote(os.path.join(packageContents, 'Archive.bom')),))
fn = os.path.join(packageContents, 'PkgInfo')
fp = open(fn, 'w')
fp.write('pmkrpkg1')
fp.close()
rsrcDir = os.path.join(packageContents, "Resources")
os.mkdir(rsrcDir)
fp = open(os.path.join(rsrcDir, 'ReadMe.txt'), 'w')
fp.write(readme)
fp.close()
if postflight is not None:
patchScript(postflight, os.path.join(rsrcDir, 'postflight'))
vers = getFullVersion()
major, minor = getVersionMajorMinor()
pl = Plist(
CFBundleGetInfoString="Python.%s %s"%(pkgname, vers,),
CFBundleIdentifier='org.python.Python.%s'%(pkgname,),
CFBundleName='Python.%s'%(pkgname,),
CFBundleShortVersionString=vers,
IFMajorVersion=major,
IFMinorVersion=minor,
IFPkgFormatVersion=0.10000000149011612,
IFPkgFlagAllowBackRev=False,
IFPkgFlagAuthorizationAction="RootAuthorization",
IFPkgFlagDefaultLocation=pkgroot,
IFPkgFlagFollowLinks=True,
IFPkgFlagInstallFat=True,
IFPkgFlagIsRequired=isRequired,
IFPkgFlagOverwritePermissions=False,
IFPkgFlagRelocatable=False,
IFPkgFlagRestartAction="NoRestart",
IFPkgFlagRootVolumeOnly=True,
IFPkgFlagUpdateInstalledLangauges=False,
)
writePlist(pl, os.path.join(packageContents, 'Info.plist'))
pl = Plist(
IFPkgDescriptionDescription=readme,
IFPkgDescriptionTitle=recipe.get('long_name', "Python.%s"%(pkgname,)),
IFPkgDescriptionVersion=vers,
)
writePlist(pl, os.path.join(packageContents, 'Resources', 'Description.plist'))
finally:
os.chdir(curdir)
def makeMpkgPlist(path):
vers = getFullVersion()
major, minor = getVersionMajorMinor()
pl = Plist(
CFBundleGetInfoString="Python %s"%(vers,),
CFBundleIdentifier='org.python.Python',
CFBundleName='Python',
CFBundleShortVersionString=vers,
IFMajorVersion=major,
IFMinorVersion=minor,
IFPkgFlagComponentDirectory="Contents/Packages",
IFPkgFlagPackageList=[
dict(
IFPkgFlagPackageLocation='%s-%s.pkg'%(item['name'], getVersion()),
IFPkgFlagPackageSelection=item.get('selected', 'selected'),
)
for item in pkg_recipes()
],
IFPkgFormatVersion=0.10000000149011612,
IFPkgFlagBackgroundScaling="proportional",
IFPkgFlagBackgroundAlignment="left",
IFPkgFlagAuthorizationAction="RootAuthorization",
)
writePlist(pl, path)
def buildInstaller():
# Zap all compiled files
for dirpath, _, filenames in os.walk(os.path.join(WORKDIR, '_root')):
for fn in filenames:
if fn.endswith('.pyc') or fn.endswith('.pyo'):
os.unlink(os.path.join(dirpath, fn))
outdir = os.path.join(WORKDIR, 'installer')
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.mkdir(outdir)
pkgroot = os.path.join(outdir, 'Python.mpkg', 'Contents')
pkgcontents = os.path.join(pkgroot, 'Packages')
os.makedirs(pkgcontents)
for recipe in pkg_recipes():
packageFromRecipe(pkgcontents, recipe)
rsrcDir = os.path.join(pkgroot, 'Resources')
fn = os.path.join(pkgroot, 'PkgInfo')
fp = open(fn, 'w')
fp.write('pmkrpkg1')
fp.close()
os.mkdir(rsrcDir)
makeMpkgPlist(os.path.join(pkgroot, 'Info.plist'))
pl = Plist(
IFPkgDescriptionTitle="Python",
IFPkgDescriptionVersion=getVersion(),
)
writePlist(pl, os.path.join(pkgroot, 'Resources', 'Description.plist'))
for fn in os.listdir('resources'):
if fn == '.svn': continue
if fn.endswith('.jpg'):
shutil.copy(os.path.join('resources', fn), os.path.join(rsrcDir, fn))
else:
patchFile(os.path.join('resources', fn), os.path.join(rsrcDir, fn))
def installSize(clear=False, _saved=[]):
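    # The mutable default argument `_saved` doubles as a per-process cache so the
    # `du` call below only runs once; pass clear=True to force a recomputation.
    # `du -ks` reports kilobytes, hence the division by 1024; the +0.5 rounds the
    # result to the nearest whole megabyte before the "%d" formatting truncates it.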
if clear:
del _saved[:]
if not _saved:
data = captureCommand("du -ks %s"%(
shellQuote(os.path.join(WORKDIR, '_root'))))
_saved.append("%d"%((0.5 + (int(data.split()[0]) / 1024.0)),))
return _saved[0]
def buildDMG():
"""
Create DMG containing the rootDir.
"""
outdir = os.path.join(WORKDIR, 'diskimage')
if os.path.exists(outdir):
shutil.rmtree(outdir)
imagepath = os.path.join(outdir,
'python-%s-macosx%s'%(getFullVersion(),DEPTARGET))
if INCLUDE_TIMESTAMP:
imagepath = imagepath + '-%04d-%02d-%02d'%(time.localtime()[:3])
imagepath = imagepath + '.dmg'
os.mkdir(outdir)
volname='Python %s'%(getFullVersion())
runCommand("hdiutil create -format UDRW -volname %s -srcfolder %s %s"%(
shellQuote(volname),
shellQuote(os.path.join(WORKDIR, 'installer')),
shellQuote(imagepath + ".tmp.dmg" )))
if not os.path.exists(os.path.join(WORKDIR, "mnt")):
os.mkdir(os.path.join(WORKDIR, "mnt"))
runCommand("hdiutil attach %s -mountroot %s"%(
shellQuote(imagepath + ".tmp.dmg"), shellQuote(os.path.join(WORKDIR, "mnt"))))
# Custom icon for the DMG, shown when the DMG is mounted.
shutil.copy("../Icons/Disk Image.icns",
os.path.join(WORKDIR, "mnt", volname, ".VolumeIcon.icns"))
runCommand("SetFile -a C %s/"%(
shellQuote(os.path.join(WORKDIR, "mnt", volname)),))
runCommand("hdiutil detach %s"%(shellQuote(os.path.join(WORKDIR, "mnt", volname))))
setIcon(imagepath + ".tmp.dmg", "../Icons/Disk Image.icns")
runCommand("hdiutil convert %s -format UDZO -o %s"%(
shellQuote(imagepath + ".tmp.dmg"), shellQuote(imagepath)))
setIcon(imagepath, "../Icons/Disk Image.icns")
os.unlink(imagepath + ".tmp.dmg")
return imagepath
def setIcon(filePath, icnsPath):
"""
Set the custom icon for the specified file or directory.
"""
dirPath = os.path.normpath(os.path.dirname(__file__))
toolPath = os.path.join(dirPath, "seticon.app/Contents/MacOS/seticon")
if not os.path.exists(toolPath) or os.stat(toolPath).st_mtime < os.stat(dirPath + '/seticon.m').st_mtime:
# NOTE: The tool is created inside an .app bundle, otherwise it won't work due
# to connections to the window server.
appPath = os.path.join(dirPath, "seticon.app/Contents/MacOS")
if not os.path.exists(appPath):
os.makedirs(appPath)
runCommand("cc -o %s %s/seticon.m -framework Cocoa"%(
shellQuote(toolPath), shellQuote(dirPath)))
runCommand("%s %s %s"%(shellQuote(os.path.abspath(toolPath)), shellQuote(icnsPath),
shellQuote(filePath)))
def main():
# First parse options and check if we can perform our work
parseOptions()
checkEnvironment()
os.environ['MACOSX_DEPLOYMENT_TARGET'] = DEPTARGET
os.environ['CC'] = CC
os.environ['CXX'] = CXX
if os.path.exists(WORKDIR):
shutil.rmtree(WORKDIR)
os.mkdir(WORKDIR)
os.environ['LC_ALL'] = 'C'
# Then build third-party libraries such as sleepycat DB4.
buildLibraries()
# Now build python itself
buildPython()
# And then build the documentation
# Remove the Deployment Target from the shell
# environment, it's no longer needed and
# an unexpected build target can cause problems
# when Sphinx and its dependencies need to
# be (re-)installed.
del os.environ['MACOSX_DEPLOYMENT_TARGET']
buildPythonDocs()
# Prepare the applications folder
folder = os.path.join(WORKDIR, "_root", "Applications", "Python %s"%(
getVersion(),))
fn = os.path.join(folder, "License.rtf")
patchFile("resources/License.rtf", fn)
fn = os.path.join(folder, "ReadMe.rtf")
patchFile("resources/ReadMe.rtf", fn)
fn = os.path.join(folder, "Update Shell Profile.command")
patchScript("scripts/postflight.patch-profile", fn)
os.chmod(folder, STAT_0o755)
setIcon(folder, "../Icons/Python Folder.icns")
# Create the installer
buildInstaller()
# And copy the readme into the directory containing the installer
patchFile('resources/ReadMe.rtf',
os.path.join(WORKDIR, 'installer', 'ReadMe.rtf'))
# Ditto for the license file.
patchFile('resources/License.rtf',
os.path.join(WORKDIR, 'installer', 'License.rtf'))
fp = open(os.path.join(WORKDIR, 'installer', 'Build.txt'), 'w')
fp.write("# BUILD INFO\n")
fp.write("# Date: %s\n" % time.ctime())
fp.write("# By: %s\n" % pwd.getpwuid(os.getuid()).pw_gecos)
fp.close()
# And copy it to a DMG
buildDMG()
if __name__ == "__main__":
main()
| apache-2.0 | -1,713,098,667,837,621,000 | 36.150358 | 174 | 0.574505 | false |
balopat/pyquil | pyquil/tests/test_paulis.py | 1 | 15171 | #!/usr/bin/python
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import pytest
from pyquil.paulis import (PauliTerm, PauliSum, exponential_map, ID, exponentiate,
trotterize, is_zero, check_commutation, commuting_sets,
)
from pyquil.quil import Program
from pyquil.gates import RX, RZ, CNOT, H, X, PHASE
import math
from itertools import product
def isclose(a, b, rel_tol=1e-10, abs_tol=0.0):
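    # Same semantics as math.isclose() (added in Python 3.5), re-implemented here,
    # presumably so the tests also run on interpreters that lack it.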
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def compare_progs(test, reference):
"""
compares two programs gate by gate, param by param
"""
tinstr = test.actions
rinstr = reference.actions
assert len(tinstr) == len(rinstr)
for idx in xrange(len(tinstr)):
# check each field of the instruction object
assert tinstr[idx][1].operator_name == rinstr[idx][1].operator_name
assert len(tinstr[idx][1].parameters) == len(rinstr[idx][1].parameters)
for pp in xrange(len(tinstr[idx][1].parameters)):
cmp_val = isclose(tinstr[idx][1].parameters[pp], rinstr[idx][1].parameters[pp])
assert cmp_val
assert len(tinstr[idx][1].arguments) == len(rinstr[idx][1].arguments)
for aa in xrange(len(tinstr[idx][1].arguments)):
assert tinstr[idx][1].arguments[aa] == rinstr[idx][1].arguments[aa]
def test_simplify_terms():
term = PauliTerm('Z', 0) * -1.0 * PauliTerm('Z', 0)
assert term.id() == ''
assert term.coefficient == -1.0
term = PauliTerm('Z', 0) + PauliTerm('Z', 0, 1.0)
assert str(term) == '2.0*Z0'
def test_get_qubits():
term = PauliTerm('Z', 0) * PauliTerm('X', 1)
assert term.get_qubits() == [0, 1]
sum_term = PauliTerm('X', 0, 0.5) + 0.5j * PauliTerm('Y', 10) * PauliTerm('Y', 0, 0.5j)
assert sum_term.get_qubits() == [0, 10]
def test_simplify_term_id_1():
term = PauliTerm('I', 0, 0.5)
assert term.id() == ''
assert term.coefficient == 0.5
def test_simplify_term_id_2():
term = 0.5 * ID
assert term.id() == ''
assert term.coefficient == 0.5
def test_simplify_term_id_3():
s = 0.25 + 0.25 * ID
terms = s.terms
assert len(terms) == 1
assert terms[0].id() == ''
assert terms[0].coefficient == 0.5
def test_simplify_term_single():
term = PauliTerm('Z', 0) * PauliTerm('I', 1) * PauliTerm('X', 2, 0.5j) * PauliTerm('Z', 0, 1.0)
assert term.id() == 'X2'
assert term.coefficient == 0.5j
def test_simplify_term_xz():
term1 = (-0.5 * PauliTerm('X', 0)) * (-1.0 * PauliTerm('Z', 0))
term2 = -0.5 * PauliTerm('X', 0) * (-1.0) * PauliTerm('Z', 0)
term3 = 0.5 * PauliTerm('X', 0) * PauliTerm('Z', 0)
for term in [term1, term2, term3]:
assert term.id() == 'Y0'
assert term.coefficient == -0.5j
def test_simplify_term_multindex():
term = PauliTerm('X', 0, coefficient=-0.5) * PauliTerm('Z', 0, coefficient=-1.0) \
* PauliTerm('X', 2, 0.5)
assert term.id() == 'Y0X2'
assert term.coefficient == -0.25j
def test_simplify_sum_terms():
sum_term = PauliSum([PauliTerm('X', 0, 0.5), PauliTerm('Z', 0, 0.5j)])
assert str(sum_term + sum_term) == '1.0*X0 + 1j*Z0'
sum_term = PauliSum([PauliTerm('X', 0, 0.5), PauliTerm('X', 0, 0.5)])
assert str(sum_term.simplify()) == '1.0*X0'
# test the simplify on multiplication
sum_term = PauliSum([PauliTerm('X', 0, 0.5), PauliTerm('X', 0, 0.5)])
assert str(sum_term * sum_term) == '1.0*I'
def test_len():
term = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)
assert len(term) == 2
def test_enumerate():
term = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
position_op_pairs = [(0, "Z"), (1, "Z"), (5, "X")]
for key, val in term:
assert (key, val) in position_op_pairs
def test_getitem():
term = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
assert term[0] == "Z"
assert term[1] == "Z"
assert term[2] == "I"
assert term[3] == "I"
assert term[4] == "I"
assert term[5] == "X"
assert len(term) == 3
def test_ids():
term_1 = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
term_2 = PauliTerm("X", 5, 5) * PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)
assert term_1.id() == term_2.id()
def test_pauliop_inputs():
with pytest.raises(AssertionError):
PauliTerm('X', -2)
def test_pauli_sum():
q_plus = 0.5 * PauliTerm('X', 0) + 0.5j * PauliTerm('Y', 0)
the_sum = q_plus * PauliSum([PauliTerm('X', 0)])
term_strings = map(lambda x: str(x), the_sum.terms)
assert '0.5*I' in term_strings
assert '(0.5+0j)*Z0' in term_strings
assert len(term_strings) == 2
assert len(the_sum.terms) == 2
the_sum = q_plus * PauliTerm('X', 0)
term_strings = map(lambda x: str(x), the_sum.terms)
assert '0.5*I' in term_strings
assert '(0.5+0j)*Z0' in term_strings
assert len(term_strings) == 2
assert len(the_sum.terms) == 2
the_sum = PauliTerm('X', 0) * q_plus
term_strings = map(lambda x: str(x), the_sum.terms)
assert '0.5*I' in term_strings
assert '(-0.5+0j)*Z0' in term_strings
assert len(term_strings) == 2
assert len(the_sum.terms) == 2
def test_ps_adds_pt_1():
term = ID
b = term + term
assert str(b) == "2.0*I"
assert str(b + term) == "3.0*I"
assert str(term + b) == "3.0*I"
def test_ps_adds_pt_2():
term = ID
b = term + 1.0
assert str(b) == "2.0*I"
assert str(b + 1.0) == "3.0*I"
assert str(1.0 + b) == "3.0*I"
def test_zero_terms():
term = PauliTerm("X", 0, 1.0) + PauliTerm("X", 0, -1.0) + \
PauliTerm("Y", 0, 0.5)
assert str(term) == "0.5*Y0"
term = PauliTerm("X", 0, 1.0) + PauliTerm("X", 0, -1.0)
assert str(term) == "0.0*I"
assert len(term.terms) == 1
term2 = term * PauliTerm("Z", 2, 0.5)
assert str(term2) == "0.0*I"
term3 = PauliTerm("Z", 2, 0.5) + term
assert str(term3) == "0.5*Z2"
term4 = PauliSum([])
assert str(term4) == "0.0*I"
term = PauliSum([PauliTerm("X", 0, 0.0), PauliTerm("Y", 1, 1.0) *
PauliTerm("Z", 2)])
assert str(term) == "0.0*X0 + 1.0*Y1*Z2"
term = term.simplify()
assert str(term) == "1.0*Y1*Z2"
def test_exponentiate():
# test rotation of single qubit
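    # exponential_map(generator) returns a parametric program f(alpha) implementing
    # exp(-1j * alpha * generator) (up to a global phase); since RZ(theta) acts as
    # exp(-1j * theta/2 * Z), exp(-1j * 1.0 * Z_0) is RZ(2.0) on qubit 0, as asserted below.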
generator = PauliTerm("Z", 0, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst(RZ(2.0)(0))
compare_progs(prog, result_prog)
# testing general 2-circuit
generator = PauliTerm("Z", 1, 1.0) * PauliTerm("Z", 0, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst(CNOT(0, 1)).inst(RZ(2.0)(1)).inst(CNOT(0, 1))
compare_progs(prog, result_prog)
# testing change of basis position 0
generator = PauliTerm("Z", 1, 1.0) * PauliTerm("X", 0, 1.0)
param_prog = exponential_map(generator)
prog = param_prog(1)
result_prog = Program().inst([H(0), CNOT(0, 1), RZ(2.0)(1), CNOT(0, 1),
H(0)])
compare_progs(prog, result_prog)
# testing change of basis position 1
generator = PauliTerm("X", 1, 1.0) * PauliTerm("Z", 0, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([H(1), CNOT(0, 1), RZ(2.0)(1), CNOT(0, 1),
H(1)])
compare_progs(prog, result_prog)
# testing change of basis position 0
generator = PauliTerm("Z", 1, 1.0) * PauliTerm("Y", 0, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([RX(math.pi / 2.0)(0), CNOT(0, 1), RZ(2.0)(1),
CNOT(0, 1), RX(-math.pi / 2)(0)])
compare_progs(prog, result_prog)
# testing change of basis position 1
generator = PauliTerm("Y", 1, 1.0) * PauliTerm("Z", 0, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([RX(math.pi / 2.0)(1), CNOT(0, 1), RZ(2.0)(1),
CNOT(0, 1), RX(-math.pi / 2.0)(1)])
compare_progs(prog, result_prog)
# testing circuit for 3-terms with change of basis
generator = PauliTerm("X", 2, 1.0) * PauliTerm("Y", 1, 1.0) * PauliTerm("Z", 0, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([RX(math.pi / 2.0)(1), H(2), CNOT(0, 1),
CNOT(1, 2), RZ(2.0)(2), CNOT(1, 2),
CNOT(0, 1), RX(-math.pi / 2.0)(1), H(2)])
compare_progs(prog, result_prog)
# testing circuit for 3-terms non-sequential
generator = PauliTerm("Y", 3, 1.0) * PauliTerm("Y", 2, 1.0) * PauliTerm("I", 1,
1.0) * PauliTerm("Y", 0,
1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([RX(math.pi / 2.0)(0), RX(math.pi / 2.0)(2),
RX(math.pi / 2.0)(3), CNOT(0, 2),
CNOT(2, 3), RZ(2.0)(3), CNOT(2, 3),
CNOT(0, 2), RX(-math.pi / 2.0)(0),
RX(-math.pi / 2.0)(2), RX(-math.pi / 2.0)(3)])
compare_progs(prog, result_prog)
def test_exponentiate_prog():
ham = PauliTerm("Z", 0)
result_prog = Program(RZ(2.0, 0))
prog = exponentiate(ham)
compare_progs(result_prog, prog)
def test_exponentiate_identity():
generator = PauliTerm("I", 1, 0.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([X(0), PHASE(0)(0), X(0), PHASE(0)(0)])
compare_progs(prog, result_prog)
generator = PauliTerm("I", 1, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([X(0), PHASE(-1.0)(0), X(0), PHASE(-1.0)(0)])
compare_progs(prog, result_prog)
generator = PauliTerm("I", 10, 0.08)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([X(0), PHASE(-0.08)(0), X(0), PHASE(-0.08)(0)])
compare_progs(prog, result_prog)
def test_trotterize():
term_one = PauliTerm("X", 0, 1.0)
term_two = PauliTerm("Z", 0, 1.0)
with pytest.raises(ValueError):
trotterize(term_one, term_two, trotter_order=0)
with pytest.raises(ValueError):
trotterize(term_one, term_two, trotter_order=5)
prog, _ = trotterize(term_one, term_one)
result_prog = Program().inst([H(0), RZ(2.0)(0), H(0), H(0),
RZ(2.0)(0), H(0)])
compare_progs(prog, result_prog)
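    # First-order Trotterization approximates exp(-1j*(A+B)) by
    # (exp(-1j*A/n) * exp(-1j*B/n)) ** n with n = trotter_steps, which is why the
    # expected RZ angles below halve when trotter_steps goes from 1 to 2.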
# trotter_order 1 steps 1
prog, _ = trotterize(term_one, term_two, trotter_steps=1)
result_prog = Program().inst([H(0), RZ(2.0)(0), H(0), RZ(2.0)(0)])
compare_progs(prog, result_prog)
# trotter_order 1 steps 2
prog, _ = trotterize(term_one, term_two, trotter_steps=2)
result_prog = Program().inst([H(0), RZ(1.0)(0), H(0), RZ(1.0)(0),
H(0), RZ(1.0)(0), H(0), RZ(1.0)(0)])
compare_progs(prog, result_prog)
# trotter_order 2 steps 1
prog, _ = trotterize(term_one, term_two, trotter_order=2)
result_prog = Program().inst([H(0), RZ(1.0)(0), H(0), RZ(2.0)(0),
H(0), RZ(1.0)(0), H(0)])
compare_progs(prog, result_prog)
# trotter_order 2 steps 2
prog, _ = trotterize(term_one, term_two, trotter_order=2, trotter_steps=2)
result_prog = Program().inst([H(0), RZ(0.5)(0), H(0), RZ(1.0)(0),
H(0), RZ(0.5)(0), H(0),
H(0), RZ(0.5)(0), H(0), RZ(1.0)(0),
H(0), RZ(0.5)(0), H(0)])
compare_progs(prog, result_prog)
# trotter_order 3 steps 1
prog, _ = trotterize(term_one, term_two, trotter_order=3, trotter_steps=1)
result_prog = Program().inst([H(0), RZ(14.0 / 24)(0), H(0), RZ(4.0 / 3.0)(0),
H(0), RZ(1.5)(0), H(0), RZ(-4.0 / 3.0)(0),
H(0), RZ(-2.0 / 24)(0), H(0), RZ(2.0)(0)])
compare_progs(prog, result_prog)
def test_is_zero():
with pytest.raises(TypeError):
is_zero(1)
p_term = PauliTerm("X", 0)
ps_term = p_term + PauliTerm("Z", 1)
assert not is_zero(p_term)
assert is_zero(p_term + -1 * p_term)
assert not is_zero(ps_term)
def test_check_commutation():
term1 = PauliTerm("X", 0) * PauliTerm("X", 1)
term2 = PauliTerm("Y", 0) * PauliTerm("Y", 1)
term3 = PauliTerm("Y", 0) * PauliTerm("Z", 2)
# assert check_commutation(PauliSum([term1]), term2)
assert check_commutation([term2], term3)
assert check_commutation([term2], term3)
assert not check_commutation([term1], term3)
# more rigorous test. Get all operators in Pauli group
p_n_group = ("I", "X", "Y", "Z")
pauli_list = list(product(p_n_group, repeat=3))
pauli_ops = map(lambda x: zip(x, range(3)), pauli_list)
pauli_ops_pq = []
for op in pauli_ops:
pauli_ops_pq.append(reduce(lambda x, y: x * PauliTerm(y[0], y[1]),
op[1:],
PauliTerm(op[0][0], op[0][1]))
)
def commutator(t1, t2):
return t1 * t2 + -1 * t2 * t1
non_commuting_pairs = []
commuting_pairs = []
for x in xrange(len(pauli_ops_pq)):
for y in xrange(x, len(pauli_ops_pq)):
tmp_op = commutator(pauli_ops_pq[x], pauli_ops_pq[y])
assert len(tmp_op.terms) == 1
if tmp_op.terms[0].id() == '':
commuting_pairs.append((pauli_ops_pq[x], pauli_ops_pq[y]))
else:
non_commuting_pairs.append((pauli_ops_pq[x], pauli_ops_pq[y]))
# now that we have our sets let's check against our code.
for t1, t2 in non_commuting_pairs:
assert not check_commutation([t1], t2)
for t1, t2 in commuting_pairs:
assert check_commutation([t1], t2)
def test_commuting_sets():
term1 = PauliTerm("X", 0) * PauliTerm("X", 1)
term2 = PauliTerm("Y", 0) * PauliTerm("Y", 1)
term3 = PauliTerm("Y", 0) * PauliTerm("Z", 2)
pauli_sum = term1 + term2 + term3
commuting_sets(pauli_sum, 3)
| apache-2.0 | -8,069,435,463,665,465,000 | 34.696471 | 100 | 0.544921 | false |
mstriemer/zamboni | mkt/submit/helpers.py | 1 | 2132 | import re
import jinja2
from jingo import register, env
from langid import classify
import mkt
from mkt.submit.models import AppSubmissionChecklist
def del_by_key(data, delete):
"""Delete a tuple from a list of tuples based on its first item."""
data = list(data)
for idx, item in enumerate(data):
if ((isinstance(item[0], basestring) and item[0] == delete) or
(isinstance(item[0], (list, tuple)) and item[0] in delete)):
del data[idx]
return data
@register.function
def progress(request, addon, step):
steps = list(mkt.APP_STEPS)
completed = []
# TODO: Hide "Developer Account" step if user already read Dev Agreement.
# if request.user.read_dev_agreement:
# steps = del_by_key(steps, 'terms')
if addon:
try:
completed = addon.appsubmissionchecklist.get_completed()
except AppSubmissionChecklist.DoesNotExist:
pass
# We don't yet have a checklist yet if we just read the Dev Agreement.
if not completed and step and step != 'terms':
completed = ['terms']
c = dict(steps=steps, current=step, completed=completed)
t = env.get_template('submit/helpers/progress.html').render(c)
return jinja2.Markup(t)
def guess_language(text):
"""
    Passed a string, returns the language code that langid detects for it, or
    None when the detection is not confident enough.
    Results with a confidence below 0.7 are rejected, as are results below 0.9
    for strings of 3 words or fewer.
"""
guess, confidence = classify(text)
if confidence < 0.7:
return None
elif confidence < 0.9:
word_count = len(re.findall(r"[\w']+", text))
if word_count <= 3:
return None
return guess
def string_to_translatedfield_value(text):
"""
Passed a string, will return a dict mapping 'language': string, suitable to
be assigned to the value of a TranslatedField. If the language can not be
determined with confidence, will assume en-US.
"""
lang = guess_language(text)
if lang:
return {lang: text}
return {'en-us': text}
| bsd-3-clause | 5,081,919,413,981,935,000 | 28.205479 | 79 | 0.64728 | false |
openmotics/gateway | src/gateway/hal/master_controller_classic.py | 1 | 81283 | # Copyright (C) 2019 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module for communicating with the Master
"""
from __future__ import absolute_import
import functools
import logging
import re
import subprocess
import time
from datetime import datetime
from threading import Lock, Timer
import six
from gateway.daemon_thread import DaemonThread, DaemonThreadWait
from gateway.dto import RTD10DTO, DimmerConfigurationDTO, GlobalFeedbackDTO, \
GlobalRTD10DTO, GroupActionDTO, InputDTO, InputStatusDTO, LegacyScheduleDTO, \
LegacyStartupActionDTO, MasterSensorDTO, ModuleDTO, OutputDTO, \
OutputStatusDTO, PulseCounterDTO, PumpGroupDTO, ShutterDTO, \
ShutterGroupDTO, ThermostatAircoStatusDTO, ThermostatDTO, \
ThermostatGroupDTO
from gateway.enums import ShutterEnums
from gateway.exceptions import UnsupportedException
from gateway.hal.mappers_classic import DimmerConfigurationMapper, \
GlobalFeedbackMapper, GlobalRTD10Mapper, GroupActionMapper, InputMapper, \
LegacyScheduleMapper, LegacyStartupActionMapper, OutputMapper, \
PulseCounterMapper, PumpGroupMapper, RTD10Mapper, SensorMapper, \
ShutterGroupMapper, ShutterMapper, ThermostatGroupMapper, \
ThermostatMapper
from gateway.exceptions import CommunicationFailure
from gateway.hal.master_controller import MasterController
from gateway.hal.master_event import MasterEvent
from gateway.pubsub import PubSub
from ioc import INJECTED, Inject
from master.classic import eeprom_models, master_api
from master.classic.eeprom_controller import EepromAddress, EepromController
from master.classic.eeprom_models import CoolingConfiguration, \
CoolingPumpGroupConfiguration, DimmerConfiguration, \
GlobalRTD10Configuration, GlobalThermostatConfiguration, \
PumpGroupConfiguration, RTD10CoolingConfiguration, \
RTD10HeatingConfiguration, ScheduledActionConfiguration, \
StartupActionConfiguration, ThermostatConfiguration
from master.classic.master_communicator import BackgroundConsumer, \
MasterCommunicator, MasterUnavailable
from master.classic.master_heartbeat import MasterHeartbeat
from master.classic.slave_updater import bootload_modules
from master.classic.validationbits import ValidationBitStatus
from serial_utils import CommunicationTimedOutException
from toolbox import Toolbox
if False: # MYPY
from typing import Any, Dict, List, Literal, Optional, Tuple
from serial import Serial
HEALTH = Literal['success', 'unstable', 'failure']
logger = logging.getLogger(__name__)
def communication_enabled(f):
@functools.wraps(f)
def wrapper(instance, *args, **kwargs):
if not instance._communication_enabled:
raise MasterUnavailable()
return f(instance, *args, **kwargs)
return wrapper
class MasterClassicController(MasterController):
@Inject
def __init__(self, master_communicator=INJECTED, eeprom_controller=INJECTED, pubsub=INJECTED):
# type: (MasterCommunicator, EepromController, PubSub) -> None
super(MasterClassicController, self).__init__(master_communicator)
self._master_communicator = master_communicator # type: MasterCommunicator
self._eeprom_controller = eeprom_controller
self._pubsub = pubsub
self._heartbeat = MasterHeartbeat()
self._plugin_controller = None # type: Optional[Any]
self._validation_bits = ValidationBitStatus(on_validation_bit_change=self._validation_bit_changed)
self._master_version_last_updated = 0.0
self._settings_last_updated = 0.0
self._time_last_updated = 0.0
self._synchronization_thread = DaemonThread(name='mastersync',
target=self._synchronize,
interval=5, delay=10)
self._master_version = None # type: Optional[Tuple[int, int, int]]
self._communication_enabled = True
self._output_config = {} # type: Dict[int, OutputDTO]
self._shutters_interval = 600
self._shutters_last_updated = 0.0
self._shutter_config = {} # type: Dict[int, ShutterDTO]
self._sensor_last_updated = 0.0
self._sensors_interval = 10
self._validation_bits_interval = 1800
self._validation_bits_last_updated = 0.0
self._discover_mode_timer = None # type: Optional[Timer]
self._module_log = [] # type: List[Dict[str, Any]]
self._pubsub.subscribe_master_events(PubSub.MasterTopics.EEPROM, self._handle_eeprom_event)
self._pubsub.subscribe_master_events(PubSub.MasterTopics.MAINTENANCE, self._handle_maintenance_event)
self._background_consumers_registered = False
self._master_communicator.register_consumer(
BackgroundConsumer(master_api.output_list(), 0, self._on_master_output_event, True)
)
self._master_communicator.register_consumer(
BackgroundConsumer(master_api.module_initialize(), 0, self._process_module_initialize_message)
)
self._module_log_lock = Lock()
#################
# Private stuff #
#################
def _synchronize(self):
# type: () -> None
try:
if not self._communication_enabled:
logger.info('synchronization, skipped')
return
now = time.time()
if self._master_version is None or self._master_version_last_updated < now - 300:
self._get_master_version()
self._master_version_last_updated = now
self._register_version_depending_background_consumers()
# Validate communicator checks
if self._time_last_updated < now - 300:
self._check_master_time()
self._time_last_updated = now
if self._settings_last_updated < now - 900:
self._check_master_settings()
self._settings_last_updated = now
# Refresh if required
if self._validation_bits_last_updated + self._validation_bits_interval < now:
self._refresh_validation_bits()
if self._shutters_last_updated + self._shutters_interval < now:
self._refresh_shutter_states()
if self._sensor_last_updated + self._sensors_interval < now:
self._refresh_sensor_values()
except CommunicationTimedOutException:
logger.error('Got communication timeout during synchronization, waiting 10 seconds.')
raise DaemonThreadWait
except CommunicationFailure:
# This is an expected situation
raise DaemonThreadWait
def _get_master_version(self):
# type: () -> None
self._master_version = self.get_firmware_version()
def _register_version_depending_background_consumers(self):
if self._background_consumers_registered is True or self._master_version is None:
return
self._master_communicator.register_consumer(
BackgroundConsumer(master_api.event_triggered(self._master_version), 0,
self._on_master_event, True)
)
self._master_communicator.register_consumer(
BackgroundConsumer(master_api.input_list(self._master_version), 0,
self._on_master_input_change)
)
self._master_communicator.register_consumer(
BackgroundConsumer(master_api.shutter_status(self._master_version), 0,
self._on_master_shutter_change)
)
self._background_consumers_registered = True
@communication_enabled
def _check_master_time(self):
# type: () -> None
"""
Validates the master's time with the Gateway time
"""
status = self._master_communicator.do_command(master_api.status())
master_time = datetime(1, 1, 1, status['hours'], status['minutes'], status['seconds'])
now = datetime.now()
expected_weekday = now.weekday() + 1
expected_time = now.replace(year=1, month=1, day=1, microsecond=0)
sync = False
if abs((master_time - expected_time).total_seconds()) > 180: # Allow 3 minutes difference
sync = True
if status['weekday'] != expected_weekday:
sync = True
if sync is True:
logger.info('Time - master: {0} ({1}) - gateway: {2} ({3})'.format(
master_time, status['weekday'], expected_time, expected_weekday)
)
if expected_time.hour == 0 and expected_time.minute < 15:
logger.info('Skip setting time between 00:00 and 00:15')
else:
self.sync_time()
@communication_enabled
def _check_master_settings(self):
# type: () -> None
"""
Checks master settings such as:
* Enable large installation
* Enable async messages
* Enable multi-tenancy
* Enable 32 thermostats
* Turn on all leds
"""
eeprom_data = self._master_communicator.do_command(master_api.eeprom_list(),
{'bank': 0})['data']
write = False
if eeprom_data[11] != 255:
logger.info('Disabling async RO messages.')
self._master_communicator.do_command(
master_api.write_eeprom(),
{'bank': 0, 'address': 11, 'data': bytearray([255])}
)
write = True
if eeprom_data[18] != 0:
logger.info('Enabling async OL messages.')
self._master_communicator.do_command(
master_api.write_eeprom(),
{'bank': 0, 'address': 18, 'data': bytearray([0])}
)
write = True
if eeprom_data[20] != 0:
logger.info('Enabling async IL messages.')
self._master_communicator.do_command(
master_api.write_eeprom(),
{'bank': 0, 'address': 20, 'data': bytearray([0])}
)
write = True
if eeprom_data[28] != 0:
logger.info('Enabling async SO messages.')
self._master_communicator.do_command(
master_api.write_eeprom(),
{'bank': 0, 'address': 28, 'data': bytearray([0])}
)
write = True
thermostat_mode = eeprom_data[14]
if thermostat_mode & 64 == 0:
logger.info('Enabling multi-tenant thermostats.')
self._master_communicator.do_command(
master_api.write_eeprom(),
{'bank': 0, 'address': 14, 'data': bytearray([thermostat_mode | 64])}
)
write = True
if eeprom_data[59] != 32:
logger.info('Enabling 32 thermostats.')
self._master_communicator.do_command(
master_api.write_eeprom(),
{'bank': 0, 'address': 59, 'data': bytearray([32])}
)
write = True
if eeprom_data[24] != 0:
logger.info('Disable auto-reset thermostat setpoint')
self._master_communicator.do_command(
master_api.write_eeprom(),
{'bank': 0, 'address': 24, 'data': bytearray([0])}
)
write = True
if eeprom_data[13] != 0:
logger.info('Configure master startup mode to: API')
self._master_communicator.do_command(
master_api.write_eeprom(),
{'bank': 0, 'address': 13, 'data': bytearray([0])}
)
write = True
if write:
self._master_communicator.do_command(master_api.activate_eeprom(), {'eep': 0},
timeout=5)
self.set_status_leds(True)
def _handle_maintenance_event(self, master_event):
# type: (MasterEvent) -> None
if master_event.type == MasterEvent.Types.MAINTENANCE_EXIT:
self._eeprom_controller.invalidate_cache()
def _handle_eeprom_event(self, master_event):
# type: (MasterEvent) -> None
if master_event.type == MasterEvent.Types.EEPROM_CHANGE:
self._invalidate_caches()
def _on_master_event(self, event_data): # type: (Dict[str, Any]) -> None
""" Handle an event triggered by the master. """
event_type = event_data.get('event_type', 0)
if event_type == 0: # None or 0 are both event_type for 'code'
code = str(event_data['bytes'][0])
if self._plugin_controller is not None:
self._plugin_controller.process_event(code)
elif event_type == 1:
bit_nr = event_data['bytes'][0]
value = bool(event_data['bytes'][1])
self._on_master_validation_bit_change(bit_nr, value)
else:
logger.warning('Received unknown master event: {0}'.format(event_data))
def _on_master_output_event(self, data):
# type: (Dict[str,Any]) -> None
""" Triggers when the master informs us of an Output state change """
# Publish status of all outputs. Since the event from the master contains
# all outputs that are currently on, the output(s) that changed can't be
# determined here.
state = {k: (False, None) for k, v in self._output_config.items()}
for output_id, dimmer in data['outputs']:
state[output_id] = (True, dimmer)
for output_id, (status, dimmer) in state.items():
extra_kwargs = {}
if dimmer is not None:
extra_kwargs['dimmer'] = dimmer
state_dto = OutputStatusDTO(id=output_id,
status=status,
**extra_kwargs)
master_event = MasterEvent(event_type=MasterEvent.Types.OUTPUT_STATUS, data={'state': state_dto})
self._pubsub.publish_master_event(PubSub.MasterTopics.OUTPUT, master_event)
def _invalidate_caches(self):
# type: () -> None
self._shutters_last_updated = 0.0
self._synchronization_thread.request_single_run()
#######################
# Internal management #
#######################
def start(self):
# type: () -> None
super(MasterClassicController, self).start()
self._heartbeat.start()
self._synchronization_thread.start()
def stop(self):
# type: () -> None
self._synchronization_thread.stop()
self._heartbeat.stop()
super(MasterClassicController, self).stop()
def set_plugin_controller(self, plugin_controller):
"""
Set the plugin controller.
:param plugin_controller: Plugin controller
:type plugin_controller: plugins.base.PluginController
"""
self._plugin_controller = plugin_controller
##############
# Public API #
##############
def get_master_online(self):
# type: () -> bool
return self._time_last_updated > time.time() - 900 \
and self._heartbeat.is_online()
def get_communicator_health(self):
# type: () -> HEALTH
return self._heartbeat.get_communicator_health()
@communication_enabled
def get_firmware_version(self):
out_dict = self._master_communicator.do_command(master_api.status())
return int(out_dict['f1']), int(out_dict['f2']), int(out_dict['f3'])
# Input
@communication_enabled
def get_input_module_type(self, input_module_id):
o = self._eeprom_controller.read(eeprom_models.InputConfiguration, input_module_id * 8, ['module_type'])
return o.module_type
@communication_enabled
def load_input_status(self):
# type: () -> List[InputStatusDTO]
number_of_input_modules = self._master_communicator.do_command(master_api.number_of_io_modules())['in']
inputs = []
for i in range(number_of_input_modules):
# we could be dealing with e.g. a temperature module, skip those
module_type = self.get_input_module_type(i)
if module_type not in ['i', 'I']:
continue
result = self._master_communicator.do_command(master_api.read_input_module(self._master_version),
{'input_module_nr': i})
module_status = result['input_status']
# module_status byte contains bits for each individual input, use mask and bitshift to get status
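            # e.g. module_status == 0b00000101 means inputs n=0 and n=2 of this module are on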
for n in range(8):
input_nr = i * 8 + n
input_status = module_status & (1 << n) != 0
inputs.append(InputStatusDTO(input_nr, status=input_status))
return inputs
@communication_enabled
def load_input(self, input_id): # type: (int) -> InputDTO
classic_object = self._eeprom_controller.read(eeprom_models.InputConfiguration, input_id)
if classic_object.module_type not in ['i', 'I']: # Only return 'real' inputs
raise TypeError('The given id {0} is not an input, but {1}'.format(input_id, classic_object.module_type))
return InputMapper.orm_to_dto(classic_object)
@communication_enabled
def load_inputs(self): # type: () -> List[InputDTO]
return [InputMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(eeprom_models.InputConfiguration)
if o.module_type in ['i', 'I']] # Only return 'real' inputs
@communication_enabled
def save_inputs(self, inputs): # type: (List[InputDTO]) -> None
batch = []
for input_ in inputs:
batch.append(InputMapper.dto_to_orm(input_))
self._eeprom_controller.write_batch(batch)
def _on_master_input_change(self, data):
# type: (Dict[str,Any]) -> None
""" Triggers when the master informs us of an Input state change """
logger.debug('Got input event data from master {}'.format(data))
# previous versions of the master only sent rising edges, so default to True if not present in data
new_status = bool(data.get('status', True))
state_dto = InputStatusDTO(id=data['input'], status=new_status)
master_event = MasterEvent(event_type=MasterEvent.Types.INPUT_CHANGE, data={'state': state_dto})
self._pubsub.publish_master_event(PubSub.MasterTopics.INPUT, master_event)
@communication_enabled
def set_input(self, input_id, state):
# type: (int, bool) -> None
# https://wiki.openmotics.com/index.php/Virtual_Inputs
if input_id is None or input_id < 0 or input_id > 240:
raise ValueError('Input ID {0} not in range 0 <= id <= 240'.format(input_id))
if state:
self.do_basic_action(master_api.BA_INPUT_PRESS, input_id)
else:
self.do_basic_action(master_api.BA_INPUT_RELEASE, input_id)
# Outputs
@communication_enabled
def set_output(self, output_id, state, dimmer=None, timer=None):
if output_id is None or output_id < 0 or output_id > 240:
raise ValueError('Output ID {0} not in range 0 <= id <= 240'.format(output_id))
        if dimmer is not None and (dimmer < 0 or dimmer > 100):
raise ValueError('Dimmer value {0} not in [0, 100]'.format(dimmer))
if timer is not None and timer not in [150, 450, 900, 1500, 2220, 3120]:
raise ValueError('Timer value {0} not in [150, 450, 900, 1500, 2220, 3120]'.format(timer))
if dimmer is not None:
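            # Newer firmware (>= 3.143.79) takes an explicit dimmer value (the 0-100 %
            # input is rescaled by the 0.63 factor below); older firmware only supports
            # dimming through basic actions in steps of 10 %.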
master_version = self.get_firmware_version()
if master_version >= (3, 143, 79):
dimmer = int(0.63 * dimmer)
self._master_communicator.do_command(
master_api.write_dimmer(),
{'output_nr': output_id, 'dimmer_value': dimmer}
)
else:
                dimmer = int(dimmer) // 10 * 10  # floor to a multiple of 10 (integer division on Python 2 and 3)
if dimmer == 0:
dimmer_action = master_api.BA_DIMMER_MIN
elif dimmer == 100:
dimmer_action = master_api.BA_DIMMER_MAX
else:
dimmer_action = getattr(master_api, 'BA_LIGHT_ON_DIMMER_{0}'.format(dimmer))
self.do_basic_action(dimmer_action, output_id)
if not state:
self.do_basic_action(master_api.BA_LIGHT_OFF, output_id)
return
self.do_basic_action(master_api.BA_LIGHT_ON, output_id)
if timer is not None:
timer_action = getattr(master_api, 'BA_LIGHT_ON_TIMER_{0}_OVERRULE'.format(timer))
self.do_basic_action(timer_action, output_id)
@communication_enabled
def toggle_output(self, output_id):
if output_id is None or output_id < 0 or output_id > 240:
raise ValueError('Output ID {0} not in range 0 <= id <= 240'.format(output_id))
self.do_basic_action(master_api.BA_LIGHT_TOGGLE, output_id)
@communication_enabled
def load_output(self, output_id): # type: (int) -> OutputDTO
classic_object = self._eeprom_controller.read(eeprom_models.OutputConfiguration, output_id)
output_dto = OutputMapper.orm_to_dto(classic_object)
self._output_config[output_id] = output_dto
return output_dto
@communication_enabled
def load_outputs(self): # type: () -> List[OutputDTO]
output_dtos = [OutputMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(eeprom_models.OutputConfiguration)]
self._output_config = {output_dto.id: output_dto for output_dto in output_dtos}
return output_dtos
@communication_enabled
def save_outputs(self, outputs): # type: (List[OutputDTO]) -> None
batch = []
for output_dto in outputs:
batch.append(OutputMapper.dto_to_orm(output_dto))
self._eeprom_controller.write_batch(batch)
for output_dto in outputs:
if output_dto.timer is not None:
self._master_communicator.do_command(
master_api.write_timer(),
{'id': output_dto.id, 'timer': output_dto.timer}
)
@communication_enabled
def load_output_status(self):
# type: () -> List[OutputStatusDTO]
number_of_outputs = self._master_communicator.do_command(master_api.number_of_io_modules())['out'] * 8
output_status = []
for i in range(number_of_outputs):
data = self._master_communicator.do_command(master_api.read_output(), {'id': i})
output_status.append(OutputStatusDTO(id=i,
status=bool(data['status']),
ctimer=int(data['ctimer']),
dimmer=int(data['dimmer']),
locked=self._is_output_locked(data['id'])))
return output_status
def _is_output_locked(self, output_id):
# TODO remove self._output_config cache, this belongs in the output controller.
output_dto = self._output_config.get(output_id)
if output_dto is None:
output_dto = self.load_output(output_id)
if output_dto.lock_bit_id is not None:
value = self._validation_bits.get_validation_bit(output_dto.lock_bit_id)
locked = value
else:
locked = False
return locked
# Shutters
@communication_enabled
def shutter_up(self, shutter_id, timer=None): # type: (int, Optional[int]) -> None
if timer is not None:
if self._master_version is None or self._master_version < (3, 143, 113):
raise NotImplementedError('Shutter up with a timer is not supported on Master version {0}'.format(self._master_version))
self.do_basic_action(master_api.BA_SHUTTER_UP, shutter_id, parameter=timer)
else:
self.do_basic_action(master_api.BA_SHUTTER_UP, shutter_id)
@communication_enabled
def shutter_down(self, shutter_id, timer=None): # type: (int, Optional[int]) -> None
if timer is not None:
if self._master_version is None or self._master_version < (3, 143, 113):
raise NotImplementedError('Shutter down with a timer is not supported on Master version {0}'.format(self._master_version))
self.do_basic_action(master_api.BA_SHUTTER_DOWN, shutter_id, parameter=timer)
else:
self.do_basic_action(master_api.BA_SHUTTER_DOWN, shutter_id)
@communication_enabled
def shutter_stop(self, shutter_id): # type: (int) -> None
self.do_basic_action(master_api.BA_SHUTTER_STOP, shutter_id)
@communication_enabled
def load_shutter(self, shutter_id): # type: (int) -> ShutterDTO
classic_object = self._eeprom_controller.read(eeprom_models.ShutterConfiguration, shutter_id)
return ShutterMapper.orm_to_dto(classic_object)
@communication_enabled
def load_shutters(self): # type: () -> List[ShutterDTO]
return [ShutterMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(eeprom_models.ShutterConfiguration)]
@communication_enabled
def save_shutters(self, shutters): # type: (List[ShutterDTO]) -> None
batch = []
for shutter in shutters:
batch.append(ShutterMapper.dto_to_orm(shutter))
self._eeprom_controller.write_batch(batch)
@communication_enabled
def _refresh_shutter_states(self):
self._shutter_config = {shutter.id: shutter for shutter in self.load_shutters()}
number_of_shutter_modules = self._master_communicator.do_command(master_api.number_of_io_modules())['shutter']
for module_id in range(number_of_shutter_modules):
self._update_from_master_state(
{'module_nr': module_id,
'status': self._master_communicator.do_command(master_api.shutter_status(self._master_version),
{'module_nr': module_id})['status']}
)
self._shutters_last_updated = time.time()
def _on_master_shutter_change(self, data):
self._update_from_master_state(data)
def _update_from_master_state(self, data):
"""
Called with Master event information.
"""
module_id = data['module_nr']
new_state = self._interprete_output_states(module_id, data['status'])
if new_state is None:
return # Failsafe for master event handler
for i in range(4):
shutter_id = module_id * 4 + i
event_data = {'id': shutter_id,
'status': new_state[i],
'location': {'room_id': self._shutter_config[shutter_id].room}}
master_event = MasterEvent(event_type=MasterEvent.Types.SHUTTER_CHANGE, data=event_data)
self._pubsub.publish_master_event(PubSub.MasterTopics.SHUTTER, master_event)
def _interprete_output_states(self, module_id, output_states):
states = []
for i in range(4):
shutter_id = module_id * 4 + i
if shutter_id not in self._shutter_config:
return # Failsafe for master event handler
# first_up = 0 -> output 0 = up, output 1 = down
# first_up = 1 -> output 0 = down, output 1 = up
first_up = 0 if self._shutter_config[shutter_id].up_down_config == 0 else 1
up = (output_states >> (i * 2 + (1 - first_up))) & 0x1
down = (output_states >> (i * 2 + first_up)) & 0x1
if up == 1 and down == 0:
states.append(ShutterEnums.State.GOING_UP)
elif down == 1 and up == 0:
states.append(ShutterEnums.State.GOING_DOWN)
else: # Both are off or - unlikely - both are on
states.append(ShutterEnums.State.STOPPED)
return states
@communication_enabled
def shutter_group_up(self, shutter_group_id, timer=None): # type: (int, Optional[int]) -> None
if not (0 <= shutter_group_id <= 30):
raise ValueError('ShutterGroup ID {0} not in range 0 <= id <= 30'.format(shutter_group_id))
if timer is not None:
if self._master_version is None or self._master_version < (3, 143, 113):
raise NotImplementedError(
'Shutter group up with a timer is not supported on Master version {0}'.format(self._master_version))
            self.do_basic_action(master_api.BA_SHUTTER_GROUP_UP, shutter_group_id, parameter=timer)
        else:
            self.do_basic_action(master_api.BA_SHUTTER_GROUP_UP, shutter_group_id)
@communication_enabled
def shutter_group_down(self, shutter_group_id, timer=None): # type: (int, Optional[int]) -> None
if not (0 <= shutter_group_id <= 30):
raise ValueError('ShutterGroup ID {0} not in range 0 <= id <= 30'.format(shutter_group_id))
if timer is not None:
if self._master_version is None or self._master_version < (3, 143, 113):
raise NotImplementedError(
'Shutter group down with a timer is not supported on Master version {0}'.format(self._master_version))
            self.do_basic_action(master_api.BA_SHUTTER_GROUP_DOWN, shutter_group_id, parameter=timer)
        else:
            self.do_basic_action(master_api.BA_SHUTTER_GROUP_DOWN, shutter_group_id)
@communication_enabled
def shutter_group_stop(self, shutter_group_id): # type: (int) -> None
if not (0 <= shutter_group_id <= 30):
raise ValueError('ShutterGroup ID {0} not in range 0 <= id <= 30'.format(shutter_group_id))
self.do_basic_action(master_api.BA_SHUTTER_GROUP_STOP, shutter_group_id)
@communication_enabled
def load_shutter_group(self, shutter_group_id): # type: (int) -> ShutterGroupDTO
classic_object = self._eeprom_controller.read(eeprom_models.ShutterGroupConfiguration, shutter_group_id)
return ShutterGroupMapper.orm_to_dto(classic_object)
@communication_enabled
def load_shutter_groups(self): # type: () -> List[ShutterGroupDTO]
return [ShutterGroupMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(eeprom_models.ShutterGroupConfiguration)]
@communication_enabled
def save_shutter_groups(self, shutter_groups): # type: (List[ShutterGroupDTO]) -> None
batch = []
for shutter_group in shutter_groups:
batch.append(ShutterGroupMapper.dto_to_orm(shutter_group))
self._eeprom_controller.write_batch(batch)
# Thermostats
@communication_enabled
def set_thermostat_mode(self, mode):
# type: (int) -> None
self.do_basic_action(master_api.BA_THERMOSTAT_MODE, mode)
@communication_enabled
def set_thermostat_cooling_heating(self, mode):
# type: (int) -> None
self.do_basic_action(master_api.BA_THERMOSTAT_COOLING_HEATING, mode)
@communication_enabled
def set_thermostat_automatic(self, action_number):
# type: (int) -> None
self.do_basic_action(master_api.BA_THERMOSTAT_AUTOMATIC, action_number)
@communication_enabled
def set_thermostat_all_setpoints(self, setpoint):
# type: (int) -> None
self.do_basic_action(
getattr(master_api, 'BA_ALL_SETPOINT_{0}'.format(setpoint)), 0
)
@communication_enabled
def set_thermostat_setpoint(self, thermostat_id, setpoint):
# type: (int, int) -> None
self.do_basic_action(
getattr(master_api, 'BA_ONE_SETPOINT_{0}'.format(setpoint)), thermostat_id
)
@communication_enabled
def write_thermostat_setpoint(self, thermostat_id, temperature):
# type: (int, float) -> None
self._master_communicator.do_command(
master_api.write_setpoint(),
{'thermostat': thermostat_id,
'config': 0,
'temp': master_api.Svt.temp(temperature)}
)
@communication_enabled
def set_thermostat_tenant_auto(self, thermostat_id):
# type: (int) -> None
self.do_basic_action(master_api.BA_THERMOSTAT_TENANT_AUTO, thermostat_id)
@communication_enabled
def set_thermostat_tenant_manual(self, thermostat_id):
# type: (int) -> None
self.do_basic_action(master_api.BA_THERMOSTAT_TENANT_MANUAL, thermostat_id)
@communication_enabled
def get_thermostats(self):
# type: () -> Dict[str,Any]
return self._master_communicator.do_command(master_api.thermostat_list())
@communication_enabled
def get_thermostat_modes(self):
# type: () -> Dict[str,Any]
return self._master_communicator.do_command(master_api.thermostat_mode_list())
@communication_enabled
def load_airco_status(self):
# type: () -> ThermostatAircoStatusDTO
data = self._master_communicator.do_command(master_api.read_airco_status_bits())
return ThermostatAircoStatusDTO({i: data['ASB{0}'.format(i)] == 1 for i in range(32)})
@communication_enabled
def set_airco_status(self, thermostat_id, airco_on):
# type: (int, bool) -> None
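        # The airco state is encoded in the action number: thermostat_id switches the airco on,
        # thermostat_id + 100 switches it off.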
self.do_basic_action(
master_api.BA_THERMOSTAT_AIRCO_STATUS, thermostat_id + (0 if airco_on else 100)
)
@communication_enabled
def load_heating_thermostat(self, thermostat_id): # type: (int) -> ThermostatDTO
classic_object = self._eeprom_controller.read(eeprom_models.ThermostatConfiguration, thermostat_id)
return ThermostatMapper.orm_to_dto(classic_object)
@communication_enabled
def load_heating_thermostats(self): # type: () -> List[ThermostatDTO]
return [ThermostatMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(eeprom_models.ThermostatConfiguration)]
@communication_enabled
def save_heating_thermostats(self, thermostats): # type: (List[ThermostatDTO]) -> None
batch = []
for thermostat in thermostats:
batch.append(ThermostatMapper.dto_to_orm(ThermostatConfiguration, thermostat))
self._eeprom_controller.write_batch(batch)
@communication_enabled
def load_cooling_thermostat(self, thermostat_id): # type: (int) -> ThermostatDTO
classic_object = self._eeprom_controller.read(eeprom_models.CoolingConfiguration, thermostat_id)
return ThermostatMapper.orm_to_dto(classic_object)
@communication_enabled
def load_cooling_thermostats(self): # type: () -> List[ThermostatDTO]
return [ThermostatMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(eeprom_models.CoolingConfiguration)]
@communication_enabled
def save_cooling_thermostats(self, thermostats): # type: (List[ThermostatDTO]) -> None
batch = []
for thermostat in thermostats:
batch.append(ThermostatMapper.dto_to_orm(CoolingConfiguration, thermostat))
self._eeprom_controller.write_batch(batch)
@communication_enabled
def load_cooling_pump_group(self, pump_group_id): # type: (int) -> PumpGroupDTO
classic_object = self._eeprom_controller.read(CoolingPumpGroupConfiguration, pump_group_id)
return PumpGroupMapper.orm_to_dto(classic_object)
@communication_enabled
def load_cooling_pump_groups(self): # type: () -> List[PumpGroupDTO]
return [PumpGroupMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(CoolingPumpGroupConfiguration)]
@communication_enabled
def save_cooling_pump_groups(self, pump_groups): # type: (List[PumpGroupDTO]) -> None
batch = []
for pump_group in pump_groups:
batch.append(PumpGroupMapper.dto_to_orm(CoolingPumpGroupConfiguration, pump_group))
self._eeprom_controller.write_batch(batch)
@communication_enabled
def load_global_rtd10(self): # type: () -> GlobalRTD10DTO
classic_object = self._eeprom_controller.read(GlobalRTD10Configuration)
return GlobalRTD10Mapper.orm_to_dto(classic_object)
@communication_enabled
def save_global_rtd10(self, global_rtd10): # type: (GlobalRTD10DTO) -> None
classic_object = GlobalRTD10Mapper.dto_to_orm(global_rtd10)
self._eeprom_controller.write(classic_object)
@communication_enabled
def load_heating_rtd10(self, rtd10_id): # type: (int) -> RTD10DTO
classic_object = self._eeprom_controller.read(RTD10HeatingConfiguration, rtd10_id)
return RTD10Mapper.orm_to_dto(classic_object)
@communication_enabled
def load_heating_rtd10s(self): # type: () -> List[RTD10DTO]
return [RTD10Mapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(RTD10HeatingConfiguration)]
@communication_enabled
def save_heating_rtd10s(self, rtd10s): # type: (List[RTD10DTO]) -> None
batch = []
for rtd10_dto in rtd10s:
batch.append(RTD10Mapper.dto_to_orm(RTD10HeatingConfiguration, rtd10_dto))
self._eeprom_controller.write_batch(batch)
@communication_enabled
def load_cooling_rtd10(self, rtd10_id): # type: (int) -> RTD10DTO
classic_object = self._eeprom_controller.read(RTD10CoolingConfiguration, rtd10_id)
return RTD10Mapper.orm_to_dto(classic_object)
@communication_enabled
def load_cooling_rtd10s(self): # type: () -> List[RTD10DTO]
return [RTD10Mapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(RTD10CoolingConfiguration)]
@communication_enabled
def save_cooling_rtd10s(self, rtd10s): # type: (List[RTD10DTO]) -> None
batch = []
for rtd10_dto in rtd10s:
batch.append(RTD10Mapper.dto_to_orm(RTD10CoolingConfiguration, rtd10_dto))
self._eeprom_controller.write_batch(batch)
@communication_enabled
def load_thermostat_group(self):
# type: () -> ThermostatGroupDTO
classic_object = self._eeprom_controller.read(GlobalThermostatConfiguration)
return ThermostatGroupMapper.orm_to_dto(classic_object)
@communication_enabled
def save_thermostat_group(self, thermostat_group): # type: (ThermostatGroupDTO) -> None
if thermostat_group.outside_sensor_id is None:
# Works around a master issue where the thermostat would be turned off in case there is no outside sensor.
thermostat_group.threshold_temperature = 50
classic_object = ThermostatGroupMapper.dto_to_orm(thermostat_group)
self._eeprom_controller.write(classic_object)
@communication_enabled
def load_heating_pump_group(self, pump_group_id): # type: (int) -> PumpGroupDTO
classic_object = self._eeprom_controller.read(PumpGroupConfiguration, pump_group_id)
return PumpGroupMapper.orm_to_dto(classic_object)
@communication_enabled
def load_heating_pump_groups(self): # type: () -> List[PumpGroupDTO]
return [PumpGroupMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(PumpGroupConfiguration)]
@communication_enabled
def save_heating_pump_groups(self, pump_groups): # type: (List[PumpGroupDTO]) -> None
batch = []
for pump_group in pump_groups:
batch.append(PumpGroupMapper.dto_to_orm(PumpGroupConfiguration, pump_group))
self._eeprom_controller.write_batch(batch)
# Virtual modules
@communication_enabled
def add_virtual_output_module(self):
# type: () -> None
self._master_communicator.do_command(master_api.add_virtual_module(), {'vmt': 'o'})
self._broadcast_module_discovery()
@communication_enabled
def add_virtual_dim_control_module(self):
# type: () -> None
self._master_communicator.do_command(master_api.add_virtual_module(), {'vmt': 'd'})
self._broadcast_module_discovery()
@communication_enabled
def add_virtual_input_module(self):
# type: () -> None
self._master_communicator.do_command(master_api.add_virtual_module(), {'vmt': 'i'})
self._broadcast_module_discovery()
@communication_enabled
def add_virtual_sensor_module(self):
# type: () -> None
raise UnsupportedException()
# Generic
@communication_enabled
def get_status(self):
""" Get the status of the Master.
        :returns: dict with 'time' (HH:MM), 'date' (DD/MM/YYYY), 'mode', 'version' (a.b.c)
and 'hw_version' (hardware version)
"""
out_dict = self._master_communicator.do_command(master_api.status())
return {'time': '%02d:%02d' % (out_dict['hours'], out_dict['minutes']),
'date': '%02d/%02d/%d' % (out_dict['day'], out_dict['month'], out_dict['year']),
'mode': out_dict['mode'],
'version': '%d.%d.%d' % (out_dict['f1'], out_dict['f2'], out_dict['f3']),
'hw_version': out_dict['h']}
@communication_enabled
def get_modules(self):
""" Get a list of all modules attached and registered with the master.
        :returns: Dict with:
        * 'outputs' (list of output module types: O,R,D),
        * 'inputs' (list of input module types: I,T,L,C),
        * 'shutters' (list of shutter module types: S),
        * 'can_inputs' (list of CAN input module types).
"""
mods = self._master_communicator.do_command(master_api.number_of_io_modules())
inputs = []
outputs = []
shutters = []
can_inputs = []
for i in range(mods['in']):
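            # Input module i is described in EEPROM bank 2 + i: address 252 holds the CAN marker
            # ('C' for inputs connected via CAN) and address 0 holds the module type letter.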
ret = self._master_communicator.do_command(
master_api.read_eeprom(),
{'bank': 2 + i, 'addr': 252, 'num': 1}
)
module_type = chr(ret['data'][0])
is_can = module_type == 'C'
ret = self._master_communicator.do_command(
master_api.read_eeprom(),
{'bank': 2 + i, 'addr': 0, 'num': 1}
)
module_type = chr(ret['data'][0])
if is_can:
can_inputs.append(module_type)
else:
inputs.append(module_type)
for i in range(mods['out']):
ret = self._master_communicator.do_command(
master_api.read_eeprom(),
{'bank': 33 + i, 'addr': 0, 'num': 1}
)
module_type = chr(ret['data'][0])
outputs.append(module_type)
for shutter in range(mods['shutter']):
shutters.append('S')
if len(can_inputs) > 0 and 'C' not in can_inputs:
            can_inputs.append('C')  # First CAN-enabled installations didn't have this in the eeprom yet
return {'outputs': outputs, 'inputs': inputs, 'shutters': shutters, 'can_inputs': can_inputs}
@staticmethod
def _format_address(address_bytes):
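        # Example: _format_address(bytearray([73, 0, 5, 2])) -> '073.000.005.002'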
return '{0:03}.{1:03}.{2:03}.{3:03}'.format(address_bytes[0],
address_bytes[1],
address_bytes[2],
address_bytes[3])
@communication_enabled
def get_modules_information(self): # type: () -> List[ModuleDTO]
""" Gets module information """
def get_master_version(_module_address):
try:
_module_version = self._master_communicator.do_command(master_api.get_module_version(),
{'addr': _module_address.bytes},
extended_crc=True,
timeout=5)
_firmware_version = '{0}.{1}.{2}'.format(_module_version['f1'], _module_version['f2'], _module_version['f3'])
return True, _module_version['hw_version'], _firmware_version
except CommunicationTimedOutException:
return False, None, None
information = []
module_type_lookup = {'c': ModuleDTO.ModuleType.CAN_CONTROL,
't': ModuleDTO.ModuleType.SENSOR,
'i': ModuleDTO.ModuleType.INPUT,
'o': ModuleDTO.ModuleType.OUTPUT,
'r': ModuleDTO.ModuleType.SHUTTER,
'd': ModuleDTO.ModuleType.DIM_CONTROL}
no_modules = self._master_communicator.do_command(master_api.number_of_io_modules())
for i in range(no_modules['in']):
is_can = self._eeprom_controller.read_address(EepromAddress(2 + i, 252, 1)).bytes == bytearray(b'C')
module_address = self._eeprom_controller.read_address(EepromAddress(2 + i, 0, 4))
module_type_letter = chr(module_address.bytes[0]).lower()
is_virtual = chr(module_address.bytes[0]).islower()
formatted_address = MasterClassicController._format_address(module_address.bytes)
hardware_type = ModuleDTO.HardwareType.PHYSICAL
if is_virtual:
hardware_type = ModuleDTO.HardwareType.VIRTUAL
elif is_can and module_type_letter != 'c':
hardware_type = ModuleDTO.HardwareType.EMULATED
dto = ModuleDTO(source=ModuleDTO.Source.MASTER,
address=formatted_address,
module_type=module_type_lookup.get(module_type_letter),
hardware_type=hardware_type,
order=i)
if hardware_type == ModuleDTO.HardwareType.PHYSICAL:
dto.online, dto.hardware_version, dto.firmware_version = get_master_version(module_address)
information.append(dto)
for i in range(no_modules['out']):
module_address = self._eeprom_controller.read_address(EepromAddress(33 + i, 0, 4))
module_type_letter = chr(module_address.bytes[0]).lower()
is_virtual = chr(module_address.bytes[0]).islower()
formatted_address = MasterClassicController._format_address(module_address.bytes)
dto = ModuleDTO(source=ModuleDTO.Source.MASTER,
address=formatted_address,
module_type=module_type_lookup.get(module_type_letter),
hardware_type=(ModuleDTO.HardwareType.VIRTUAL if is_virtual else
ModuleDTO.HardwareType.PHYSICAL),
order=i)
if not is_virtual:
dto.online, dto.hardware_version, dto.firmware_version = get_master_version(module_address)
information.append(dto)
for i in range(no_modules['shutter']):
module_address = self._eeprom_controller.read_address(EepromAddress(33 + i, 173, 4))
module_type_letter = chr(module_address.bytes[0]).lower()
is_virtual = chr(module_address.bytes[0]).islower()
formatted_address = MasterClassicController._format_address(module_address.bytes)
dto = ModuleDTO(source=ModuleDTO.Source.MASTER,
address=formatted_address,
module_type=module_type_lookup.get(module_type_letter),
hardware_type=(ModuleDTO.HardwareType.VIRTUAL if is_virtual else
ModuleDTO.HardwareType.PHYSICAL),
order=i)
if not is_virtual:
dto.online, dto.hardware_version, dto.firmware_version = get_master_version(module_address)
information.append(dto)
return information
def replace_module(self, old_address, new_address): # type: (str, str) -> None
old_address_bytes = bytearray([int(part) for part in old_address.split('.')])
new_address_bytes = bytearray([int(part) for part in new_address.split('.')])
no_modules = self._master_communicator.do_command(master_api.number_of_io_modules())
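        # Replacement strategy: the new module must be the last discovered module of the same type.
        # Its address is written over the slot of the old module and the module count for that type
        # is decremented, so the old module is effectively replaced in place.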
amount_of_inputs = no_modules['in']
for i in range(amount_of_inputs):
eeprom_address = EepromAddress(2 + i, 0, 4)
module_address = self._eeprom_controller.read_address(eeprom_address).bytes
if module_address == old_address_bytes:
new_module_address = self._eeprom_controller.read_address(EepromAddress(2 + amount_of_inputs - 1, 0, 4)).bytes
if new_module_address == new_address_bytes:
self._eeprom_controller.write_address(eeprom_address, new_address_bytes)
self._eeprom_controller.write_address(EepromAddress(0, 1, 1), bytearray([amount_of_inputs - 1]))
self._eeprom_controller.activate()
logger.warn('Replaced {0} by {1}'.format(old_address, new_address))
return
amount_of_outputs = no_modules['out']
for i in range(amount_of_outputs):
eeprom_address = EepromAddress(33 + i, 0, 4)
module_address = self._eeprom_controller.read_address(eeprom_address).bytes
if module_address == old_address_bytes:
new_module_address = self._eeprom_controller.read_address(EepromAddress(33 + amount_of_outputs - 1, 0, 4)).bytes
if new_module_address == new_address_bytes:
self._eeprom_controller.write_address(eeprom_address, new_address_bytes)
self._eeprom_controller.write_address(EepromAddress(0, 2, 1), bytearray([amount_of_outputs - 1]))
self._eeprom_controller.activate()
logger.warn('Replaced {0} by {1}'.format(old_address, new_address))
return
amount_of_shutters = no_modules['shutter']
for i in range(amount_of_shutters):
eeprom_address = EepromAddress(33 + i, 173, 4)
module_address = self._eeprom_controller.read_address(eeprom_address).bytes
if module_address == old_address_bytes:
new_module_address = self._eeprom_controller.read_address(EepromAddress(33 + amount_of_shutters - 1, 173, 4)).bytes
if new_module_address == new_address_bytes:
self._eeprom_controller.write_address(eeprom_address, new_address_bytes)
self._eeprom_controller.write_address(EepromAddress(0, 3, 1), bytearray([amount_of_shutters - 1]))
self._eeprom_controller.activate()
logger.warn('Replaced {0} by {1}'.format(old_address, new_address))
return
raise RuntimeError('Could not correctly match modules {0} and {1}'.format(old_address, new_address))
@communication_enabled
def flash_leds(self, led_type, led_id): # type: (int, int) -> str
"""
Flash the leds on the module for an output/input/sensor.
:param led_type: The module type, see `IndicateType`.
:param led_id: The id of the output/input/sensor.
"""
ret = self._master_communicator.do_command(master_api.indicate(),
{'type': led_type, 'id': led_id})
return ret['resp']
@communication_enabled
def get_backup(self):
"""
Get a backup of the eeprom of the master.
:returns: String of bytes (size = 64kb).
"""
retry = None
output = bytearray()
bank = 0
while bank < 256:
try:
output += self._master_communicator.do_command(
master_api.eeprom_list(),
{'bank': bank}
)['data']
bank += 1
except CommunicationTimedOutException:
if retry == bank:
raise
retry = bank
logger.warning('Got timeout reading bank {0}. Retrying...'.format(bank))
                time.sleep(2)  # Doing heavy reads on eeprom can exhaust the master. Give it a bit of room to breathe.
return ''.join(chr(c) for c in output)
def factory_reset(self, can=False):
# type: (bool) -> None
# Wipe CC EEPROM
# https://wiki.openmotics.com/index.php/API_Reference_Guide#FX_-.3E_Erase_external_Eeprom_slave_modules_and_perform_factory_reset
# Erasing CAN EEPROM first because the master needs to have the module information
if can:
self.can_control_factory_reset()
# Wipe master EEPROM
data = chr(255) * (256 * 256)
self.restore(data)
def can_control_factory_reset(self):
mods = self._master_communicator.do_command(master_api.number_of_io_modules())
for i in range(mods['in']):
is_can = self._eeprom_controller.read_address(EepromAddress(2 + i, 252, 1)).bytes == bytearray(b'C')
if is_can:
module_address = self._eeprom_controller.read_address(EepromAddress(2 + i, 0, 4))
module_type_letter = chr(module_address.bytes[0]).lower()
is_virtual = chr(module_address.bytes[0]).islower()
formatted_address = MasterClassicController._format_address(module_address.bytes)
if not is_virtual and module_type_letter == 'c':
try:
                        logger.info("Resetting CAN EEPROM, address: {0}".format(formatted_address))
self._master_communicator.do_command(master_api.erase_can_eeprom(),
{'addr': module_address.bytes, 'instr': 0},
extended_crc=True, timeout=5)
except CommunicationTimedOutException:
logger.error('Got communication timeout during FX call')
def cold_reset(self, power_on=True):
# type: (bool) -> None
"""
Perform a cold reset on the master. Turns the power off, waits 5 seconds and turns the power back on.
"""
MasterClassicController._set_master_power(False)
if power_on:
time.sleep(5)
MasterClassicController._set_master_power(True)
self._master_communicator.reset_communication_statistics()
@communication_enabled
def raw_action(self, action, size, data=None):
# type: (str, int, Optional[bytearray]) -> Dict[str,Any]
"""
Send a raw action to the master.
"""
return self._master_communicator.do_raw_action(action, size, data=data)
@Inject
def update_master(self, hex_filename, controller_serial=INJECTED):
# type: (str, Serial) -> None
try:
self._communication_enabled = False
self._heartbeat.stop()
self._master_communicator.update_mode_start()
port = controller_serial.port # type: ignore
baudrate = str(controller_serial.baudrate) # type: ignore
base_command = ['/opt/openmotics/bin/AN1310cl', '-d', port, '-b', baudrate]
timings = [[2, 2, 2, 2], [2, 2, 2, 1],
[2, 2, 3, 2], [2, 2, 3, 1],
[2, 2, 4, 2], [2, 2, 4, 1]]
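            # Each timing entry lists delays in seconds: [after asserting the break condition,
            # power-off hold, power-on wait, settle time after releasing the break condition].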
logger.info('Updating master...')
logger.info('* Enter bootloader...')
bootloader_active = False
for timing in timings:
# Setting this condition will assert a break condition on TX to which the bootloader will react.
controller_serial.break_condition = True
time.sleep(timing[0])
MasterClassicController._set_master_power(False)
time.sleep(timing[1])
MasterClassicController._set_master_power(True)
time.sleep(timing[2])
# After the bootloader is active, release the break condition to free up TX for subsequent communications
controller_serial.break_condition = False
time.sleep(timing[3])
logger.info('* Verify bootloader...')
try:
response = str(subprocess.check_output(base_command + ['-s']))
# Expected response:
# > Serial Bootloader AN1310 v1.05r
# > Copyright (c) 2010-2011, Microchip Technology Inc.
# >
# > Using /dev/ttyO5 at 115200 bps
# > Connecting...
# > Bootloader Firmware v1.05
# > PIC18F67J11 Revision 10
match = re.findall(pattern=r'Bootloader Firmware (v[0-9]+\.[0-9]+).*(PIC.*) Revision',
string=response,
flags=re.DOTALL)
if not match:
logger.info('Bootloader response did not match: {0}'.format(response))
continue
logger.debug(response)
logger.info(' * Bootloader information: {1} bootloader {0}'.format(*match[0]))
bootloader_active = True
break
except subprocess.CalledProcessError as ex:
logger.info(ex.output)
raise
if bootloader_active is False:
raise RuntimeError('Failed to go into Bootloader - try other timings')
logger.info('* Flashing...')
try:
response = str(subprocess.check_output(base_command + ['-p ', '-c', hex_filename]))
logger.debug(response)
except subprocess.CalledProcessError as ex:
logger.info(ex.output)
raise
logger.info('* Verifying...')
try:
response = str(subprocess.check_output(base_command + ['-v', hex_filename]))
logger.debug(response)
except subprocess.CalledProcessError as ex:
logger.info(ex.output)
raise
logger.info('* Entering application...')
try:
response = str(subprocess.check_output(base_command + ['-r']))
logger.debug(response)
except subprocess.CalledProcessError as ex:
logger.info(ex.output)
raise
logger.info('Update completed')
finally:
self._master_communicator.update_mode_stop()
self._heartbeat.start()
self._communication_enabled = True
@Inject
def update_slave_modules(self, module_type, hex_filename):
# type: (str, str) -> None
try:
self._communication_enabled = False
self._heartbeat.stop()
bootload_modules(module_type, hex_filename, False, None)
finally:
self._heartbeat.start()
self._communication_enabled = True
@staticmethod
def _set_master_power(on):
with open('/sys/class/gpio/gpio44/direction', 'w') as gpio:
gpio.write('out')
with open('/sys/class/gpio/gpio44/value', 'w') as gpio:
gpio.write('1' if on else '0')
@communication_enabled
def reset(self):
""" Reset the master.
        :returns: empty dict.
"""
self._master_communicator.do_command(master_api.reset())
return dict()
def power_cycle_master(self):
self.cold_reset()
return dict()
@communication_enabled
@Inject
def power_cycle_bus(self, energy_communicator=INJECTED):
""" Turns the power of both bussed off for 5 seconds """
self.do_basic_action(master_api.BA_POWER_CYCLE_BUS, 0)
if energy_communicator:
energy_communicator.reset_communication_statistics() # TODO cleanup, use an event instead?
@communication_enabled
def restore(self, data):
"""
Restore a backup of the eeprom of the master.
:param data: The eeprom backup to restore.
:type data: string of bytes (size = 64 kb).
:returns: dict with 'output' key (contains an array with the addresses that were written).
"""
ret = []
(num_banks, bank_size, write_size) = (256, 256, 10)
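        # The backup is compared bank per bank in chunks of write_size bytes; only the chunks that
        # differ from the current EEPROM contents are written back to the master.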
backup_data = bytearray(ord(c) for c in data)
for bank in range(0, num_banks):
current_data = self._master_communicator.do_command(master_api.eeprom_list(),
{'bank': bank})['data']
for addr in range(0, bank_size, write_size):
current = current_data[addr:addr + write_size]
new = backup_data[bank * bank_size + addr: bank * bank_size + addr + len(current)]
if new != current:
ret.append('B' + str(bank) + 'A' + str(addr))
self._master_communicator.do_command(
master_api.write_eeprom(),
{'bank': bank, 'address': addr, 'data': new}
)
self._master_communicator.do_command(master_api.activate_eeprom(), {'eep': 0},
timeout=5)
self.cold_reset()
ret.append('Activated eeprom')
self._eeprom_controller.invalidate_cache()
return {'output': ret}
@communication_enabled
def sync_time(self):
# type: () -> None
logger.info('Setting the time on the master.')
now = datetime.now()
self._master_communicator.do_command(
master_api.set_time(),
{'sec': now.second, 'min': now.minute, 'hours': now.hour,
'weekday': now.isoweekday(), 'day': now.day, 'month': now.month,
'year': now.year % 100}
)
def get_configuration_dirty_flag(self):
# type: () -> bool
dirty = self._eeprom_controller.dirty
        # FIXME: this assumes a full sync will finish after this is called, e.g. a response
        # timeout clears the dirty state even though no sync was actually started on the
        # remote side.
self._eeprom_controller.dirty = False
return dirty
# Module functions
def _process_module_initialize_message(self, api_data):
# type: (Dict[str, Any]) -> None
"""
Create a log entry when the MI message is received.
> {'instr': 'E', 'module_nr': 0, 'io_type': 2, 'padding': '', 'literal': '', 'data': 1, 'id': 'I@7%'}
"""
try:
code_map = {'N': 'New',
'E': 'Existing',
'D': 'Duplicate'}
category_map = {0: 'SHUTTER',
1: 'OUTPUT',
2: 'INPUT'}
address = MasterClassicController._format_address(api_data['id'])
module_type = chr(api_data['id'][0])
with self._module_log_lock:
self._module_log.append({'code': code_map.get(api_data['instr'], 'UNKNOWN').upper(),
'module_nr': api_data['module_nr'],
'category': category_map[api_data['io_type']],
'module_type': module_type,
'address': address})
logger.info('Initialize/discovery - {0} module found: {1} ({2})'.format(
code_map.get(api_data['instr'], 'Unknown'),
api_data['id'][0],
address
))
except Exception:
logger.exception('Could not process initialization message')
@communication_enabled
def module_discover_start(self, timeout): # type: (int) -> None
def _stop(): self.module_discover_stop()
logger.debug('triggering module discovery start')
self._master_communicator.do_command(master_api.module_discover_start())
if self._discover_mode_timer is not None:
self._discover_mode_timer.cancel()
self._discover_mode_timer = Timer(timeout, _stop)
self._discover_mode_timer.start()
with self._module_log_lock:
self._module_log = []
@communication_enabled
def module_discover_stop(self): # type: () -> None
logger.debug('triggering module discovery stop')
if self._discover_mode_timer is not None:
self._discover_mode_timer.cancel()
self._discover_mode_timer = None
self._master_communicator.do_command(master_api.module_discover_stop())
self._broadcast_module_discovery()
with self._module_log_lock:
self._module_log = []
def module_discover_status(self): # type: () -> bool
return self._discover_mode_timer is not None
def get_module_log(self): # type: () -> List[Dict[str, Any]]
with self._module_log_lock:
(log, self._module_log) = (self._module_log, [])
return log
def _broadcast_module_discovery(self):
# type: () -> None
self._eeprom_controller.invalidate_cache()
master_event = MasterEvent(event_type=MasterEvent.Types.MODULE_DISCOVERY, data={})
self._pubsub.publish_master_event(PubSub.MasterTopics.MODULE, master_event)
# Error functions
@communication_enabled
def error_list(self):
""" Get the error list per module (input and output modules). The modules are identified by
O1, O2, I1, I2, ...
:returns: dict with 'errors' key, it contains list of tuples (module, nr_errors).
"""
error_list = self._master_communicator.do_command(master_api.error_list())
return error_list['errors']
@communication_enabled
def last_success(self):
""" Get the number of seconds since the last successful communication with the master.
"""
return self._master_communicator.get_seconds_since_last_success()
@communication_enabled
def clear_error_list(self):
""" Clear the number of errors.
:returns: empty dict.
"""
self._master_communicator.do_command(master_api.clear_error_list())
return dict()
@communication_enabled
def set_status_leds(self, status):
""" Set the status of the leds on the master.
:param status: whether the leds should be on or off.
:type status: boolean.
:returns: empty dict.
"""
on = 1 if status is True else 0
self.do_basic_action(master_api.BA_STATUS_LEDS, on)
return dict()
# (Group)Actions
@communication_enabled
def do_basic_action(self, action_type, action_number, parameter=None, timeout=2): # type: (int, int, Optional[int], int) -> None
"""
Execute a basic action.
:param action_type: The type of the action as defined by the master api.
:param action_number: The number provided to the basic action, its meaning depends on the action_type.
:param parameter: An (optional) parameter for the basic action
:param timeout: An (optional) timeout for the basic action
"""
if action_type < 0 or action_type > 254:
raise ValueError('action_type not in [0, 254]: %d' % action_type)
if action_number < 0 or action_number > 255:
raise ValueError('action_number not in [0, 255]: %d' % action_number)
fields = {'action_type': action_type,
'action_number': action_number}
if parameter is None:
logger.info('BA: Execute {0} {1}'.format(action_type, action_number))
command_spec = master_api.basic_action(self._master_version)
else:
if parameter < 0 or parameter > 65535:
raise ValueError('parameter not in [0, 65535]: %d' % parameter)
fields.update({'parameter': parameter})
logger.info('BA: Execute {0} {1} P {2}'.format(action_type, action_number, parameter))
command_spec = master_api.basic_action(self._master_version, use_param=True)
self._master_communicator.do_command(command_spec, fields=fields, timeout=timeout)
@communication_enabled
def do_group_action(self, group_action_id): # type: (int) -> None
if group_action_id < 0 or group_action_id > 159:
            raise ValueError('group_action_id not in [0, 159]: %d' % group_action_id)
self.do_basic_action(master_api.BA_GROUP_ACTION, group_action_id)
@communication_enabled
def load_group_action(self, group_action_id): # type: (int) -> GroupActionDTO
classic_object = self._eeprom_controller.read(eeprom_models.GroupActionConfiguration, group_action_id)
return GroupActionMapper.orm_to_dto(classic_object)
@communication_enabled
def load_group_actions(self): # type: () -> List[GroupActionDTO]
return [GroupActionMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(eeprom_models.GroupActionConfiguration)]
@communication_enabled
def save_group_actions(self, group_actions): # type: (List[GroupActionDTO]) -> None
batch = []
for group_action in group_actions:
batch.append(GroupActionMapper.dto_to_orm(group_action))
self._eeprom_controller.write_batch(batch)
# Schedules
@communication_enabled
def load_scheduled_action(self, scheduled_action_id): # type: (int) -> LegacyScheduleDTO
classic_object = self._eeprom_controller.read(ScheduledActionConfiguration, scheduled_action_id)
return LegacyScheduleMapper.orm_to_dto(classic_object)
@communication_enabled
def load_scheduled_actions(self): # type: () -> List[LegacyScheduleDTO]
return [LegacyScheduleMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(ScheduledActionConfiguration)]
@communication_enabled
def save_scheduled_actions(self, scheduled_actions): # type: (List[LegacyScheduleDTO]) -> None
batch = []
for schedule in scheduled_actions:
batch.append(LegacyScheduleMapper.dto_to_orm(schedule))
self._eeprom_controller.write_batch(batch)
@communication_enabled
def load_startup_action(self): # type: () -> LegacyStartupActionDTO
classic_object = self._eeprom_controller.read(StartupActionConfiguration)
return LegacyStartupActionMapper.orm_to_dto(classic_object)
@communication_enabled
def save_startup_action(self, startup_action):
# type: (LegacyStartupActionDTO) -> None
self._eeprom_controller.write(LegacyStartupActionMapper.dto_to_orm(startup_action))
# Dimmer functions
@communication_enabled
def load_dimmer_configuration(self):
# type: () -> DimmerConfigurationDTO
classic_object = self._eeprom_controller.read(DimmerConfiguration)
return DimmerConfigurationMapper.orm_to_dto(classic_object)
@communication_enabled
def save_dimmer_configuration(self, dimmer_configuration_dto):
# type: (DimmerConfigurationDTO) -> None
self._eeprom_controller.write(DimmerConfigurationMapper.dto_to_orm(dimmer_configuration_dto))
# Can Led functions
@communication_enabled
def load_global_feedback(self, global_feedback_id): # type: (int) -> GlobalFeedbackDTO
classic_object = self._eeprom_controller.read(eeprom_models.CanLedConfiguration, global_feedback_id)
return GlobalFeedbackMapper.orm_to_dto(classic_object)
@communication_enabled
def load_global_feedbacks(self): # type: () -> List[GlobalFeedbackDTO]
return [GlobalFeedbackMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(eeprom_models.CanLedConfiguration)]
@communication_enabled
def save_global_feedbacks(self, global_feedbacks): # type: (List[GlobalFeedbackDTO]) -> None
batch = []
for global_feedback in global_feedbacks:
batch.append(GlobalFeedbackMapper.dto_to_orm(global_feedback))
self._eeprom_controller.write_batch(batch)
# All lights functions
@communication_enabled
def set_all_lights(self, action, output_ids=None):
# type: (Literal['ON', 'OFF', 'TOGGLE'], Optional[List[int]]) -> None
# TODO: Use output_ids if needed
if action == 'OFF':
self.do_basic_action(master_api.BA_ALL_LIGHTS_OFF, 0)
elif action == 'ON':
self.do_basic_action(master_api.BA_LIGHTS_ON_FLOOR, 255)
elif action == 'TOGGLE':
self.do_basic_action(master_api.BA_LIGHTS_TOGGLE_FLOOR, 255)
# Sensors
@communication_enabled
def _refresh_sensor_values(self): # type: () -> None
try:
# poll for latest sensor values
for i, value in enumerate(self.get_sensors_temperature()):
if value is None:
continue
master_event = MasterEvent(event_type=MasterEvent.Types.SENSOR_VALUE,
data={'sensor': i, 'type': MasterEvent.SensorType.TEMPERATURE, 'value': value})
self._pubsub.publish_master_event(PubSub.MasterTopics.SENSOR, master_event)
for i, value in enumerate(self.get_sensors_humidity()):
if value is None:
continue
master_event = MasterEvent(event_type=MasterEvent.Types.SENSOR_VALUE,
data={'sensor': i, 'type': MasterEvent.SensorType.HUMIDITY, 'value': value})
self._pubsub.publish_master_event(PubSub.MasterTopics.SENSOR, master_event)
for i, value in enumerate(self.get_sensors_brightness()):
if value is None:
continue
master_event = MasterEvent(event_type=MasterEvent.Types.SENSOR_VALUE,
data={'sensor': i, 'type': MasterEvent.SensorType.BRIGHTNESS, 'value': value})
self._pubsub.publish_master_event(PubSub.MasterTopics.SENSOR, master_event)
except NotImplementedError as e:
logger.error('Cannot refresh sensors: {}'.format(e))
self._sensor_last_updated = time.time()
def get_sensor_temperature(self, sensor_id):
if sensor_id is None or sensor_id < 0 or sensor_id > 31:
raise ValueError('Sensor ID {0} not in range 0 <= id <= 31'.format(sensor_id))
return self.get_sensors_temperature()[sensor_id]
@communication_enabled
def get_sensors_temperature(self):
temperatures = []
sensor_list = self._master_communicator.do_command(master_api.sensor_temperature_list())
for i in range(32):
temperatures.append(sensor_list['tmp{0}'.format(i)].get_temperature())
return temperatures
@communication_enabled
def get_sensor_humidity(self, sensor_id):
if sensor_id is None or sensor_id < 0 or sensor_id > 31:
raise ValueError('Sensor ID {0} not in range 0 <= id <= 31'.format(sensor_id))
return self.get_sensors_humidity()[sensor_id]
@communication_enabled
def get_sensors_humidity(self):
humidities = []
sensor_list = self._master_communicator.do_command(master_api.sensor_humidity_list())
for i in range(32):
humidities.append(sensor_list['hum{0}'.format(i)].get_humidity())
return humidities
def get_sensor_brightness(self, sensor_id):
if sensor_id is None or sensor_id < 0 or sensor_id > 31:
raise ValueError('Sensor ID {0} not in range 0 <= id <= 31'.format(sensor_id))
return self.get_sensors_brightness()[sensor_id]
@communication_enabled
def get_sensors_brightness(self):
brightnesses = []
sensor_list = self._master_communicator.do_command(master_api.sensor_brightness_list())
for i in range(32):
brightnesses.append(sensor_list['bri{0}'.format(i)].get_brightness())
return brightnesses
@communication_enabled
def set_virtual_sensor(self, sensor_id, temperature, humidity, brightness):
if sensor_id is None or sensor_id < 0 or sensor_id > 31:
raise ValueError('Sensor ID {0} not in range 0 <= id <= 31'.format(sensor_id))
self._master_communicator.do_command(
master_api.set_virtual_sensor(),
{'sensor': sensor_id,
'tmp': master_api.Svt.temp(temperature),
'hum': master_api.Svt.humidity(humidity),
'bri': master_api.Svt.brightness(brightness)}
)
@communication_enabled
def load_sensor(self, sensor_id): # type: (int) -> MasterSensorDTO
classic_object = self._eeprom_controller.read(eeprom_models.SensorConfiguration, sensor_id)
return SensorMapper.orm_to_dto(classic_object)
@communication_enabled
def load_sensors(self): # type: () -> List[MasterSensorDTO]
return [SensorMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(eeprom_models.SensorConfiguration)]
@communication_enabled
def save_sensors(self, sensors): # type: (List[MasterSensorDTO]) -> None
batch = []
for sensor in sensors:
batch.append(SensorMapper.dto_to_orm(sensor))
self._eeprom_controller.write_batch(batch)
# PulseCounters
@communication_enabled
def load_pulse_counter(self, pulse_counter_id): # type: (int) -> PulseCounterDTO
classic_object = self._eeprom_controller.read(eeprom_models.PulseCounterConfiguration, pulse_counter_id)
return PulseCounterMapper.orm_to_dto(classic_object)
@communication_enabled
def load_pulse_counters(self): # type: () -> List[PulseCounterDTO]
return [PulseCounterMapper.orm_to_dto(o)
for o in self._eeprom_controller.read_all(eeprom_models.PulseCounterConfiguration)]
@communication_enabled
def save_pulse_counters(self, pulse_counters): # type: (List[PulseCounterDTO]) -> None
batch = []
for pulse_counter in pulse_counters:
batch.append(PulseCounterMapper.dto_to_orm(pulse_counter))
self._eeprom_controller.write_batch(batch)
@communication_enabled
def get_pulse_counter_values(self): # type: () -> Dict[int, int]
out_dict = self._master_communicator.do_command(master_api.pulse_list())
return {i: out_dict['pv{0}'.format(i)] for i in range(24)}
# Validation bits
@communication_enabled
def load_validation_bits(self): # type: () -> Optional[Dict[int, bool]]
if self._master_version is None or self._master_version < (3, 143, 102):
return None
number_of_bits = 256
bytes_per_call = 11
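        # Each call to the master returns bytes_per_call bytes (88 bits) starting at the requested
        # bit, so the full set of 256 validation bits is collected in a few batched reads.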
def load_bits_batch(start_bit): # type: (int) -> Dict[int, bool]
batch = {} # type: Dict[int, bool]
response = self._master_communicator.do_command(master_api.read_user_information(self._master_version),
{'information_type': 0, 'number': start_bit})
for byte_index in range(bytes_per_call):
for bit_index in range(8):
bit_nr = start_bit + (byte_index * 8) + bit_index
if bit_nr == number_of_bits:
                        return batch
bit_value = bool(response['data'][byte_index] & (1 << bit_index))
batch[bit_nr] = bit_value
return batch
bits = {}
bit_pointer = 0
while True:
bits.update(load_bits_batch(bit_pointer))
bit_pointer = max(*bits.keys()) + 1
if bit_pointer == 256:
break
return bits
def _refresh_validation_bits(self):
current_bit_states = self.load_validation_bits()
if current_bit_states is not None:
self._validation_bits.full_update(current_bit_states)
self._validation_bits_last_updated = time.time()
def _on_master_validation_bit_change(self, bit_nr, value): # type: (int, bool) -> None
self._validation_bits.update(bit_nr, value)
def _validation_bit_changed(self, bit_nr, value):
# loop over all outputs and update the locked status if the bit_nr is associated with this output
for output_id, output_dto in six.iteritems(self._output_config):
if output_dto.lock_bit_id == bit_nr:
master_event = MasterEvent(event_type=MasterEvent.Types.OUTPUT_STATUS,
data={'state': OutputStatusDTO(id=output_id,
locked=value)})
self._pubsub.publish_master_event(PubSub.MasterTopics.OUTPUT, master_event)
| agpl-3.0 | 4,357,802,463,815,903,000 | 44.283008 | 138 | 0.598379 | false |
netgroup/Dreamer-VLL-Pusher | floodlight/vll_pusher.py | 1 | 24250 | #!/usr/bin/python
##############################################################################################
# Copyright (C) 2014 Pier Luigi Ventre - (Consortium GARR and University of Rome "Tor Vergata")
# Copyright (C) 2014 Giuseppe Siracusano, Stefano Salsano - (CNIT and University of Rome "Tor Vergata")
# www.garr.it - www.uniroma2.it/netgroup - www.cnit.it
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Virtual Leased Line Pusher.
#
# @author Pier Luigi Ventre <[email protected]>
# @author Giuseppe Siracusano <[email protected]>
# @author Stefano Salsano <[email protected]>
#
#
import os
import sys
import subprocess
import json
import argparse
import io
import time
import re
import siphash
# XXX Be Careful, For Now The Vll_Pusher Depends On vll_pusher.cfg; This file should be created by the [x] Deployer
# (x = Mininet Deployer, TestBeds Deployer)
# Parse vll options. Currently supports add and delete actions.
# Syntax:
# vll_pusher --controller {IP:REST_PORT} --add
# vll_pusher --controller {IP:REST_PORT} --delete
def parse_cmd_line():
parser = argparse.ArgumentParser(description='Virtual Leased Line Pusher')
parser.add_argument('--controller', dest='controllerRestIp', action='store', default='localhost:8080', help='controller IP:RESTport, e.g., localhost:8080 or A.B.C.D:8080')
parser.add_argument('--add', dest='action', action='store_const', const='add', default='add', help='action: add')
parser.add_argument('--delete', dest='action', action='store_const', const='delete', default='add', help='action: delete')
args = parser.parse_args()
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
return args
# Read From vll_pusher.cfg The Configuration For The Vlls
def read_conf_file():
global pusher_cfg
print "*** Read Configuration File For Vll Pusher"
path = "vll_pusher.cfg"
if os.path.exists(path):
conf = open(path,'r')
pusher_cfg = json.load(conf)
conf.close()
else:
print "No Configuration File Find In %s" % path
sys.exit(-2)
print "*** PUSHER_CFG", json.dumps(pusher_cfg, sort_keys=True, indent=4)
# Utility function for the vlls persistence
def store_vll(name, dpid):
# Store created vll attributes in local ./vlls.json
datetime = time.asctime()
vllParams = {'name': name, 'Dpid':dpid, 'datetime':datetime}
	vll_str = json.dumps(vllParams)
	vllsDb = open('./vlls.json','a+')
	vllsDb.write(vll_str+"\n")
vllsDb.close()
intf_to_port_number = {}
def convert_intf_to_port_number(controllerRestIP):
global intf_to_port_number
command = "curl -s http://%s/wm/core/controller/switches/json | python -mjson.tool" % (controllerRestIP)
result = os.popen(command).read()
parsedResult = json.loads(result)
default = None
for vll in pusher_cfg['vlls']:
lhs_intf = vll['lhs_intf']
lhs_dpid = vll['lhs_dpid']
port_number = intf_to_port_number.get("%s-%s" % (lhs_dpid, lhs_intf), default)
if port_number == None :
for switch in parsedResult:
if switch["dpid"] == lhs_dpid:
for port in switch["ports"]:
if port["name"] == lhs_intf:
port_number = str(port["portNumber"])
intf_to_port_number["%s-%s" % (lhs_dpid, lhs_intf)] = port_number
vll['lhs_intf'] = port_number
rhs_intf = vll['rhs_intf']
rhs_dpid = vll['rhs_dpid']
port_number = intf_to_port_number.get("%s-%s" % (rhs_dpid, rhs_intf), default)
if port_number == None :
for switch in parsedResult:
if switch["dpid"] == rhs_dpid:
for port in switch["ports"]:
if port["name"] == rhs_intf:
port_number = str(port["portNumber"])
intf_to_port_number["%s-%s" % (rhs_dpid, rhs_intf)] = port_number
vll['rhs_intf'] = port_number
print "*** PUSHER_CFG", json.dumps(pusher_cfg, sort_keys=True, indent=4)
print "*** INTFS", json.dumps(intf_to_port_number, sort_keys=True, indent=4)
# Add Vlls Reading All the Information From Configuration File
def add_command(args):
print "*** Add Vlls From Configuration File"
print "*** Read Previous Vlls Inserted"
if os.path.exists('./vlls.json'):
vllsDb = open('./vlls.json','r')
vlllines = vllsDb.readlines()
vllsDb.close()
else:
		vlllines = []
read_conf_file()
# We use this algorithm for the name generation
key = '0123456789ABCDEF'
sip = siphash.SipHash_2_4(key)
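	# The SipHash digest of the endpoint tuple (dpids, ports and labels) is deterministic for a fixed key,
	# so re-running the pusher with the same configuration yields the same vll name.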
	# Extract from cmd line options the controller information
controllerRestIp = args.controllerRestIp
	# Dictionary that stores the mapping port:next_label
	# We allocate the labels using a counter and associate, for each port used in this execution, the next usable label
	# In the future we could also add persistence for the labels
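	# Example: with the default start value of 2, the first vll crossing a given "dpid-port" gets tag 2
	# on that port and the next vll crossing the same port gets tag 3, and so on.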
sw_port_tag = {}
convert_intf_to_port_number(controllerRestIp)
	# We can have more than one vll
for vll in pusher_cfg['vlls']:
# Retrieve the information
srcSwitch = vll['lhs_dpid']
srcPort = vll['lhs_intf']
dstSwitch = vll['rhs_dpid']
dstPort = vll['rhs_intf']
srcLabel = vll['lhs_label']
dstLabel = vll['rhs_label']
print "*** Generate Name From VLL (%s-%s-%s) - (%s-%s-%s)" % (srcSwitch, srcPort, srcLabel, dstSwitch, dstPort, dstLabel)
sip.update(srcSwitch + "$" + srcPort + "$" + dstSwitch + "$" + dstPort + "$" + srcLabel + "$" + dstLabel)
# Generate the name
digest = sip.hash()
digest = str(digest)
print "*** Vll Name", digest
vllExists = False
# if the vll exists in the vllDb, we don't insert the flow
for line in vlllines:
data = json.loads(line)
if data['name']==(digest):
print "Vll %s exists already Skip" % digest
vllExists = True
break
if vllExists == True:
continue
print "*** Create Vll:"
print "*** From Source Device OSHI-PE %s Port %s" % (srcSwitch,srcPort)
print "*** To Destination Device OSHI-PE %s Port %s"% (dstSwitch,dstPort)
# Retrieving route from source to destination
# using Routing rest API
command = "curl -s http://%s/wm/topology/route/%s/%s/%s/%s/json | python -mjson.tool" % (controllerRestIp, srcSwitch, srcPort, dstSwitch, dstPort)
result = os.popen(command).read()
parsedResult = json.loads(result)
print
#print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
		# Dictionary used to store the labels of the current vll
		temp_sw_port_tag = {}
		# We insert the rules every two json items, because floodlight's getRoute provides, for each dpid,
		# a couple of items: the in/out port and the out/in port for the forward/reverse rules - see the
		# output of the previous command
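		# Tag value 0 means untagged traffic: untagged -> untagged rules simply forward, tagged -> untagged
		# rules strip the vlan, untagged -> tagged rules push a vlan id and tagged -> tagged rules rewrite it.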
temp_key1 = None
temp_key2 = None
temp_tag1 = None
temp_tag2 = None
ap1Dpid = None
ap1Port = None
ap2Dpid = None
ap2Port = None
default = 2
max_value = 4095
if int(srcLabel) > max_value or int(dstLabel) > max_value:
print "Ingress or Egress Label Not Allowable"
sys.exit(-2)
		# We generate the labels associated with each port, while the ingress/egress and egress/ingress labels
		# come from the configuration file, because they depend on the local network choices
for j in range(0, (len(parsedResult))):
# Label for the LHS port
if j == 0:
temp_key1 = srcSwitch + "-" + srcPort
temp_sw_port_tag[temp_key1] = int(srcLabel)
if sw_port_tag.get(temp_key1,default) <= int(srcLabel):
sw_port_tag[temp_key1] = int(srcLabel)
# Label for the RHS port
elif j == (len(parsedResult)-1):
temp_key1 = dstSwitch + "-" + dstPort
temp_sw_port_tag[temp_key1] = int(dstLabel)
if sw_port_tag.get(temp_key1,default) <= int(dstLabel):
sw_port_tag[temp_key1] = int(dstLabel)
# Middle ports
else :
apDPID = parsedResult[j]['switch']
apPORT = parsedResult[j]['port']
temp_key1 = apDPID + "-" + str(apPORT)
value = sw_port_tag.get(temp_key1, default)
temp_sw_port_tag[temp_key1] = value
value = value + 1
sw_port_tag[temp_key1] = value
print "*** Current Route Tag:"
print json.dumps(temp_sw_port_tag, sort_keys=True, indent=4)
print
print "*** Global Routes Tag:"
print json.dumps(sw_port_tag, sort_keys=True, indent=4)
print
# Manage the special case of one hop
if len(parsedResult) == 2:
print "*** One Hop Route"
# The Switch, where we insert the rule
ap1Dpid = parsedResult[0]['switch']
# In port
ap1Port = str(parsedResult[0]['port'])
temp_key1 = ap1Dpid + "-" + ap1Port
tag1 = temp_sw_port_tag[temp_key1]
# ap1Dpid == ap2Dpid
ap2Dpid = parsedResult[1]['switch']
# Out port
ap2Port = str(parsedResult[1]['port'])
temp_key2 = ap2Dpid + "-" + ap2Port
tag2 = temp_sw_port_tag[temp_key2]
if tag1 == 0 and tag2 ==0:
# Forward's Rule
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".f", ap1Port, ap2Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
elif tag1 !=0 and tag2==0:
# Forward's Rule
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"strip-vlan,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".f", tag1, ap1Port, ap2Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
elif tag1 ==0 and tag2 !=0:
# Forward's Rule
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"set-vlan-id=%s,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".f", "0xffff", ap1Port, tag2, ap2Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
else:
# Forward's Rule
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"set-vlan-id=%s,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".f", tag1, ap1Port, tag2, ap2Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
if tag2 == 0 and tag1 ==0:
# Reverse Forward's Rule
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".r", ap2Port, ap1Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
elif tag2 != 0 and tag1 ==0:
# Reverse Forward's Rule
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"strip-vlan,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".r", tag2, ap2Port, ap1Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
elif tag2 == 0 and tag1 !=0:
# Reverse Forward's Rule
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"set-vlan-id=%s,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".r", "0xffff", ap2Port, tag1, ap1Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
else:
# Reverse Forward's Rule
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"set-vlan-id=%s,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".r", tag2, ap2Port, tag1, ap1Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
store_vll(digest, ap1Dpid)
# see the image one_hop for details on the switching label procedure
else:
		# In the other cases we use a different approach for the rules; above we looked at the labels
		# of the inport and outport on the same dpid, while with more than one hop the forward rule generally
		# uses the label of the inport on the next switch and the reverse rule the label of the inport on the
		# previous switch. This approach is nested in a for loop that handles the middle dpids, while
		# the ingress/egress nodes are managed as special cases, because their rules are different
print "*** %s Hop Route" % (len(parsedResult)/2)
# We manage first ingress/egress node
print "*** Create Ingress Rules For LHS Of The Vll - %s" % (srcSwitch)
# see the image more_than_one_hop for details on the switching label procedure
ap1Dpid = parsedResult[0]['switch']
ap1Port = parsedResult[0]['port']
temp_key1 = ap1Dpid + "-" + str(ap1Port)
tag1 = temp_sw_port_tag[temp_key1]
print "*** inKey: %s, inTag: %s" % (temp_key1, tag1)
ap2Dpid = parsedResult[1]['switch']
ap2Port = parsedResult[1]['port']
temp_key2 = parsedResult[2]['switch'] + "-" + str(parsedResult[2]['port'])
tag2 = temp_sw_port_tag[temp_key2]
print "*** outKey: %s, outTag: %s" % (temp_key2, tag2)
print
if tag1 == 0 and tag2 !=0:
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"set-vlan-id=%s,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".f", "0xffff", ap1Port, tag2, ap2Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
elif tag1 != 0 and tag2 !=0:
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"set-vlan-id=%s,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".f", tag1, ap1Port, tag2, ap2Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
else:
print "Error Tag";
sys.exit(-2)
print "*** Create Egress Rules For LHS Of The Vll - %s" % (srcSwitch)
temp_key2 = temp_key1
tag2 = tag1
temp_key1 = ap2Dpid + "-" + str(ap2Port)
tag1 = temp_sw_port_tag[temp_key1]
print "*** inKey: %s, inTag: %s" % (temp_key1, tag1)
print "*** outKey: %s, outTag: %s" % (temp_key2, tag2)
print
if tag1 != 0 and tag2 ==0:
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"strip-vlan,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".r", tag1, ap2Port, ap1Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
elif tag1 != 0 and tag2 !=0:
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"set-vlan-id=%s,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".r", tag1, ap2Port, tag2, ap1Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
else:
print "Error Tag";
sys.exit(-2)
store_vll(digest, ap1Dpid)
print "*** Create Egress Rules For RHS Of The Vll - %s" % (dstSwitch)
ap1Dpid = parsedResult[len(parsedResult)-2]['switch']
ap1Port = parsedResult[len(parsedResult)-2]['port']
temp_key1 = ap1Dpid + "-" + str(ap1Port)
tag1 = temp_sw_port_tag[temp_key1]
print "*** inKey: %s, inTag: %s" % (temp_key1, tag1)
ap2Dpid = parsedResult[len(parsedResult)-1]['switch']
ap2Port = parsedResult[len(parsedResult)-1]['port']
temp_key2 = ap2Dpid + "-" + str(ap2Port)
tag2 = temp_sw_port_tag[temp_key2]
print "*** outKey: %s, outTag: %s" % (temp_key2, tag2)
print
if tag1 != 0 and tag2 == 0:
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"strip-vlan,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".f", tag1, ap1Port, ap2Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
elif tag1 != 0 and tag2 != 0:
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"set-vlan-id=%s,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".f", tag1, ap1Port, tag2, ap2Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
else:
print "Error Tag";
sys.exit(-2)
print "*** Create Ingress Rules For RHS Of The Vll - %s" % (dstSwitch)
temp_key1 = parsedResult[len(parsedResult)-3]['switch'] + "-" + str(parsedResult[len(parsedResult)-3]['port'])
tag1 = temp_sw_port_tag[temp_key1]
print "*** inKey: %s, inTag: %s" % (temp_key2, tag2)
print "*** outKey: %s, outTag: %s" % (temp_key1, tag1)
print
if tag1 != 0 and tag2 == 0:
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"set-vlan-id=%s,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".r", "0xffff", ap2Port, tag1, ap1Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
elif tag1 != 0 and tag2 != 0:
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"set-vlan-id=%s,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".r", tag2, ap2Port, tag1, ap1Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
else:
print "Error Tag";
sys.exit(-2)
store_vll(digest, ap1Dpid)
# Now we manage the middle nodes
for i in range(2, (len(parsedResult)-2)):
print "index:", i
if i % 2 == 0:
ap1Dpid = parsedResult[i]['switch']
ap1Port = parsedResult[i]['port']
print ap1Dpid, ap1Port
else:
ap2Dpid = parsedResult[i]['switch']
ap2Port = parsedResult[i]['port']
print ap2Dpid, ap2Port
print "*** Create Rules For %s" % ap1Dpid
# send one flow mod per pair in route
# using StaticFlowPusher rest API
temp_key1 = ap1Dpid + "-" + str(ap1Port)
tag1 = temp_sw_port_tag[temp_key1]
print "*** inKey: %s, inTag: %s" % (temp_key1, tag1)
temp_key2 = parsedResult[i+1]['switch'] + "-" + str(parsedResult[i+1]['port'])
tag2 = temp_sw_port_tag[temp_key2]
print "*** outKey: %s, outTag: %s" % (temp_key2, tag2)
print
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"set-vlan-id=%s,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".f", tag1, ap1Port, tag2, ap2Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
temp_key1 = ap2Dpid + "-" + str(ap2Port)
tag1 = temp_sw_port_tag[temp_key1]
print "*** inKey: %s, inTag: %s" % (temp_key1, tag1)
temp_key2 = parsedResult[i-2]['switch'] + "-" + str(parsedResult[i-2]['port'])
tag2 = temp_sw_port_tag[temp_key2]
print "*** outKey: %s, outTag: %s" % (temp_key2, tag2)
print
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"vlan-id\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"set-vlan-id=%s,output=%s\"}' http://%s/wm/staticflowentrypusher/json | python -mjson.tool" % (ap1Dpid, ap1Dpid + "." + digest + ".r", tag1, ap2Port, tag2, ap1Port, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
store_vll(digest, ap1Dpid)
def del_command(args):
print "*** Delete Vlls From Configuration File"
print "*** Read Previous Vlls Inserted"
if os.path.exists('vlls.json'):
vllsDb = open('vlls.json','r')
lines = vllsDb.readlines()
vllsDb.close()
vllsDb = open('vlls.json','w')
else:
lines={}
print "*** No Vlls Inserted"
return
# Removing previously created flow from switches
# using StaticFlowPusher rest API
# the vll pusher records created vlls in the local file ./vlls.json
# with the vll name and the dpid of the switch each rule was installed on
controllerRestIp = args.controllerRestIp
for line in lines:
data = json.loads(line)
sw = data['Dpid']
digest = data['name']
print "*** Deleting Vll: %s - Switch %s" % (digest,sw)
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json 2> /dev/null | python -mjson.tool" % (sw + "." + digest + ".f", sw, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json 2> /dev/null | python -mjson.tool" % (sw + "." + digest +".r", sw, controllerRestIp)
result = os.popen(command).read()
print "*** Sent Command:", command + "\n"
print "*** Received Result:", result + "\n"
vllsDb.close()
def run_command(data):
if data.action == 'add':
add_command(data)
elif data.action == 'delete':
del_command(data)
if __name__ == '__main__':
args = parse_cmd_line()
run_command(args)
| apache-2.0 | -6,129,818,374,229,325,000 | 44.754717 | 371 | 0.612784 | false |
0-wiz-0/psutil | psutil/_pssunos.py | 1 | 20127 | # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sun OS Solaris platform implementation."""
import errno
import os
import socket
import subprocess
import sys
from collections import namedtuple
from . import _common
from . import _psposix
from . import _psutil_posix as cext_posix
from . import _psutil_sunos as cext
from ._common import isfile_strict
from ._common import sockfam_to_enum
from ._common import socktype_to_enum
from ._common import usage_percent
from ._compat import b
from ._compat import PY3
__extra__all__ = ["CONN_IDLE", "CONN_BOUND", "PROCFS_PATH"]
PAGE_SIZE = os.sysconf('SC_PAGE_SIZE')
AF_LINK = cext_posix.AF_LINK
CONN_IDLE = "IDLE"
CONN_BOUND = "BOUND"
PROC_STATUSES = {
cext.SSLEEP: _common.STATUS_SLEEPING,
cext.SRUN: _common.STATUS_RUNNING,
cext.SZOMB: _common.STATUS_ZOMBIE,
cext.SSTOP: _common.STATUS_STOPPED,
cext.SIDL: _common.STATUS_IDLE,
cext.SONPROC: _common.STATUS_RUNNING, # same as run
cext.SWAIT: _common.STATUS_WAITING,
}
TCP_STATUSES = {
cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
cext.TCPS_CLOSED: _common.CONN_CLOSE,
cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
cext.TCPS_LISTEN: _common.CONN_LISTEN,
cext.TCPS_CLOSING: _common.CONN_CLOSING,
cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
cext.TCPS_IDLE: CONN_IDLE, # sunos specific
cext.TCPS_BOUND: CONN_BOUND, # sunos specific
}
scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
pmem = namedtuple('pmem', ['rss', 'vms'])
pmmap_grouped = namedtuple('pmmap_grouped',
['path', 'rss', 'anonymous', 'locked'])
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
# set later from __init__.py
NoSuchProcess = None
ZombieProcess = None
AccessDenied = None
TimeoutExpired = None
# --- utils
def get_procfs_path():
return sys.modules['psutil'].PROCFS_PATH
# --- functions
disk_io_counters = cext.disk_io_counters
net_io_counters = cext.net_io_counters
disk_usage = _psposix.disk_usage
net_if_addrs = cext_posix.net_if_addrs
def virtual_memory():
# we could have done this with kstat, but imho this is good enough
total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE
# note: there's no difference on Solaris
free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE
used = total - free
percent = usage_percent(used, total, _round=1)
return svmem(total, avail, percent, used, free)
def swap_memory():
sin, sout = cext.swap_mem()
# XXX
# we are supposed to get total/free by doing so:
# http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/
# usr/src/cmd/swap/swap.c
# ...nevertheless I can't manage to obtain the same numbers as 'swap'
# cmdline utility, so let's parse its output (sigh!)
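# A hypothetical 'swap -l' listing this parser expects (values are made up,
# only the column layout matters):
#   swapfile                  dev    swaplo   blocks     free
#   /dev/zvol/dsk/rpool/swap  256,1      16  2097136  2097136
# the last two columns are counted in 512-byte blocks, hence the "* 512" below.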
p = subprocess.Popen(['/usr/bin/env', 'PATH=/usr/sbin:/sbin:%s' %
os.environ['PATH'], 'swap', '-l'],
stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
if PY3:
stdout = stdout.decode(sys.stdout.encoding)
if p.returncode != 0:
raise RuntimeError("'swap -l' failed (retcode=%s)" % p.returncode)
lines = stdout.strip().split('\n')[1:]
if not lines:
raise RuntimeError('no swap device(s) configured')
total = free = 0
for line in lines:
line = line.split()
t, f = line[-2:]
total += int(int(t) * 512)
free += int(int(f) * 512)
used = total - free
percent = usage_percent(used, total, _round=1)
return _common.sswap(total, used, free, percent,
sin * PAGE_SIZE, sout * PAGE_SIZE)
def pids():
"""Returns a list of PIDs currently running on the system."""
return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()]
def pid_exists(pid):
"""Check for the existence of a unix pid."""
return _psposix.pid_exists(pid)
def cpu_times():
"""Return system-wide CPU times as a named tuple"""
ret = cext.per_cpu_times()
return scputimes(*[sum(x) for x in zip(*ret)])
def per_cpu_times():
"""Return system per-CPU times as a list of named tuples"""
ret = cext.per_cpu_times()
return [scputimes(*x) for x in ret]
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except ValueError:
# mimic os.cpu_count() behavior
return None
def cpu_count_physical():
"""Return the number of physical CPUs in the system."""
return cext.cpu_count_phys()
def boot_time():
"""The system boot time expressed in seconds since the epoch."""
return cext.boot_time()
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
localhost = (':0.0', ':0')
for item in rawlist:
user, tty, hostname, tstamp, user_process = item
# note: the underlying C function includes entries about
# system boot, run level and others. We might want
# to use them in the future.
if not user_process:
continue
if hostname in localhost:
hostname = 'localhost'
nt = _common.suser(user, tty, hostname, tstamp)
retlist.append(nt)
return retlist
def disk_partitions(all=False):
"""Return system disk partitions."""
# TODO - the filtering logic should be better checked so that
# it tries to reflect 'df' as much as possible
retlist = []
partitions = cext.disk_partitions()
for partition in partitions:
device, mountpoint, fstype, opts = partition
if device == 'none':
device = ''
if not all:
# Differently from, say, Linux, we don't have a list of
# common fs types so the best we can do, AFAIK, is to
# filter by filesystem having a total size > 0.
if not disk_usage(mountpoint).total:
continue
ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
retlist.append(ntuple)
return retlist
def net_connections(kind, _pid=-1):
"""Return socket connections. If pid == -1 return system-wide
connections (as opposed to connections opened by one process only).
Only INET sockets are returned (UNIX are not).
"""
cmap = _common.conn_tmap.copy()
if _pid == -1:
cmap.pop('unix', 0)
if kind not in cmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in cmap])))
families, types = _common.conn_tmap[kind]
rawlist = cext.net_connections(_pid)
ret = set()
for item in rawlist:
fd, fam, type_, laddr, raddr, status, pid = item
if fam not in families:
continue
if type_ not in types:
continue
status = TCP_STATUSES[status]
fam = sockfam_to_enum(fam)
type_ = socktype_to_enum(type_)
if _pid == -1:
nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)
else:
nt = _common.pconn(fd, fam, type_, laddr, raddr, status)
ret.add(nt)
return list(ret)
def net_if_stats():
"""Get NIC stats (isup, duplex, speed, mtu)."""
ret = cext.net_if_stats()
for name, items in ret.items():
isup, duplex, speed, mtu = items
if hasattr(_common, 'NicDuplex'):
duplex = _common.NicDuplex(duplex)
ret[name] = _common.snicstats(isup, duplex, speed, mtu)
return ret
def wrap_exceptions(fun):
"""Call callable into a try/except clause and translate ENOENT,
EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
"""
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
class Process(object):
"""Wrapper class around underlying C implementation."""
__slots__ = ["pid", "_name", "_ppid", "_procfs_path"]
def __init__(self, pid):
self.pid = pid
self._name = None
self._ppid = None
self._procfs_path = get_procfs_path()
@wrap_exceptions
def name(self):
# note: max len == 15
return cext.proc_name_and_args(self.pid, self._procfs_path)[0]
@wrap_exceptions
def exe(self):
try:
return os.readlink(
"%s/%s/path/a.out" % (self._procfs_path, self.pid))
except OSError:
pass # continue and guess the exe name from the cmdline
# Will be guessed later from cmdline but we want to explicitly
# invoke cmdline here in order to get an AccessDenied
# exception if the user has not enough privileges.
self.cmdline()
return ""
@wrap_exceptions
def cmdline(self):
return cext.proc_name_and_args(
self.pid, self._procfs_path)[1].split(' ')
@wrap_exceptions
def create_time(self):
return cext.proc_basic_info(self.pid, self._procfs_path)[3]
@wrap_exceptions
def num_threads(self):
return cext.proc_basic_info(self.pid, self._procfs_path)[5]
@wrap_exceptions
def nice_get(self):
# For some reason getpriority(3) return ESRCH (no such process)
# for certain low-pid processes, no matter what (even as root).
# The process actually exists though, as it has a name,
# creation time, etc.
# The best thing we can do here appears to be raising AD.
# Note: tested on Solaris 11; on Open Solaris 5 everything is
# fine.
try:
return cext_posix.getpriority(self.pid)
except EnvironmentError as err:
# 48 is 'operation not supported' but errno does not expose
# it. It occurs for low system pids.
if err.errno in (errno.ENOENT, errno.ESRCH, 48):
if pid_exists(self.pid):
raise AccessDenied(self.pid, self._name)
raise
@wrap_exceptions
def nice_set(self, value):
if self.pid in (2, 3):
# Special case PIDs: internally setpriority(3) return ESRCH
# (no such process), no matter what.
# The process actually exists though, as it has a name,
# creation time, etc.
raise AccessDenied(self.pid, self._name)
return cext_posix.setpriority(self.pid, value)
@wrap_exceptions
def ppid(self):
return cext.proc_basic_info(self.pid, self._procfs_path)[0]
@wrap_exceptions
def uids(self):
real, effective, saved, _, _, _ = \
cext.proc_cred(self.pid, self._procfs_path)
return _common.puids(real, effective, saved)
@wrap_exceptions
def gids(self):
_, _, _, real, effective, saved = \
cext.proc_cred(self.pid, self._procfs_path)
return _common.puids(real, effective, saved)
@wrap_exceptions
def cpu_times(self):
user, system = cext.proc_cpu_times(self.pid, self._procfs_path)
return _common.pcputimes(user, system)
@wrap_exceptions
def terminal(self):
procfs_path = self._procfs_path
hit_enoent = False
tty = wrap_exceptions(
cext.proc_basic_info(self.pid, self._procfs_path)[0])
if tty != cext.PRNODEV:
for x in (0, 1, 2, 255):
try:
return os.readlink(
'%s/%d/path/%d' % (procfs_path, self.pid, x))
except OSError as err:
if err.errno == errno.ENOENT:
hit_enoent = True
continue
raise
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('%s/%s' % (procfs_path, self.pid))
@wrap_exceptions
def cwd(self):
# /proc/PID/path/cwd may not be resolved by readlink() even if
# it exists (ls shows it). If that's the case and the process
# is still alive return None (we can return None also on BSD).
# Reference: http://goo.gl/55XgO
procfs_path = self._procfs_path
try:
return os.readlink("%s/%s/path/cwd" % (procfs_path, self.pid))
except OSError as err:
if err.errno == errno.ENOENT:
os.stat("%s/%s" % (procfs_path, self.pid))
return None
raise
@wrap_exceptions
def memory_info(self):
ret = cext.proc_basic_info(self.pid, self._procfs_path)
rss, vms = ret[1] * 1024, ret[2] * 1024
return _common.pmem(rss, vms)
@wrap_exceptions
def status(self):
code = cext.proc_basic_info(self.pid, self._procfs_path)[6]
# XXX is '?' legit? (we're not supposed to return it anyway)
return PROC_STATUSES.get(code, '?')
@wrap_exceptions
def threads(self):
procfs_path = self._procfs_path
ret = []
tids = os.listdir('%s/%d/lwp' % (procfs_path, self.pid))
hit_enoent = False
for tid in tids:
tid = int(tid)
try:
utime, stime = cext.query_process_thread(
self.pid, tid, procfs_path)
except EnvironmentError as err:
# ENOENT == thread gone in meantime
if err.errno == errno.ENOENT:
hit_enoent = True
continue
raise
else:
nt = _common.pthread(tid, utime, stime)
ret.append(nt)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('%s/%s' % (procfs_path, self.pid))
return ret
@wrap_exceptions
def open_files(self):
retlist = []
hit_enoent = False
procfs_path = self._procfs_path
pathdir = '%s/%d/path' % (procfs_path, self.pid)
for fd in os.listdir('%s/%d/fd' % (procfs_path, self.pid)):
path = os.path.join(pathdir, fd)
if os.path.islink(path):
try:
file = os.readlink(path)
except OSError as err:
# ENOENT == file which is gone in the meantime
if err.errno == errno.ENOENT:
hit_enoent = True
continue
raise
else:
if isfile_strict(file):
retlist.append(_common.popenfile(file, int(fd)))
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('%s/%s' % (procfs_path, self.pid))
return retlist
def _get_unix_sockets(self, pid):
"""Get UNIX sockets used by process by parsing 'pfiles' output."""
# TODO: rewrite this in C (...but the damn netstat source code
# does not include this part! Argh!!)
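# A hypothetical snippet of 'pfiles <pid>' output this parser relies on (the
# values are made up):
#   4: S_IFSOCK mode:0666 dev:551,0 ino:48639 uid:0 gid:0 size:0
#     SOCK_STREAM
#     SO_SNDBUF(16384),SO_RCVBUF(5120)
#     sockname: AF_UNIX /var/run/example.sock
# the socket type is taken from the line two rows above each 'sockname: AF_UNIX'
# line, and the path from the third space-separated field of that line.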
cmd = "pfiles %s" % pid
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if PY3:
stdout, stderr = [x.decode(sys.stdout.encoding)
for x in (stdout, stderr)]
if p.returncode != 0:
if 'permission denied' in stderr.lower():
raise AccessDenied(self.pid, self._name)
if 'no such process' in stderr.lower():
raise NoSuchProcess(self.pid, self._name)
raise RuntimeError("%r command error\n%s" % (cmd, stderr))
lines = stdout.split('\n')[2:]
for i, line in enumerate(lines):
line = line.lstrip()
if line.startswith('sockname: AF_UNIX'):
path = line.split(' ', 2)[2]
type = lines[i - 2].strip()
if type == 'SOCK_STREAM':
type = socket.SOCK_STREAM
elif type == 'SOCK_DGRAM':
type = socket.SOCK_DGRAM
else:
type = -1
yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE)
@wrap_exceptions
def connections(self, kind='inet'):
ret = net_connections(kind, _pid=self.pid)
# The underlying C implementation retrieves all OS connections
# and filters them by PID. At this point we can't tell whether
# an empty list means there were no connections for process or
# process is no longer active so we force NSP in case the PID
# is no longer there.
if not ret:
# will raise NSP if process is gone
os.stat('%s/%s' % (self._procfs_path, self.pid))
# UNIX sockets
if kind in ('all', 'unix'):
ret.extend([_common.pconn(*conn) for conn in
self._get_unix_sockets(self.pid)])
return ret
nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')
nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')
@wrap_exceptions
def memory_maps(self):
def toaddr(start, end):
return '%s-%s' % (hex(start)[2:].strip('L'),
hex(end)[2:].strip('L'))
procfs_path = self._procfs_path
retlist = []
rawlist = cext.proc_memory_maps(self.pid, procfs_path)
hit_enoent = False
for item in rawlist:
addr, addrsize, perm, name, rss, anon, locked = item
addr = toaddr(addr, addrsize)
if not name.startswith('['):
try:
name = os.readlink(
'%s/%s/path/%s' % (procfs_path, self.pid, name))
except OSError as err:
if err.errno == errno.ENOENT:
# sometimes the link may not be resolved by
# readlink() even if it exists (ls shows it).
# If that's the case we just return the
# unresolved link path.
# This seems an inconsistency with /proc similar
# to: http://goo.gl/55XgO
name = '%s/%s/path/%s' % (procfs_path, self.pid, name)
hit_enoent = True
else:
raise
retlist.append((addr, perm, name, rss, anon, locked))
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('%s/%s' % (procfs_path, self.pid))
return retlist
@wrap_exceptions
def num_fds(self):
return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)))
@wrap_exceptions
def num_ctx_switches(self):
return _common.pctxsw(
*cext.proc_num_ctx_switches(self.pid, self._procfs_path))
@wrap_exceptions
def wait(self, timeout=None):
try:
return _psposix.wait_pid(self.pid, timeout)
except _psposix.TimeoutExpired:
raise TimeoutExpired(timeout, self.pid, self._name)
| bsd-3-clause | 6,773,734,069,539,041,000 | 34.434859 | 78 | 0.57167 | false |
alunduil/fig | compose/service.py | 1 | 31102 | from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import os
import re
import sys
from collections import namedtuple
from operator import attrgetter
import six
from docker.errors import APIError
from docker.utils import create_host_config
from docker.utils import LogConfig
from docker.utils.ports import build_port_bindings
from docker.utils.ports import split_port
from . import __version__
from .config import DOCKER_CONFIG_KEYS
from .config import merge_environment
from .config.validation import VALID_NAME_CHARS
from .const import DEFAULT_TIMEOUT
from .const import LABEL_CONFIG_HASH
from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .const import LABEL_VERSION
from .container import Container
from .legacy import check_for_legacy_containers
from .progress_stream import stream_output
from .progress_stream import StreamOutputError
from .utils import json_hash
from .utils import parallel_execute
log = logging.getLogger(__name__)
DOCKER_START_KEYS = [
'cap_add',
'cap_drop',
'devices',
'dns',
'dns_search',
'env_file',
'extra_hosts',
'read_only',
'net',
'log_driver',
'log_opt',
'mem_limit',
'memswap_limit',
'pid',
'privileged',
'restart',
'volumes_from',
'security_opt',
]
class BuildError(Exception):
def __init__(self, service, reason):
self.service = service
self.reason = reason
class ConfigError(ValueError):
pass
class NeedsBuildError(Exception):
def __init__(self, service):
self.service = service
class NoSuchImageError(Exception):
pass
VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')
ServiceName = namedtuple('ServiceName', 'project service number')
ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
class Service(object):
def __init__(self, name, client=None, project='default', links=None, external_links=None, volumes_from=None, net=None, **options):
if not re.match('^%s+$' % VALID_NAME_CHARS, project):
raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
self.name = name
self.client = client
self.project = project
self.links = links or []
self.external_links = external_links or []
self.volumes_from = volumes_from or []
self.net = net or None
self.options = options
def containers(self, stopped=False, one_off=False, filters={}):
filters.update({'label': self.labels(one_off=one_off)})
containers = list(filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters=filters)]))
if not containers:
check_for_legacy_containers(
self.client,
self.project,
[self.name],
)
return containers
def get_container(self, number=1):
"""Return a :class:`compose.container.Container` for this service. The
container must be active, and match `number`.
"""
labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
for container in self.client.containers(filters={'label': labels}):
return Container.from_ps(self.client, container)
raise ValueError("No container found for %s_%s" % (self.name, number))
def start(self, **options):
for c in self.containers(stopped=True):
self.start_container_if_stopped(c, **options)
# TODO: remove these functions, project takes care of starting/stopping,
def stop(self, **options):
for c in self.containers():
log.info("Stopping %s..." % c.name)
c.stop(**options)
def pause(self, **options):
for c in self.containers(filters={'status': 'running'}):
log.info("Pausing %s..." % c.name)
c.pause(**options)
def unpause(self, **options):
for c in self.containers(filters={'status': 'paused'}):
log.info("Unpausing %s..." % c.name)
c.unpause()
def kill(self, **options):
for c in self.containers():
log.info("Killing %s..." % c.name)
c.kill(**options)
def restart(self, **options):
for c in self.containers():
log.info("Restarting %s..." % c.name)
c.restart(**options)
# end TODO
def scale(self, desired_num, timeout=DEFAULT_TIMEOUT):
"""
Adjusts the number of containers to the specified number and ensures
they are running.
- creates containers until there are at least `desired_num`
- stops containers until there are at most `desired_num` running
- starts containers until there are at least `desired_num` running
- removes all stopped containers
"""
if self.custom_container_name() and desired_num > 1:
log.warn('The "%s" service is using the custom container name "%s". '
'Docker requires each container to have a unique name. '
'Remove the custom name to scale the service.'
% (self.name, self.custom_container_name()))
if self.specifies_host_port():
log.warn('The "%s" service specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.'
% self.name)
def create_and_start(service, number):
container = service.create_container(number=number, quiet=True)
container.start()
return container
running_containers = self.containers(stopped=False)
num_running = len(running_containers)
if desired_num == num_running:
# do nothing as we already have the desired number
log.info('Desired container number already achieved')
return
if desired_num > num_running:
# we need to start/create until we have desired_num
all_containers = self.containers(stopped=True)
if num_running != len(all_containers):
# we have some stopped containers, let's start them up again
stopped_containers = sorted([c for c in all_containers if not c.is_running], key=attrgetter('number'))
num_stopped = len(stopped_containers)
if num_stopped + num_running > desired_num:
num_to_start = desired_num - num_running
containers_to_start = stopped_containers[:num_to_start]
else:
containers_to_start = stopped_containers
parallel_execute(
objects=containers_to_start,
obj_callable=lambda c: c.start(),
msg_index=lambda c: c.name,
msg="Starting"
)
num_running += len(containers_to_start)
num_to_create = desired_num - num_running
next_number = self._next_container_number()
container_numbers = [
number for number in range(
next_number, next_number + num_to_create
)
]
parallel_execute(
objects=container_numbers,
obj_callable=lambda n: create_and_start(service=self, number=n),
msg_index=lambda n: n,
msg="Creating and starting"
)
if desired_num < num_running:
num_to_stop = num_running - desired_num
sorted_running_containers = sorted(running_containers, key=attrgetter('number'))
containers_to_stop = sorted_running_containers[-num_to_stop:]
parallel_execute(
objects=containers_to_stop,
obj_callable=lambda c: c.stop(timeout=timeout),
msg_index=lambda c: c.name,
msg="Stopping"
)
self.remove_stopped()
def remove_stopped(self, **options):
containers = [c for c in self.containers(stopped=True) if not c.is_running]
parallel_execute(
objects=containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def create_container(self,
one_off=False,
do_build=True,
previous_container=None,
number=None,
quiet=False,
**override_options):
"""
Create a container for this service. If the image doesn't exist, attempt to pull
it.
"""
self.ensure_image_exists(
do_build=do_build,
)
container_options = self._get_container_create_options(
override_options,
number or self._next_container_number(one_off=one_off),
one_off=one_off,
previous_container=previous_container,
)
if 'name' in container_options and not quiet:
log.info("Creating %s..." % container_options['name'])
return Container.create(self.client, **container_options)
def ensure_image_exists(self,
do_build=True):
try:
self.image()
return
except NoSuchImageError:
pass
if self.can_be_built():
if do_build:
self.build()
else:
raise NeedsBuildError(self)
else:
self.pull()
def image(self):
try:
return self.client.inspect_image(self.image_name)
except APIError as e:
if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
raise NoSuchImageError("Image '{}' not found".format(self.image_name))
else:
raise
@property
def image_name(self):
if self.can_be_built():
return self.full_name
else:
return self.options['image']
def convergence_plan(self,
allow_recreate=True,
force_recreate=False):
if force_recreate and not allow_recreate:
raise ValueError("force_recreate and allow_recreate are in conflict")
containers = self.containers(stopped=True)
if not containers:
return ConvergencePlan('create', [])
if not allow_recreate:
return ConvergencePlan('start', containers)
if force_recreate or self._containers_have_diverged(containers):
return ConvergencePlan('recreate', containers)
stopped = [c for c in containers if not c.is_running]
if stopped:
return ConvergencePlan('start', stopped)
return ConvergencePlan('noop', containers)
def _containers_have_diverged(self, containers):
config_hash = None
try:
config_hash = self.config_hash
except NoSuchImageError as e:
log.debug(
'Service %s has diverged: %s',
self.name, six.text_type(e),
)
return True
has_diverged = False
for c in containers:
container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
if container_config_hash != config_hash:
log.debug(
'%s has diverged: %s != %s',
c.name, container_config_hash, config_hash,
)
has_diverged = True
return has_diverged
def execute_convergence_plan(self,
plan,
do_build=True,
timeout=DEFAULT_TIMEOUT):
(action, containers) = plan
if action == 'create':
container = self.create_container(
do_build=do_build,
)
self.start_container(container)
return [container]
elif action == 'recreate':
return [
self.recreate_container(
c,
timeout=timeout
)
for c in containers
]
elif action == 'start':
for c in containers:
self.start_container_if_stopped(c)
return containers
elif action == 'noop':
for c in containers:
log.info("%s is up-to-date" % c.name)
return containers
else:
raise Exception("Invalid action: {}".format(action))
def recreate_container(self,
container,
timeout=DEFAULT_TIMEOUT):
"""Recreate a container.
The original container is renamed to a temporary name so that data
volumes can be copied to the new container, before the original
container is removed.
"""
log.info("Recreating %s..." % container.name)
try:
container.stop(timeout=timeout)
except APIError as e:
if (e.response.status_code == 500
and e.explanation
and 'no such process' in str(e.explanation)):
pass
else:
raise
# Use a hopefully unique container name by prepending the short id
self.client.rename(
container.id,
'%s_%s' % (container.short_id, container.name))
new_container = self.create_container(
do_build=False,
previous_container=container,
number=container.labels.get(LABEL_CONTAINER_NUMBER),
quiet=True,
)
self.start_container(new_container)
container.remove()
return new_container
def start_container_if_stopped(self, container):
if container.is_running:
return container
else:
log.info("Starting %s..." % container.name)
return self.start_container(container)
def start_container(self, container):
container.start()
return container
def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT):
for c in self.duplicate_containers():
log.info('Removing %s...' % c.name)
c.stop(timeout=timeout)
c.remove()
def duplicate_containers(self):
containers = sorted(
self.containers(stopped=True),
key=lambda c: c.get('Created'),
)
numbers = set()
for c in containers:
if c.number in numbers:
yield c
else:
numbers.add(c.number)
@property
def config_hash(self):
return json_hash(self.config_dict())
def config_dict(self):
return {
'options': self.options,
'image_id': self.image()['Id'],
}
def get_dependency_names(self):
net_name = self.get_net_name()
return (self.get_linked_names() +
self.get_volumes_from_names() +
([net_name] if net_name else []))
def get_linked_names(self):
return [s.name for (s, _) in self.links]
def get_volumes_from_names(self):
return [s.name for s in self.volumes_from if isinstance(s, Service)]
def get_net_name(self):
if isinstance(self.net, Service):
return self.net.name
else:
return
def get_container_name(self, number, one_off=False):
# TODO: Implement issue #652 here
return build_container_name(self.project, self.name, number, one_off)
# TODO: this would benefit from github.com/docker/docker/pull/11943
# to remove the need to inspect every container
def _next_container_number(self, one_off=False):
containers = filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=True,
filters={'label': self.labels(one_off=one_off)})
])
numbers = [c.number for c in containers]
return 1 if not numbers else max(numbers) + 1
def _get_links(self, link_to_self):
links = []
for service, link_name in self.links:
for container in service.containers():
links.append((container.name, link_name or service.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
if link_to_self:
for container in self.containers():
links.append((container.name, self.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
for external_link in self.external_links:
if ':' not in external_link:
link_name = external_link
else:
external_link, link_name = external_link.split(':')
links.append((external_link, link_name))
return links
def _get_volumes_from(self):
volumes_from = []
for volume_source in self.volumes_from:
if isinstance(volume_source, Service):
containers = volume_source.containers(stopped=True)
if not containers:
volumes_from.append(volume_source.create_container().id)
else:
volumes_from.extend(map(attrgetter('id'), containers))
elif isinstance(volume_source, Container):
volumes_from.append(volume_source.id)
return volumes_from
def _get_net(self):
if not self.net:
return None
if isinstance(self.net, Service):
containers = self.net.containers()
if len(containers) > 0:
net = 'container:' + containers[0].id
else:
log.warning("Warning: Service %s is trying to use reuse the network stack "
"of another service that is not running." % (self.net.name))
net = None
elif isinstance(self.net, Container):
net = 'container:' + self.net.id
else:
net = self.net
return net
def _get_container_create_options(
self,
override_options,
number,
one_off=False,
previous_container=None):
add_config_hash = (not one_off and not override_options)
container_options = dict(
(k, self.options[k])
for k in DOCKER_CONFIG_KEYS if k in self.options)
container_options.update(override_options)
if self.custom_container_name() and not one_off:
container_options['name'] = self.custom_container_name()
else:
container_options['name'] = self.get_container_name(number, one_off)
if add_config_hash:
config_hash = self.config_hash
if 'labels' not in container_options:
container_options['labels'] = {}
container_options['labels'][LABEL_CONFIG_HASH] = config_hash
log.debug("Added config hash: %s" % config_hash)
if 'detach' not in container_options:
container_options['detach'] = True
# If a qualified hostname was given, split it into an
# unqualified hostname and a domainname unless domainname
# was also given explicitly. This matches the behavior of
# the official Docker CLI in that scenario.
if ('hostname' in container_options
and 'domainname' not in container_options
and '.' in container_options['hostname']):
parts = container_options['hostname'].partition('.')
container_options['hostname'] = parts[0]
container_options['domainname'] = parts[2]
if 'ports' in container_options or 'expose' in self.options:
ports = []
all_ports = container_options.get('ports', []) + self.options.get('expose', [])
for port_range in all_ports:
internal_range, _ = split_port(port_range)
for port in internal_range:
port = str(port)
if '/' in port:
port = tuple(port.split('/'))
ports.append(port)
container_options['ports'] = ports
override_options['binds'] = merge_volume_bindings(
container_options.get('volumes') or [],
previous_container)
if 'volumes' in container_options:
container_options['volumes'] = dict(
(parse_volume_spec(v).internal, {})
for v in container_options['volumes'])
container_options['environment'] = merge_environment(
self.options.get('environment'),
override_options.get('environment'))
if previous_container:
container_options['environment']['affinity:container'] = ('=' + previous_container.id)
container_options['image'] = self.image_name
container_options['labels'] = build_container_labels(
container_options.get('labels', {}),
self.labels(one_off=one_off),
number)
# Delete options which are only used when starting
for key in DOCKER_START_KEYS:
container_options.pop(key, None)
container_options['host_config'] = self._get_container_host_config(
override_options,
one_off=one_off)
return container_options
def _get_container_host_config(self, override_options, one_off=False):
options = dict(self.options, **override_options)
port_bindings = build_port_bindings(options.get('ports') or [])
privileged = options.get('privileged', False)
cap_add = options.get('cap_add', None)
cap_drop = options.get('cap_drop', None)
log_config = LogConfig(
type=options.get('log_driver', 'json-file'),
config=options.get('log_opt', None)
)
pid = options.get('pid', None)
security_opt = options.get('security_opt', None)
dns = options.get('dns', None)
if isinstance(dns, six.string_types):
dns = [dns]
dns_search = options.get('dns_search', None)
if isinstance(dns_search, six.string_types):
dns_search = [dns_search]
restart = parse_restart_spec(options.get('restart', None))
extra_hosts = build_extra_hosts(options.get('extra_hosts', None))
read_only = options.get('read_only', None)
devices = options.get('devices', None)
return create_host_config(
links=self._get_links(link_to_self=one_off),
port_bindings=port_bindings,
binds=options.get('binds'),
volumes_from=self._get_volumes_from(),
privileged=privileged,
network_mode=self._get_net(),
devices=devices,
dns=dns,
dns_search=dns_search,
restart_policy=restart,
cap_add=cap_add,
cap_drop=cap_drop,
mem_limit=options.get('mem_limit'),
memswap_limit=options.get('memswap_limit'),
log_config=log_config,
extra_hosts=extra_hosts,
read_only=read_only,
pid_mode=pid,
security_opt=security_opt
)
def build(self, no_cache=False):
log.info('Building %s...' % self.name)
path = self.options['build']
# python2 os.path functions don't support unicode, so we need to encode the
# path to a byte string
if not six.PY3:
path = path.encode('utf8')
build_output = self.client.build(
path=path,
tag=self.image_name,
stream=True,
rm=True,
pull=False,
nocache=no_cache,
dockerfile=self.options.get('dockerfile', None),
)
try:
all_events = stream_output(build_output, sys.stdout)
except StreamOutputError as e:
raise BuildError(self, six.text_type(e))
# Ensure the HTTP connection is not reused for another
# streaming command, as the Docker daemon can sometimes
# complain about it
self.client.close()
image_id = None
for event in all_events:
if 'stream' in event:
match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
if match:
image_id = match.group(1)
if image_id is None:
raise BuildError(self, event if all_events else 'Unknown')
return image_id
def can_be_built(self):
return 'build' in self.options
@property
def full_name(self):
"""
The tag to give to images built for this service.
"""
return '%s_%s' % (self.project, self.name)
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.project),
'{0}={1}'.format(LABEL_SERVICE, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
]
def custom_container_name(self):
return self.options.get('container_name')
def specifies_host_port(self):
for port in self.options.get('ports', []):
if ':' in str(port):
return True
return False
def pull(self):
if 'image' not in self.options:
return
repo, tag, separator = parse_repository_tag(self.options['image'])
tag = tag or 'latest'
log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
output = self.client.pull(
repo,
tag=tag,
stream=True,
)
stream_output(output, sys.stdout)
# Names
def build_container_name(project, service, number, one_off=False):
bits = [project, service]
if one_off:
bits.append('run')
return '_'.join(bits + [str(number)])
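# e.g. build_container_name('myproject', 'web', 2) -> 'myproject_web_2'
# and build_container_name('myproject', 'web', 1, one_off=True) -> 'myproject_web_run_1'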
# Images
def parse_repository_tag(repo_path):
"""Splits image identification into base image path, tag/digest
and it's separator.
Example:
>>> parse_repository_tag('user/repo@sha256:digest')
('user/repo', 'sha256:digest', '@')
>>> parse_repository_tag('user/repo:v1')
('user/repo', 'v1', ':')
"""
tag_separator = ":"
digest_separator = "@"
if digest_separator in repo_path:
repo, tag = repo_path.rsplit(digest_separator, 1)
return repo, tag, digest_separator
repo, tag = repo_path, ""
if tag_separator in repo_path:
repo, tag = repo_path.rsplit(tag_separator, 1)
if "/" in tag:
repo, tag = repo_path, ""
return repo, tag, tag_separator
# Volumes
def merge_volume_bindings(volumes_option, previous_container):
"""Return a list of volume bindings for a container. Container data volumes
are replaced by those from the previous container.
"""
volume_bindings = dict(
build_volume_binding(parse_volume_spec(volume))
for volume in volumes_option or []
if ':' in volume)
if previous_container:
volume_bindings.update(
get_container_data_volumes(previous_container, volumes_option))
return list(volume_bindings.values())
def get_container_data_volumes(container, volumes_option):
"""Find the container data volumes that are in `volumes_option`, and return
a mapping of volume bindings for those volumes.
"""
volumes = []
volumes_option = volumes_option or []
container_volumes = container.get('Volumes') or {}
image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {}
for volume in set(volumes_option + list(image_volumes)):
volume = parse_volume_spec(volume)
# No need to preserve host volumes
if volume.external:
continue
volume_path = container_volumes.get(volume.internal)
# New volume, doesn't exist in the old container
if not volume_path:
continue
# Copy existing volume from old container
volume = volume._replace(external=volume_path)
volumes.append(build_volume_binding(volume))
return dict(volumes)
def build_volume_binding(volume_spec):
return volume_spec.internal, "{}:{}:{}".format(*volume_spec)
def parse_volume_spec(volume_config):
parts = volume_config.split(':')
if len(parts) > 3:
raise ConfigError("Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config)
if len(parts) == 1:
external = None
internal = os.path.normpath(parts[0])
else:
external = os.path.normpath(parts[0])
internal = os.path.normpath(parts[1])
mode = parts[2] if len(parts) == 3 else 'rw'
return VolumeSpec(external, internal, mode)
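# e.g. parse_volume_spec('/host/data:/data:ro') -> VolumeSpec('/host/data', '/data', 'ro')
# and parse_volume_spec('/var/lib/mysql') -> VolumeSpec(None, '/var/lib/mysql', 'rw')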
# Labels
def build_container_labels(label_options, service_labels, number, one_off=False):
labels = label_options or {}
labels.update(label.split('=', 1) for label in service_labels)
labels[LABEL_CONTAINER_NUMBER] = str(number)
labels[LABEL_VERSION] = __version__
return labels
# Restart policy
def parse_restart_spec(restart_config):
if not restart_config:
return None
parts = restart_config.split(':')
if len(parts) > 2:
raise ConfigError("Restart %s has incorrect format, should be "
"mode[:max_retry]" % restart_config)
if len(parts) == 2:
name, max_retry_count = parts
else:
name, = parts
max_retry_count = 0
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
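# e.g. parse_restart_spec('on-failure:5') -> {'Name': 'on-failure', 'MaximumRetryCount': 5}
# and parse_restart_spec('always') -> {'Name': 'always', 'MaximumRetryCount': 0}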
# Extra hosts
def build_extra_hosts(extra_hosts_config):
if not extra_hosts_config:
return {}
if isinstance(extra_hosts_config, list):
extra_hosts_dict = {}
for extra_hosts_line in extra_hosts_config:
if not isinstance(extra_hosts_line, six.string_types):
raise ConfigError(
"extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
extra_hosts_config
)
host, ip = extra_hosts_line.split(':')
extra_hosts_dict.update({host.strip(): ip.strip()})
extra_hosts_config = extra_hosts_dict
if isinstance(extra_hosts_config, dict):
return extra_hosts_config
raise ConfigError(
"extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
extra_hosts_config
)
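# e.g. build_extra_hosts(['somehost:162.242.195.82']) -> {'somehost': '162.242.195.82'},
# while a mapping such as {'somehost': '162.242.195.82'} is returned unchanged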
| apache-2.0 | -7,945,727,860,122,708,000 | 31.601677 | 134 | 0.571603 | false |
eayunstack/neutron | neutron/services/logapi/rpc/server.py | 1 | 2538 | # Copyright (C) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import helpers as log_helpers
import oslo_messaging
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import rpc as n_rpc
from neutron.services.logapi.common import constants as log_const
from neutron.services.logapi.common import db_api
class LoggingApiSkeleton(object):
"""Skeleton proxy code for agent->server communication."""
# History
# 1.0 Initial version
target = oslo_messaging.Target(
version='1.0', namespace=log_const.RPC_NAMESPACE_LOGGING)
def __init__(self):
self.conn = n_rpc.create_connection()
self.conn.create_consumer(log_const.LOGGING_PLUGIN, [self],
fanout=False)
@log_helpers.log_method_call
def get_sg_log_info_for_port(self, context, port_id):
return db_api.get_sg_log_info_for_port(context, port_id)
@log_helpers.log_method_call
def get_sg_log_info_for_log_resources(self, context, log_resources):
return db_api.get_sg_log_info_for_log_resources(context, log_resources)
class LoggingApiNotification(object):
def __init__(self):
self.notification_api = resources_rpc.ResourcesPushRpcApi()
@log_helpers.log_method_call
def create_log(self, context, log_obj):
self.notification_api.push(context, [log_obj], events.CREATED)
@log_helpers.log_method_call
def update_log(self, context, log_obj):
self.notification_api.push(context, [log_obj], events.UPDATED)
@log_helpers.log_method_call
def delete_log(self, context, log_obj):
self.notification_api.push(context, [log_obj], events.DELETED)
@log_helpers.log_method_call
def resource_update(self, context, log_objs):
"""Tell to agent when resources related to log_objects updated"""
self.notification_api.push(context, log_objs, events.UPDATED)
| apache-2.0 | 587,784,051,444,256,500 | 35.782609 | 79 | 0.702522 | false |
ndanielsen/data5 | hw/hw7/hw7.py | 1 | 4215 | import time
import itertools
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import scipy
class Glass(object):
def __init__(self, file, trials, knn_num, max_features):
self.header_row = ['Id', 'RI', 'Na', 'Mg', 'Al', 'Si', 'K', 'Ca', 'Ba', 'Fe', 'Type']
self.df = pd.read_csv(file, names=self.header_row)
self.trials = trials
self.knn_num = knn_num
self.max_features = max_features
self.y = "Type" ### Test values
self.Id = "Id" ### Id Column
def setUP(self):
pass
def test(self):
print self.df.head(5)
def clean(self):
pass
def combo_gen(self):
"""
Generates every possible combination of the numeric feature columns (excluding the Id and Type columns)
"""
dfnum = self.df._get_numeric_data()
del dfnum[self.y]
del dfnum[self.Id]
lst = []
for col in dfnum.columns:
lst.append(col)
if len(lst) < self.max_features:
self.max_features = len(lst)
k = self.max_features
combo = []
for i in xrange(1, k+1):
for x in itertools.combinations(lst, i):
combo.append(list(x) )
return combo
def evaluate(self, X, y):
"""
Evaluates numerous train-test-split iterations across a range of nearest-neighbor values.
Returns the highest average accuracy score together with the corresponding number of neighbors.
"""
results = {}
for state in xrange(1, self.trials): ### one train-test-split per random_state value (1 to trials-1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=state)
for num in xrange(1, self.knn_num): # try each n_neighbors value from 1 to knn_num-1
knn = KNeighborsClassifier(n_neighbors=num)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
score = metrics.accuracy_score(y_test, y_pred)
if num not in results:
results[num] = list()
results[num].append(score)
else:
results[num].append(score)
report = []
for key, value in results.iteritems(): #reviews all results and returns the greatest average score with n_neighbors num
report.append((round((np.mean(value)), 4), key))
return max(report)
def feature_combination(self, combo):
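# scores every candidate feature combination with evaluate() and returns the best
# ((score, n_neighbors), feature_list) pair, together with the number of
# combinations tried and the elapsed seconds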
start = time.time()
column_list = []
for columns in combo:
X = self.df[columns].values
y = self.df[self.y].values
evaluation = self.evaluate(X, y)
column_list.append((evaluation, columns))
result = max(column_list)
timer = round(time.time() - start, 0)
return result, len(combo), timer
def report(self):
result, columns, timer = self.results
knn_test, permutations = result
percentage, knn_num = knn_test
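# naming note: 'columns' holds the number of feature combinations evaluated,
# 'permutations' the list of selected feature names, 'percentage' the best mean
# accuracy and 'knn_num' the winning n_neighbors value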
ts = time.time()
st = datetime.datetime.fromtimestamp(ts)
date = st.strftime('%Y-%m-%d')
clock = st.strftime('%H:%M:%S')
test_report = """ \
#Test on %r at %r
%r Random train-test-split trials \n
%r Maximum Features (columns)
%r Total Feature permutations \n
%r KNN neighbors evaluated (upper range)
\n
**%r Average Correct Prediction** \n
%r KNN Nearest Neighbors Selected \n
Features Selected: %r \n
\n
_%r seconds_ \n
""" % (date, clock, self.trials, self.max_features, columns, self.knn_num, percentage, knn_num, permutations, timer)
with open("readme.md", "a") as myfile:
myfile.write(test_report)
def main(self):
self.setUP()
self.clean()
combo = self.combo_gen()
self.results = self.feature_combination(combo)
self.report()
return self.results
if __name__ == '__main__':
glass = Glass("glass.csv", 10, 5, 10)
print glass.main()
| mit | 930,583,122,895,792,800 | 24.391566 | 127 | 0.562515 | false |
utkarsh-goswami/erpnext | erpnext/manufacturing/doctype/bom/bom.py | 1 | 19540 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, cstr, flt
from frappe import _
from erpnext.setup.utils import get_exchange_rate
from frappe.website.website_generator import WebsiteGenerator
from erpnext.stock.get_item_details import get_conversion_factor
from operator import itemgetter
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class BOM(WebsiteGenerator):
website = frappe._dict(
# page_title_field = "item_name",
condition_field = "show_in_website",
template = "templates/generators/bom.html"
)
def autoname(self):
names = frappe.db.sql_list("""select name from `tabBOM` where item=%s""", self.item)
if names:
# name can be BOM/ITEM/001, BOM/ITEM/001-1, BOM-ITEM-001, BOM-ITEM-001-1
# split by item
names = [name.split(self.item)[-1][1:] for name in names]
# split by (-) if cancelled
names = [cint(name.split('-')[-1]) for name in names]
idx = max(names) + 1
else:
idx = 1
self.name = 'BOM-' + self.item + ('-%.3i' % idx)
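# e.g. (hypothetical) existing BOMs 'BOM-FG-001' and 'BOM-FG-002' for item 'FG'
# reduce to the numbers [1, 2], so the next name becomes 'BOM-FG-003'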
def validate(self):
# if not self.route:
self.route = frappe.scrub(self.name).replace('_', '-')
self.clear_operations()
self.validate_main_item()
self.validate_currency()
self.set_conversion_rate()
from erpnext.utilities.transaction_base import validate_uom_is_integer
validate_uom_is_integer(self, "stock_uom", "stock_qty", "BOM Item")
self.validate_materials()
self.set_bom_material_details()
self.validate_operations()
self.calculate_cost()
def get_context(self, context):
context.parents = [{'name': 'boms', 'title': _('All BOMs') }]
def on_update(self):
self.check_recursion()
self.update_stock_qty()
self.update_exploded_items()
def on_submit(self):
self.manage_default_bom()
def on_cancel(self):
frappe.db.set(self, "is_active", 0)
frappe.db.set(self, "is_default", 0)
# check if used in any other bom
self.validate_bom_links()
self.manage_default_bom()
def on_update_after_submit(self):
self.validate_bom_links()
self.manage_default_bom()
def get_item_det(self, item_code):
item = frappe.db.sql("""select name, item_name, docstatus, description, image,
is_sub_contracted_item, stock_uom, default_bom, last_purchase_rate
from `tabItem` where name=%s""", item_code, as_dict = 1)
if not item:
frappe.throw(_("Item: {0} does not exist in the system").format(item_code))
return item
def validate_rm_item(self, item):
if (item[0]['name'] in [it.item_code for it in self.items]) and item[0]['name'] == self.item:
frappe.throw(_("Raw material cannot be same as main Item"))
def set_bom_material_details(self):
for item in self.get("items"):
ret = self.get_bom_material_detail({"item_code": item.item_code, "item_name": item.item_name, "bom_no": item.bom_no,
"stock_qty": item.stock_qty})
for r in ret:
if not item.get(r):
item.set(r, ret[r])
self.validate_bom_currecny(item)
def get_bom_material_detail(self, args=None):
""" Get raw material details like uom, desc and rate"""
if not args:
args = frappe.form_dict.get('args')
if isinstance(args, basestring):
import json
args = json.loads(args)
item = self.get_item_det(args['item_code'])
self.validate_rm_item(item)
args['bom_no'] = args['bom_no'] or item and cstr(item[0]['default_bom']) or ''
args.update(item[0])
rate = self.get_rm_rate(args)
ret_item = {
'item_name' : item and args['item_name'] or '',
'description' : item and args['description'] or '',
'image' : item and args['image'] or '',
'stock_uom' : item and args['stock_uom'] or '',
'uom' : item and args['stock_uom'] or '',
'conversion_factor' : 1,
'bom_no' : args['bom_no'],
'rate' : rate,
'stock_qty' : args.get("qty") or args.get("stock_qty") or 1,
'base_rate' : rate if self.company_currency() == self.currency else rate * self.conversion_rate
}
return ret_item
def validate_bom_currecny(self, item):
if item.get('bom_no') and frappe.db.get_value('BOM', item.get('bom_no'), 'currency') != self.currency:
frappe.throw(_("Row {0}: Currency of the BOM #{1} should be equal to the selected currency {2}").format(item.idx, item.bom_no, self.currency))
def get_rm_rate(self, arg):
""" Get raw material rate as per selected method, if bom exists takes bom cost """
rate = 0
if arg.get('scrap_items'):
rate = self.get_valuation_rate(arg)
elif arg:
if self.rm_cost_as_per == 'Valuation Rate':
rate = self.get_valuation_rate(arg)
elif self.rm_cost_as_per == 'Last Purchase Rate':
rate = arg['last_purchase_rate']
elif self.rm_cost_as_per == "Price List":
if not self.buying_price_list:
frappe.throw(_("Please select Price List"))
rate = frappe.db.get_value("Item Price", {"price_list": self.buying_price_list,
"item_code": arg["item_code"]}, "price_list_rate") or 0
if not rate and arg['bom_no']:
rate = self.get_bom_unitcost(arg['bom_no'])
return rate
def update_cost(self):
if self.docstatus == 2:
return
for d in self.get("items"):
rate = self.get_bom_material_detail({'item_code': d.item_code, 'bom_no': d.bom_no,
'stock_qty': d.stock_qty})["rate"]
if rate:
d.rate = rate
if self.docstatus == 1:
self.flags.ignore_validate_update_after_submit = True
self.calculate_cost()
self.save()
self.update_exploded_items()
frappe.msgprint(_("Cost Updated"))
def get_bom_unitcost(self, bom_no):
bom = frappe.db.sql("""select name, total_cost/quantity as unit_cost from `tabBOM`
where is_active = 1 and name = %s""", bom_no, as_dict=1)
return bom and bom[0]['unit_cost'] or 0
def get_valuation_rate(self, args):
""" Get weighted average of valuation rate from all warehouses """
total_qty, total_value, valuation_rate = 0.0, 0.0, 0.0
for d in frappe.db.sql("""select actual_qty, stock_value from `tabBin`
where item_code=%s""", args['item_code'], as_dict=1):
total_qty += flt(d.actual_qty)
total_value += flt(d.stock_value)
if total_qty:
valuation_rate = total_value / total_qty
if valuation_rate <= 0:
last_valuation_rate = frappe.db.sql("""select valuation_rate
from `tabStock Ledger Entry`
where item_code = %s and valuation_rate > 0
order by posting_date desc, posting_time desc, name desc limit 1""", args['item_code'])
valuation_rate = flt(last_valuation_rate[0][0]) if last_valuation_rate else 0
return valuation_rate
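# Worked example (invented figures, not from the original code): with two Bin
# rows (actual_qty=10, stock_value=50) and (actual_qty=30, stock_value=90),
# total_qty = 40 and total_value = 140, so the weighted average
# valuation_rate = 140 / 40 = 3.5; only if that result is <= 0 does the method
# fall back to the latest positive Stock Ledger Entry rate.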
def manage_default_bom(self):
""" Uncheck others if current one is selected as default,
update default bom in item master
"""
if self.is_default and self.is_active:
from frappe.model.utils import set_default
set_default(self, "item")
item = frappe.get_doc("Item", self.item)
if item.default_bom != self.name:
item.default_bom = self.name
item.save(ignore_permissions = True)
else:
frappe.db.set(self, "is_default", 0)
item = frappe.get_doc("Item", self.item)
if item.default_bom == self.name:
item.default_bom = None
item.save(ignore_permissions = True)
def clear_operations(self):
if not self.with_operations:
self.set('operations', [])
def validate_main_item(self):
""" Validate main FG item"""
item = self.get_item_det(self.item)
if not item:
frappe.throw(_("Item {0} does not exist in the system or has expired").format(self.item))
else:
ret = frappe.db.get_value("Item", self.item, ["description", "stock_uom", "item_name"])
self.description = ret[0]
self.uom = ret[1]
self.item_name= ret[2]
if not self.quantity:
frappe.throw(_("Quantity should be greater than 0"))
def validate_currency(self):
if self.rm_cost_as_per == 'Price List' and \
frappe.db.get_value('Price List', self.buying_price_list, 'currency') != self.currency:
frappe.throw(_("Currency of the price list {0} is not similar with the selected currency {1}").format(self.buying_price_list, self.currency))
def update_stock_qty(self):
for m in self.get('items'):
if not m.conversion_factor:
m.conversion_factor = flt(get_conversion_factor(m.item_code, m.uom)['conversion_factor'])
if m.uom and m.qty:
m.stock_qty = flt(m.conversion_factor)*flt(m.qty)
if not m.uom and m.stock_uom:
m.uom = m.stock_uom
m.qty = m.stock_qty
def set_conversion_rate(self):
self.conversion_rate = get_exchange_rate(self.currency, self.company_currency())
def validate_materials(self):
""" Validate raw material entries """
def get_duplicates(lst):
seen = set()
seen_add = seen.add
for item in lst:
if item.item_code in seen or seen_add(item.item_code):
yield item
if not self.get('items'):
frappe.throw(_("Raw Materials cannot be blank."))
check_list = []
for m in self.get('items'):
if m.bom_no:
validate_bom_no(m.item_code, m.bom_no)
if flt(m.stock_qty) <= 0:
frappe.throw(_("Quantity required for Item {0} in row {1}").format(m.item_code, m.idx))
check_list.append(m)
duplicate_items = list(get_duplicates(check_list))
if duplicate_items:
li = []
for i in duplicate_items:
li.append("{0} on row {1}".format(i.item_code, i.idx))
duplicate_list = '<br>' + '<br>'.join(li)
frappe.throw(_("Same item has been entered multiple times. {list}").format(list=duplicate_list))
def check_recursion(self):
""" Check whether recursion occurs in any bom"""
check_list = [['parent', 'bom_no', 'parent'], ['bom_no', 'parent', 'child']]
for d in check_list:
bom_list, count = [self.name], 0
while (len(bom_list) > count ):
boms = frappe.db.sql(" select %s from `tabBOM Item` where %s = %s " %
(d[0], d[1], '%s'), cstr(bom_list[count]))
count = count + 1
for b in boms:
if b[0] == self.name:
frappe.throw(_("BOM recursion: {0} cannot be parent or child of {2}").format(b[0], self.name))
if b[0]:
bom_list.append(b[0])
def update_cost_and_exploded_items(self, bom_list=[]):
bom_list = self.traverse_tree(bom_list)
for bom in bom_list:
bom_obj = frappe.get_doc("BOM", bom)
bom_obj.on_update()
return bom_list
def traverse_tree(self, bom_list=None):
def _get_children(bom_no):
return [cstr(d[0]) for d in frappe.db.sql("""select bom_no from `tabBOM Item`
where parent = %s and ifnull(bom_no, '') != ''""", bom_no)]
count = 0
if not bom_list:
bom_list = []
if self.name not in bom_list:
bom_list.append(self.name)
while(count < len(bom_list)):
for child_bom in _get_children(bom_list[count]):
if child_bom not in bom_list:
bom_list.append(child_bom)
count += 1
bom_list.reverse()
return bom_list
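# Illustrative traversal (assumed BOM tree, not real data): if self.name is
# 'BOM-A' with children 'BOM-B' and 'BOM-C', and 'BOM-B' has child 'BOM-D',
# the breadth-first walk builds ['BOM-A', 'BOM-B', 'BOM-C', 'BOM-D'] and the
# final reverse() returns ['BOM-D', 'BOM-C', 'BOM-B', 'BOM-A'], so
# update_cost_and_exploded_items refreshes child BOMs before their parents.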
def calculate_cost(self):
"""Calculate bom totals"""
self.calculate_op_cost()
self.calculate_rm_cost()
self.calculate_sm_cost()
self.total_cost = self.operating_cost + self.raw_material_cost - self.scrap_material_cost
self.base_total_cost = self.base_operating_cost + self.base_raw_material_cost - self.base_scrap_material_cost
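# Numeric sketch (made-up figures): operating_cost=100, raw_material_cost=400
# and scrap_material_cost=20 give total_cost = 100 + 400 - 20 = 480; the base_*
# line applies the same formula to the company-currency amounts.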
def calculate_op_cost(self):
"""Update workstation rate and calculates totals"""
self.operating_cost = 0
self.base_operating_cost = 0
for d in self.get('operations'):
if d.workstation:
if not d.hour_rate:
d.hour_rate = flt(frappe.db.get_value("Workstation", d.workstation, "hour_rate"))
if d.hour_rate and d.time_in_mins:
d.operating_cost = flt(d.hour_rate) * flt(d.time_in_mins) / 60.0
d.base_hour_rate = flt(d.hour_rate) * flt(self.conversion_rate)
d.base_operating_cost = flt(d.base_hour_rate) * flt(d.time_in_mins) / 60.0
self.operating_cost += flt(d.operating_cost)
self.base_operating_cost += flt(d.base_operating_cost)
def calculate_rm_cost(self):
"""Fetch RM rate as per today's valuation rate and calculate totals"""
total_rm_cost = 0
base_total_rm_cost = 0
for d in self.get('items'):
if d.bom_no:
d.rate = self.get_bom_unitcost(d.bom_no)
d.base_rate = flt(d.rate) * flt(self.conversion_rate)
d.amount = flt(d.rate, self.precision("rate", d)) * flt(d.stock_qty, self.precision("stock_qty", d))
d.base_amount = d.amount * flt(self.conversion_rate)
d.qty_consumed_per_unit = flt(d.stock_qty, self.precision("stock_qty", d)) / flt(self.quantity, self.precision("quantity"))
total_rm_cost += d.amount
base_total_rm_cost += d.base_amount
self.raw_material_cost = total_rm_cost
self.base_raw_material_cost = base_total_rm_cost
def calculate_sm_cost(self):
"""Fetch RM rate as per today's valuation rate and calculate totals"""
total_sm_cost = 0
base_total_sm_cost = 0
for d in self.get('scrap_items'):
d.base_rate = d.rate * self.conversion_rate
d.amount = flt(d.rate, self.precision("rate", d)) * flt(d.stock_qty, self.precision("stock_qty", d))
d.base_amount = d.amount * self.conversion_rate
total_sm_cost += d.amount
base_total_sm_cost += d.base_amount
self.scrap_material_cost = total_sm_cost
self.base_scrap_material_cost = base_total_sm_cost
def update_exploded_items(self):
""" Update Flat BOM, following will be correct data"""
self.get_exploded_items()
self.add_exploded_items()
def get_exploded_items(self):
""" Get all raw materials including items from child bom"""
self.cur_exploded_items = {}
for d in self.get('items'):
if d.bom_no:
self.get_child_exploded_items(d.bom_no, d.stock_qty)
else:
self.add_to_cur_exploded_items(frappe._dict({
'item_code' : d.item_code,
'item_name' : d.item_name,
'description' : d.description,
'image' : d.image,
'stock_uom' : d.stock_uom,
'stock_qty' : flt(d.stock_qty),
'rate' : d.base_rate,
}))
def company_currency(self):
return frappe.db.get_value('Company', self.company, 'default_currency')
def add_to_cur_exploded_items(self, args):
if self.cur_exploded_items.get(args.item_code):
self.cur_exploded_items[args.item_code]["stock_qty"] += args.stock_qty
else:
self.cur_exploded_items[args.item_code] = args
def get_child_exploded_items(self, bom_no, stock_qty):
""" Add all items from Flat BOM of child BOM"""
# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
child_fb_items = frappe.db.sql("""select bom_item.item_code, bom_item.item_name, bom_item.description,
bom_item.stock_uom, bom_item.stock_qty, bom_item.rate,
bom_item.stock_qty / ifnull(bom.quantity, 1) as qty_consumed_per_unit
from `tabBOM Explosion Item` bom_item, tabBOM bom
where bom_item.parent = bom.name and bom.name = %s and bom.docstatus = 1""", bom_no, as_dict = 1)
for d in child_fb_items:
self.add_to_cur_exploded_items(frappe._dict({
'item_code' : d['item_code'],
'item_name' : d['item_name'],
'description' : d['description'],
'stock_uom' : d['stock_uom'],
'stock_qty' : d['qty_consumed_per_unit']*stock_qty,
'rate' : flt(d['rate']),
}))
def add_exploded_items(self):
"Add items to Flat BOM table"
frappe.db.sql("""delete from `tabBOM Explosion Item` where parent=%s""", self.name)
self.set('exploded_items', [])
for d in sorted(self.cur_exploded_items, key=itemgetter(0)):
ch = self.append('exploded_items', {})
for i in self.cur_exploded_items[d].keys():
ch.set(i, self.cur_exploded_items[d][i])
ch.amount = flt(ch.stock_qty) * flt(ch.rate)
ch.qty_consumed_per_unit = flt(ch.stock_qty) / flt(self.quantity)
ch.docstatus = self.docstatus
ch.db_insert()
def validate_bom_links(self):
if not self.is_active:
act_pbom = frappe.db.sql("""select distinct bom_item.parent from `tabBOM Item` bom_item
where bom_item.bom_no = %s and bom_item.docstatus = 1
and exists (select * from `tabBOM` where name = bom_item.parent
and docstatus = 1 and is_active = 1)""", self.name)
if act_pbom and act_pbom[0][0]:
frappe.throw(_("Cannot deactivate or cancel BOM as it is linked with other BOMs"))
def validate_operations(self):
if self.with_operations and not self.get('operations'):
frappe.throw(_("Operations cannot be left blank"))
if self.with_operations:
for d in self.operations:
if not d.description:
d.description = frappe.db.get_value('Operation', d.operation, 'description')
def get_list_context(context):
context.title = _("Bill of Materials")
# context.introduction = _('Boms')
def get_bom_items_as_dict(bom, company, qty=1, fetch_exploded=1, fetch_scrap_items=0):
item_dict = {}
# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
query = """select
bom_item.item_code,
item.item_name,
sum(bom_item.stock_qty/ifnull(bom.quantity, 1)) * %(qty)s as qty,
item.description,
item.image,
item.stock_uom,
item.default_warehouse,
item.expense_account as expense_account,
item.buying_cost_center as cost_center
from
`tab{table}` bom_item, `tabBOM` bom, `tabItem` item
where
bom_item.docstatus < 2
and bom.name = %(bom)s
and bom_item.parent = bom.name
and item.name = bom_item.item_code
and is_stock_item = 1
{conditions}
group by item_code, stock_uom"""
if fetch_exploded:
query = query.format(table="BOM Explosion Item",
conditions="""and item.is_sub_contracted_item = 0""")
items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True)
elif fetch_scrap_items:
query = query.format(table="BOM Scrap Item", conditions="")
items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True)
else:
query = query.format(table="BOM Item", conditions="")
items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True)
for item in items:
if item_dict.has_key(item.item_code):
item_dict[item.item_code]["qty"] += flt(item.qty)
else:
item_dict[item.item_code] = item
for item, item_details in item_dict.items():
for d in [["Account", "expense_account", "default_expense_account"],
["Cost Center", "cost_center", "cost_center"], ["Warehouse", "default_warehouse", ""]]:
company_in_record = frappe.db.get_value(d[0], item_details.get(d[1]), "company")
if not item_details.get(d[1]) or (company_in_record and company != company_in_record):
item_dict[item][d[1]] = frappe.db.get_value("Company", company, d[2]) if d[2] else None
return item_dict
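# Example usage sketch (the BOM name and company are placeholders, not real records):
# items = get_bom_items_as_dict('BOM-ITEM-001', 'My Company', qty=10, fetch_exploded=1)
# returns {item_code: row} where each row carries qty scaled to 10 finished units
# plus description, image, stock_uom, default_warehouse, expense_account and
# cost_center, with company defaults substituted when the linked account,
# cost center or warehouse belongs to another company.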
@frappe.whitelist()
def get_bom_items(bom, company, qty=1, fetch_exploded=1):
items = get_bom_items_as_dict(bom, company, qty, fetch_exploded).values()
items.sort(lambda a, b: a.item_code > b.item_code and 1 or -1)
return items
def validate_bom_no(item, bom_no):
"""Validate BOM No of sub-contracted items"""
bom = frappe.get_doc("BOM", bom_no)
if not bom.is_active:
frappe.throw(_("BOM {0} must be active").format(bom_no))
if bom.docstatus != 1:
if not getattr(frappe.flags, "in_test", False):
frappe.throw(_("BOM {0} must be submitted").format(bom_no))
if item and not (bom.item.lower() == item.lower() or \
bom.item.lower() == cstr(frappe.db.get_value("Item", item, "variant_of")).lower()):
frappe.throw(_("BOM {0} does not belong to Item {1}").format(bom_no, item))
@frappe.whitelist()
def get_children():
if frappe.form_dict.parent:
return frappe.db.sql("""select
bom_item.item_code,
bom_item.bom_no as value,
bom_item.stock_qty,
if(ifnull(bom_item.bom_no, "")!="", 1, 0) as expandable,
item.image,
item.description
from `tabBOM Item` bom_item, tabItem item
where bom_item.parent=%s
and bom_item.item_code = item.name
order by bom_item.idx
""", frappe.form_dict.parent, as_dict=True) | gpl-3.0 | 8,606,196,022,705,561,000 | 33.585841 | 145 | 0.66172 | false |
elbeardmorez/quodlibet | quodlibet/tests/test_util_modulescanner.py | 1 | 6202 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import imp
import sys
import shutil
import py_compile
from quodlibet.util.modulescanner import ModuleScanner
from quodlibet.util.importhelper import get_importables, load_dir_modules
from tests import TestCase, mkdtemp
def py_compile_legacy(file_):
# so we get the same result on py2/3
py_compile.compile(file_, cfile=file_ + "c")
class TModuleScanner(TestCase):
def setUp(self):
self.d = mkdtemp("ql-mod")
sys.modules["qlfake"] = imp.new_module("qlfake")
def tearDown(self):
del sys.modules["qlfake"]
shutil.rmtree(self.d)
def _create_mod(self, name, package=None):
if package is not None:
base = os.path.join(self.d, package)
else:
base = self.d
return open(os.path.join(base, name), "wb")
def _create_pkg(self, name):
base = os.path.join(self.d, name)
os.mkdir(base)
return open(os.path.join(base, "__init__.py"), "wb")
def test_importables(self):
self.failUnlessEqual(list(get_importables(self.d)), [])
h = self._create_mod("foo.py")
h.close()
self.failUnlessEqual(list(get_importables(self.d))[0],
("foo", h.name, [h.name]))
def test_importables_ignore_init(self):
h = self._create_mod("foo7.py")
h.close()
self._create_mod("__init__.py").close()
self.failUnlessEqual(list(get_importables(self.d))[0],
("foo7", h.name, [h.name]))
def test_importables_package(self):
h = self._create_pkg("foobar")
self.failUnlessEqual(list(get_importables(self.d))[0],
("foobar", os.path.dirname(h.name), [h.name]))
h.close()
def test_importables_package_deps(self):
h = self._create_pkg("foobar3")
h2 = self._create_mod("sub.py", "foobar3")
name, path, deps = list(get_importables(self.d))[0]
self.failUnlessEqual(name, "foobar3")
self.failUnlessEqual(path, os.path.dirname(h.name))
self.failUnlessEqual(set(deps), {h.name, h2.name})
h2.close()
h.close()
def test_load_dir_modules(self):
h = self._create_mod("x.py")
h.write(b"test=42\n")
h.close()
mods = load_dir_modules(self.d, "qlfake")
self.failUnlessEqual(len(mods), 1)
self.failUnlessEqual(mods[0].test, 42)
def test_load_dir_modules_compiled_ignore(self):
h = self._create_mod("x1.py")
h.write(b"test=24\n")
h.close()
py_compile_legacy(h.name)
os.unlink(h.name)
assert os.listdir(self.d) == ["x1.pyc"]
mods = load_dir_modules(self.d, "qlfake")
self.failUnlessEqual(len(mods), 0)
def test_load_dir_modules_compiled(self):
h = self._create_mod("x1.py")
h.write(b"test=99\n")
h.close()
py_compile_legacy(h.name)
os.unlink(h.name)
assert os.listdir(self.d) == ["x1.pyc"]
mods = load_dir_modules(self.d, "qlfake", load_compiled=True)
self.failUnlessEqual(len(mods), 1)
self.failUnlessEqual(mods[0].test, 99)
def test_load_dir_modules_both(self):
h = self._create_mod("x1.py")
h.write(b"test=99\n")
h.close()
py_compile_legacy(h.name)
self.failUnlessEqual(set(os.listdir(self.d)), {"x1.pyc", "x1.py"})
mods = load_dir_modules(self.d, "qlfake", load_compiled=True)
self.failUnlessEqual(len(mods), 1)
self.failUnlessEqual(mods[0].test, 99)
def test_load_dir_modules_packages(self):
h = self._create_pkg("somepkg2")
h2 = self._create_mod("sub.py", "somepkg2")
h2.write(b"test=456\n")
h2.close()
h.write(b"from .sub import *\nmain=654\n")
h.close()
mods = load_dir_modules(self.d, "qlfake")
self.failUnlessEqual(len(mods), 1)
self.failUnlessEqual(mods[0].test, 456)
def test_scanner_add(self):
self._create_mod("q1.py").close()
self._create_mod("q2.py").close()
s = ModuleScanner([self.d])
self.failIf(s.modules)
removed, added = s.rescan()
self.failIf(removed)
self.failUnlessEqual(set(added), {"q1", "q2"})
self.failUnlessEqual(len(s.modules), 2)
self.failUnlessEqual(len(s.failures), 0)
def test_unimportable_package(self):
self._create_pkg("_foobar").close()
s = ModuleScanner([self.d])
self.failIf(s.modules)
removed, added = s.rescan()
self.failIf(added)
self.failIf(removed)
def test_scanner_remove(self):
h = self._create_mod("q3.py")
h.close()
s = ModuleScanner([self.d])
s.rescan()
os.remove(h.name)
try:
os.remove(h.name + "c")
except OSError:
pass
removed, added = s.rescan()
self.failIf(added)
self.failUnlessEqual(removed, ["q3"])
self.failUnlessEqual(len(s.modules), 0)
self.failUnlessEqual(len(s.failures), 0)
def test_scanner_error(self):
h = self._create_mod("q4.py")
h.write(b"1syntaxerror\n")
h.close()
s = ModuleScanner([self.d])
removed, added = s.rescan()
self.failIf(added)
self.failIf(removed)
self.failUnlessEqual(len(s.failures), 1)
self.failUnless("q4" in s.failures)
def test_scanner_add_package(self):
h = self._create_pkg("somepkg")
h2 = self._create_mod("sub.py", "somepkg")
h2.write(b"test=123\n")
h2.close()
h.write(b"from .sub import *\nmain=321\n")
h.close()
s = ModuleScanner([self.d])
removed, added = s.rescan()
self.failUnlessEqual(added, ["somepkg"])
self.failUnlessEqual(s.modules["somepkg"].module.main, 321)
self.failUnlessEqual(s.modules["somepkg"].module.test, 123)
| gpl-2.0 | -8,343,339,832,447,477,000 | 32.344086 | 75 | 0.582715 | false |
azubiaga/tweets2csv | tweepy/models.py | 1 | 12340 | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from tweepy.error import TweepError
from tweepy.utils import parse_datetime, parse_html_value, parse_a_href
class ResultSet(list):
"""A list like object that holds results from a Twitter API query."""
def __init__(self, max_id=None, since_id=None):
super(ResultSet, self).__init__()
self._max_id = max_id
self._since_id = since_id
@property
def max_id(self):
if self._max_id:
return self._max_id
ids = self.ids()
return max(ids) if ids else None
@property
def since_id(self):
if self._since_id:
return self._since_id
ids = self.ids()
return min(ids) if ids else None
def ids(self):
return [item.id for item in self if hasattr(item, 'id')]
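# Illustrative note (hypothetical values): max_id and since_id fall back to the
# largest and smallest status id present in the list, e.g. a ResultSet holding
# ids [103, 101, 102] reports max_id == 103 and since_id == 101 unless explicit
# values were supplied, as SearchResults.parse does from the API metadata.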
class Model(object):
def __init__(self, api=None):
self._api = api
def __getstate__(self):
# pickle
pickle = dict(self.__dict__)
try:
del pickle['_api'] # do not pickle the API reference
except KeyError:
pass
return pickle
@classmethod
def parse(cls, api, json):
"""Parse a JSON object into a model instance."""
raise NotImplementedError
@classmethod
def parse_list(cls, api, json_list):
"""Parse a list of JSON objects into a result set of model instances."""
results = ResultSet()
for obj in json_list:
if obj:
results.append(cls.parse(api, obj))
return results
class Status(Model):
@classmethod
def parse(cls, api, json):
status = cls(api)
for k, v in json.items():
if k == 'user':
user_model = getattr(api.parser.model_factory, 'user') if api else User
user = user_model.parse(api, v)
setattr(status, 'author', user)
setattr(status, 'user', user)  # DEPRECATED
elif k == 'created_at':
setattr(status, k, parse_datetime(v))
elif k == 'source':
if '<' in v:
setattr(status, k, parse_html_value(v))
setattr(status, 'source_url', parse_a_href(v))
else:
setattr(status, k, v)
setattr(status, 'source_url', None)
elif k == 'retweeted_status':
setattr(status, k, Status.parse(api, v))
elif k == 'place':
if v is not None:
setattr(status, k, Place.parse(api, v))
else:
setattr(status, k, None)
else:
setattr(status, k, v)
setattr(status, 'json', json)
return status
def destroy(self):
return self._api.destroy_status(self.id)
def retweet(self):
return self._api.retweet(self.id)
def retweets(self):
return self._api.retweets(self.id)
def favorite(self):
return self._api.create_favorite(self.id)
class User(Model):
@classmethod
def parse(cls, api, json):
user = cls(api)
for k, v in json.items():
if k == 'created_at':
setattr(user, k, parse_datetime(v))
elif k == 'status':
setattr(user, k, Status.parse(api, v))
elif k == 'following':
# twitter sets this to null if it is false
if v is True:
setattr(user, k, True)
else:
setattr(user, k, False)
else:
setattr(user, k, v)
return user
@classmethod
def parse_list(cls, api, json_list):
if isinstance(json_list, list):
item_list = json_list
else:
item_list = json_list['users']
results = ResultSet()
for obj in item_list:
results.append(cls.parse(api, obj))
return results
def timeline(self, **kargs):
return self._api.user_timeline(user_id=self.id, **kargs)
def friends(self, **kargs):
return self._api.friends(user_id=self.id, **kargs)
def followers(self, **kargs):
return self._api.followers(user_id=self.id, **kargs)
def follow(self):
self._api.create_friendship(user_id=self.id)
self.following = True
def unfollow(self):
self._api.destroy_friendship(user_id=self.id)
self.following = False
def lists_memberships(self, *args, **kargs):
return self._api.lists_memberships(user=self.screen_name, *args, **kargs)
def lists_subscriptions(self, *args, **kargs):
return self._api.lists_subscriptions(user=self.screen_name, *args, **kargs)
def lists(self, *args, **kargs):
return self._api.lists_all(user=self.screen_name, *args, **kargs)
def followers_ids(self, *args, **kargs):
return self._api.followers_ids(user_id=self.id, *args, **kargs)
class DirectMessage(Model):
@classmethod
def parse(cls, api, json):
dm = cls(api)
for k, v in json.items():
if k == 'sender' or k == 'recipient':
setattr(dm, k, User.parse(api, v))
elif k == 'created_at':
setattr(dm, k, parse_datetime(v))
else:
setattr(dm, k, v)
return dm
def destroy(self):
return self._api.destroy_direct_message(self.id)
class Friendship(Model):
@classmethod
def parse(cls, api, json):
relationship = json['relationship']
# parse source
source = cls(api)
for k, v in relationship['source'].items():
setattr(source, k, v)
# parse target
target = cls(api)
for k, v in relationship['target'].items():
setattr(target, k, v)
return source, target
class Category(Model):
@classmethod
def parse(cls, api, json):
category = cls(api)
for k, v in json.items():
setattr(category, k, v)
return category
class SavedSearch(Model):
@classmethod
def parse(cls, api, json):
ss = cls(api)
for k, v in json.items():
if k == 'created_at':
setattr(ss, k, parse_datetime(v))
else:
setattr(ss, k, v)
return ss
def destroy(self):
return self._api.destroy_saved_search(self.id)
class SearchResults(ResultSet):
@classmethod
def parse(cls, api, json):
metadata = json['search_metadata']
results = SearchResults(metadata.get('max_id'), metadata.get('since_id'))
results.refresh_url = metadata.get('refresh_url')
results.completed_in = metadata.get('completed_in')
results.query = metadata.get('query')
results.count = metadata.get('count')
results.next_results = metadata.get('next_results')
for status in json['statuses']:
results.append(Status.parse(api, status))
return results
class List(Model):
@classmethod
def parse(cls, api, json):
lst = List(api)
for k,v in json.items():
if k == 'user':
setattr(lst, k, User.parse(api, v))
elif k == 'created_at':
setattr(lst, k, parse_datetime(v))
else:
setattr(lst, k, v)
return lst
@classmethod
def parse_list(cls, api, json_list, result_set=None):
results = ResultSet()
if isinstance(json_list, dict):
json_list = json_list['lists']
for obj in json_list:
results.append(cls.parse(api, obj))
return results
def update(self, **kargs):
return self._api.update_list(self.slug, **kargs)
def destroy(self):
return self._api.destroy_list(self.slug)
def timeline(self, **kargs):
return self._api.list_timeline(self.user.screen_name, self.slug, **kargs)
def add_member(self, id):
return self._api.add_list_member(self.slug, id)
def remove_member(self, id):
return self._api.remove_list_member(self.slug, id)
def members(self, **kargs):
return self._api.list_members(self.user.screen_name, self.slug, **kargs)
def is_member(self, id):
return self._api.is_list_member(self.user.screen_name, self.slug, id)
def subscribe(self):
return self._api.subscribe_list(self.user.screen_name, self.slug)
def unsubscribe(self):
return self._api.unsubscribe_list(self.user.screen_name, self.slug)
def subscribers(self, **kargs):
return self._api.list_subscribers(self.user.screen_name, self.slug, **kargs)
def is_subscribed(self, id):
return self._api.is_subscribed_list(self.user.screen_name, self.slug, id)
class Relation(Model):
@classmethod
def parse(cls, api, json):
result = cls(api)
for k,v in json.items():
if k == 'value' and json['kind'] in ['Tweet', 'LookedupStatus']:
setattr(result, k, Status.parse(api, v))
elif k == 'results':
setattr(result, k, Relation.parse_list(api, v))
else:
setattr(result, k, v)
return result
class Relationship(Model):
@classmethod
def parse(cls, api, json):
result = cls(api)
for k,v in json.items():
if k == 'connections':
setattr(result, 'is_following', 'following' in v)
setattr(result, 'is_followed_by', 'followed_by' in v)
else:
setattr(result, k, v)
return result
class JSONModel(Model):
@classmethod
def parse(cls, api, json):
return json
class IDModel(Model):
@classmethod
def parse(cls, api, json):
if isinstance(json, list):
return json
else:
return json['ids']
class BoundingBox(Model):
@classmethod
def parse(cls, api, json):
result = cls(api)
if json is not None:
for k, v in json.items():
setattr(result, k, v)
return result
def origin(self):
"""
Return longitude, latitude of southwest (bottom, left) corner of
bounding box, as a tuple.
This assumes that bounding box is always a rectangle, which
appears to be the case at present.
"""
return tuple(self.coordinates[0][0])
def corner(self):
"""
Return longitude, latitude of northeast (top, right) corner of
bounding box, as a tuple.
This assumes that bounding box is always a rectangle, which
appears to be the case at present.
"""
return tuple(self.coordinates[0][2])
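# Illustrative example (made-up coordinates): for a Twitter-style bounding box with
# self.coordinates == [[[-122.5, 37.7], [-122.3, 37.7], [-122.3, 37.8], [-122.5, 37.8]]]
# origin() returns (-122.5, 37.7) (southwest) and corner() returns (-122.3, 37.8)
# (northeast), assuming the usual counter-clockwise point order starting at the
# southwest corner.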
class Place(Model):
@classmethod
def parse(cls, api, json):
place = cls(api)
for k, v in json.items():
if k == 'bounding_box':
# bounding_box value may be null (None.)
# Example: "United States" (id=96683cc9126741d1)
if v is not None:
t = BoundingBox.parse(api, v)
else:
t = v
setattr(place, k, t)
elif k == 'contained_within':
# contained_within is a list of Places.
setattr(place, k, Place.parse_list(api, v))
else:
setattr(place, k, v)
return place
@classmethod
def parse_list(cls, api, json_list):
if isinstance(json_list, list):
item_list = json_list
else:
item_list = json_list['result']['places']
results = ResultSet()
for obj in item_list:
results.append(cls.parse(api, obj))
return results
class ModelFactory(object):
"""
Used by parsers for creating instances
of models. You may subclass this factory
to add your own extended models.
"""
status = Status
user = User
direct_message = DirectMessage
friendship = Friendship
saved_search = SavedSearch
search_results = SearchResults
category = Category
list = List
relation = Relation
relationship = Relationship
json = JSONModel
ids = IDModel
place = Place
bounding_box = BoundingBox
| mit | -7,217,803,486,821,469,000 | 27.498845 | 87 | 0.553809 | false |
evanbiederstedt/RRBSfun | methyl_PDR_cell/PDR_methyl_normal_pcell_by_cell1.py | 1 | 2364 |
import glob
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import os
os.chdir('/Users/evanbiederstedt/Downloads/RRBS_data_files')
pcells = glob.glob("RRBS_NormalBCD19pCD27pcell*")
newdf1 = pd.DataFrame()
for filename in pcells:
df = pd.read_table(filename)
df['filename'] = str(filename)
df = df.drop(['start', 'strand', 'avgWeightedEnt', 'CpGEntropy', 'tss', 'genes', 'exons', 'introns',
'promoter', 'cgi', 'geneDensity', 'ctcfUpstream', 'ctcfDownstream',
'ctcfDensity', 'geneDistalRegulatoryModules', 'vistaEnhancers', '3PrimeUTR', 'ctcfUpDistance', 'ctcfDownDistance',
'3PrimeUTRDistance', '5PrimeUTR', '5PrimeUTRDistance', 'firstExon',
'geneDistalRegulatoryModulesK562', 'geneDistalRegulatoryModulesK562Distance', 'hypoInHues64','hypoInHues64Distance',
'tssDistance', 'genesDistance', 'exonsDistance', 'intronsDistance', 'promoterDistance', 'cgiDistance',
'ctcf', 'ctcfDistance', 'geneDistalRegulatoryModulesDistance', 'vistaEnhancersDistance', 'firstExonDistance'], axis=1)
chromosomes = ['chr2', 'chr5', 'chr11']
df = df[(df["chr"].isin(chromosomes))]
num_bins = np.ceil(df["avgReadCpGs"].max()/1.25)
df["avgReadCpGs_binned"] = pd.cut(df['avgReadCpGs'], num_bins, labels=False)
df["read_stack_ID"] = (df.avgReadCpGs_binned.shift(1) != df.avgReadCpGs_binned).astype(int).cumsum()
df["total_reads"] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1)
df = df.groupby(["read_stack_ID"])[["filename", "thisMeth", "thisUnmeth", "methReadCount", "unmethReadCount", "mixedReadCount", "total_reads"]].sum()
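# Illustrative note (toy values, not real data): the shift/cumsum idiom above labels
# consecutive runs of identical bins, e.g. binned values [0, 0, 1, 1, 0] give
# read_stack_ID [1, 1, 2, 2, 3], so this groupby sums each run of neighbouring
# rows that fall in the same avgReadCpGs bin rather than the bin as a whole.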
df["filename"] = str(filename)
df = df[["filename", "thisMeth", "mixedReadCount", "total_reads"]].sum()
df["PDR_total"] = df["mixedReadCount"]/df["total_reads"]
df["methylation_total"] = df['thisMeth']/df['total_reads']
newdf1 = newdf1.append(df, ignore_index=True)
newdf1 = newdf1[["filename", "methylation_total", "PDR_total", "thisMeth", "mixedReadCount", "total_reads"]]
# export as .csv
newdf1.to_csv("Meth_PDR_cell_normalpcell.csv") | mit | -2,295,323,968,197,837,800 | 43.622642 | 153 | 0.62775 | false |
Huyuwei/tvm | tests/python/nightly/quantization/test_quantization_accuracy.py | 1 | 6399 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import namedtuple
import tvm
from tvm import relay
from tvm.relay import quantize as qtz
import mxnet as mx
from mxnet import gluon
import logging
import os
logging.basicConfig(level=logging.INFO)
Config = namedtuple('Config', ['model', 'nbit_input', 'dtype_input', 'nbit_output', 'dtype_output', 'global_scale', 'expected_acc'])
def get_val_data(model_name,
rec_val,
batch_size,
num_workers=4):
rec_val = os.path.expanduser(rec_val)
mean_rgb = [123.68, 116.779, 103.939]
std_rgb = [58.393, 57.12, 57.375]
def batch_fn(batch, ctx):
data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
return data, label
img_size = 299 if model_name == 'inceptionv3' else 224
val_data = mx.io.ImageRecordIter(
path_imgrec = rec_val,
preprocess_threads = num_workers,
shuffle = False,
batch_size = batch_size,
resize = 256,
data_shape = (3, img_size, img_size),
mean_r = mean_rgb[0],
mean_g = mean_rgb[1],
mean_b = mean_rgb[2],
std_r = std_rgb[0],
std_g = std_rgb[1],
std_b = std_rgb[2],
)
return val_data, batch_fn
def get_model(model_name, batch_size, qconfig, target=None, original=False, simulated=False):
gluon_model = gluon.model_zoo.vision.get_model(model_name, pretrained=True)
img_size = 299 if model_name == 'inceptionv3' else 224
data_shape = (batch_size, 3, img_size, img_size)
mod, params = relay.frontend.from_mxnet(gluon_model, {"data": data_shape})
net = mod['main']
with relay.build_config(opt_level=3):
qfunc = relay.quantize.prerequisite_optimize(net, params=params)
logging.debug('original')
logging.debug(qfunc.astext(show_meta_data=False))
if original:
return qfunc
with qconfig:
logging.debug('current quantize config')
logging.debug(qtz.current_qconfig())
qfunc = qtz.quantize(qfunc)
logging.debug('after quantize')
logging.debug(qfunc.astext(show_meta_data=False))
return qfunc
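# Hedged usage sketch (model name, bit widths and target are illustrative, not a
# prescribed configuration): something like
# qconfig = qtz.qconfig(skip_conv_layers=[0], nbit_input=8, dtype_input='int8',
#                       nbit_weight=8, global_scale=8.0)
# qfunc = get_model('resnet18_v1', 32, qconfig, tvm.target.cuda())
# yields the quantized Relay function that eval_acc below compiles and evaluates.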
def eval_acc(model, dataset, batch_fn, target=tvm.target.cuda(), ctx=tvm.gpu(), log_interval=100):
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(model, target)
# create runtime module
m = tvm.contrib.graph_runtime.create(graph, lib, ctx)
m.set_input(**params)
# set up evaluation metric
dataset.reset()
batch_size = dataset.batch_size
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
acc_top1.reset()
acc_top5.reset()
# Execute
for i, batch in enumerate(dataset):
data, label = batch_fn(batch, [mx.cpu(0)])
m.run(data=data[0].asnumpy())
out_arr = m.get_output(0)
acc_top1.update(label, [mx.nd.array(out_arr.asnumpy())])
acc_top5.update(label, [mx.nd.array(out_arr.asnumpy())])
if not (i + 1) % log_interval:
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
nsamples = (i + 1) * batch_size
logging.info('[%d samples] validation: acc-top1=%f acc-top5=%f', nsamples, top1, top5)
logging.info('[final] validation: acc-top1=%f acc-top5=%f', top1, top5)
return top1
def test_quantize_acc(cfg, rec_val):
qconfig = qtz.qconfig(skip_conv_layers=[0],
nbit_input=cfg.nbit_input,
nbit_weight=cfg.nbit_input,
global_scale=cfg.global_scale,
dtype_input=cfg.dtype_input,
dtype_weight=cfg.dtype_input,
dtype_activation=cfg.dtype_output,
debug_enabled_ops=None)
model = get_model(cfg.model, 32, qconfig, tvm.target.cuda())
val_data, batch_fn = get_val_data(cfg.model, rec_val=rec_val, batch_size=32)
acc = eval_acc(model, val_data, batch_fn)
assert acc > cfg.expected_acc
return acc
if __name__ == "__main__":
#TODO(for user): replace the line with the path to imagenet validation dataset
rec_val = "/scratch/tqchen/imagenet/val.rec"
results = []
configs = [
Config('mobilenetv2_1.0', nbit_input=8, dtype_input='int8', nbit_output=32, dtype_output='int32', global_scale=4.0, expected_acc=0.666),
Config('resnet18_v1', nbit_input=8, dtype_input='int8', nbit_output=16, dtype_output='int16', global_scale=8.0, expected_acc=0.692),
Config('resnet18_v1', nbit_input=8, dtype_input='int8', nbit_output=32, dtype_output='int32', global_scale=8.0, expected_acc=0.692),
Config('resnet34_v1', nbit_input=8, dtype_input='int8', nbit_output=32, dtype_output='int32', global_scale=8.0, expected_acc=0.733),
Config('resnet50_v1', nbit_input=8, dtype_input='int8', nbit_output=32, dtype_output='int32', global_scale=8.0, expected_acc=0.747),
Config('resnet101_v1', nbit_input=8, dtype_input='int8', nbit_output=32, dtype_output='int32', global_scale=8.0, expected_acc=0.756),
# TODO: need to fix accuracy
# Config('mobilenetv2_1.0', nbit_input=8, dtype_input='int8', nbit_output=16, dtype_output='int16', global_scale=4.0),
]
for config in configs:
acc = test_quantize_acc(config, rec_val)
results.append((config, acc))
for res in results:
print(res)
| apache-2.0 | -8,688,544,901,319,374,000 | 40.823529 | 144 | 0.623847 | false |
trujunzhang/djzhang-targets | cwgooglelinkedin/cwgooglelinkedin/spiders/googlelinkedin_spider.py | 1 | 2355 | # -*- coding: utf-8 -*-
from random import Random
import scrapy
from scrapy.selector import Selector, HtmlXPathSelector
from scrapy_webdriver.http import WebdriverRequest
# yield WebdriverRequest(_url, callback=self.parse_category_full_page)
from cwgooglelinkedin.items import GoogleLinkedIn
import urlparse
class GoogleLinkedInsSpider(scrapy.Spider):
name = "googlelinkedin"
allowed_domains = ["google.com"]
start_urls = [
'https://www.google.com/search?num=100&biw=1884&bih=1082&q=%22Small+Tree+*+*+*+*+*+manager%22+site%3Alinkedin.com%2Fin+-dir&oq=%22Small+Tree+*+*+*+*+*+manager%22+site%3Alinkedin.com%2Fin+-dir&gs_l=serp.3...7364.20727.0.21133.26.23.3.0.0.0.289.3356.0j14j3.17.0....0...1c.1.64.serp..8.0.0.KLxLM9h-CgA',
]
def __init__(self, name=None, **kwargs):
from cwgooglelinkedin.database_factory import DatabaseFactory, DatabaseTypes
self._cache_db = DatabaseFactory.get_database(DatabaseTypes.cache, kwargs['mongo_uri'])
self._history_db = DatabaseFactory.get_database(DatabaseTypes.history, kwargs['mongo_uri'])
from cwgooglelinkedin.parser.response_parser import ResponseParse
self._crawl_parser = ResponseParse()
super(GoogleLinkedInsSpider, self).__init__(name, **kwargs)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
return super(GoogleLinkedInsSpider, cls).from_crawler(crawler,
args,
mongo_uri=crawler.settings.get('MONGODB_SERVER')
)
def parse(self, response):
self._crawl_parser.parse_paginate(response.url, response, self._cache_db)
def parse_detail(self, response):
item = self._crawl_parser.parse(response.url, response)
yield item
yield scrapy.Request(item['cluster'], self.parse_cluster)
# yield scrapy.Request(response.url, self.parse_relatived_app)
# the below is that crawl a random relatived app.
select = '//a[@class="card-click-target"]'
sel = Selector(response)
navs = sel.xpath(select)
if not self._history_db.check_exist(abstractPath):
yield scrapy.Request(abstractPath, self.parse_detail, meta={'type': title})
| mit | -8,530,891,606,262,700,000 | 42.611111 | 308 | 0.639066 | false |
elbeardmorez/quodlibet | quodlibet/quodlibet/ext/events/trayicon/prefs.py | 1 | 3806 | # -*- coding: utf-8 -*-
# Copyright 2004-2006 Joe Wreschnig, Michael Urman, Iñigo Serna
# 2012 Christoph Reiter
# 2013,2017 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gtk
from quodlibet import _
from quodlibet import app
from quodlibet import qltk
from quodlibet.util import is_windows
from quodlibet.qltk import Icons
from quodlibet.pattern import Pattern
from quodlibet.qltk.entry import UndoEntry
from .util import pconfig
def supports_scrolling():
"""If our tray icon implementation supports scrolling"""
return not is_windows()
class Preferences(Gtk.VBox):
"""A small window to configure the tray icon's tooltip."""
def __init__(self):
super(Preferences, self).__init__(spacing=12)
self.set_border_width(6)
ccb = pconfig.ConfigCheckButton(_("Hide main window on close"),
'window_hide', populate=True)
self.pack_start(qltk.Frame(_("Behavior"), child=ccb), False, True, 0)
def on_scroll_changed(button, new_state):
if button.get_active():
pconfig.set("modifier_swap", new_state)
modifier_swap = pconfig.getboolean("modifier_swap")
scrollwheel_box = Gtk.VBox(spacing=0)
group = Gtk.RadioButton(
group=None, label=_("Scroll wheel adjusts volume"),
use_underline=True)
group.connect("toggled", on_scroll_changed, False)
group.set_active(not modifier_swap)
scrollwheel_box.pack_start(group, False, True, 0)
group = Gtk.RadioButton(
group=group, label=_("Scroll wheel changes song"),
use_underline=True)
group.connect("toggled", on_scroll_changed, True)
group.set_active(modifier_swap)
scrollwheel_box.pack_start(group, False, True, 0)
if supports_scrolling():
self.pack_start(
qltk.Frame(_("Scroll _Wheel"), child=scrollwheel_box),
True, True, 0)
box = Gtk.VBox(spacing=6)
entry_box = Gtk.HBox(spacing=6)
entry = UndoEntry()
entry_box.pack_start(entry, True, True, 0)
def on_reverted(*args):
pconfig.reset("tooltip")
entry.set_text(pconfig.gettext("tooltip"))
revert = Gtk.Button()
revert.add(Gtk.Image.new_from_icon_name(
Icons.DOCUMENT_REVERT, Gtk.IconSize.BUTTON))
revert.connect("clicked", on_reverted)
entry_box.pack_start(revert, False, True, 0)
box.pack_start(entry_box, False, True, 0)
preview = Gtk.Label()
preview.set_line_wrap(True)
preview_frame = Gtk.Frame(label=_("Preview"))
vbox = Gtk.VBox(margin=18)
vbox.pack_start(preview, False, False, 0)
preview_frame.add(vbox)
box.pack_start(preview_frame, False, True, 0)
tt_frame = qltk.Frame(_("Tooltip Display"), child=box)
tt_frame.get_label_widget().set_mnemonic_widget(entry)
self.pack_start(tt_frame, True, True, 0)
entry.connect('changed', self.__changed_entry, preview, preview_frame)
entry.set_text(pconfig.gettext("tooltip"))
for child in self.get_children():
child.show_all()
def __changed_entry(self, entry, label, frame):
text = entry.get_text()
if app.player.info is None:
text = _("Not playing")
else:
text = Pattern(text) % app.player.info
label.set_text(text)
frame.set_tooltip_text(text)
pconfig.set("tooltip", entry.get_text())
| gpl-2.0 | -3,058,056,470,447,531,500 | 32.672566 | 78 | 0.621288 | false |
kbrannan/PyHSPF | examples/evapotranspiration/etexample03.py | 2 | 6120 | # etexample03.py
#
# David J. Lampert ([email protected])
#
# last updated: 03/22/2015
#
# this example shows how to use the ETCalculator class to compute hourly
# reference evapotranspiration from the other time series after using the
# ClimateProcessor class to extract and aggregate the climate data from the
# World Wide Web. the first part is really the same as the previous example,
# with the differences coming at the end. the script consists of five parts:
#
# 1. download and aggregate climate data
# 2. get pan evaporation data
# 3. get areal-weighted average latitude, longitude, and elevation
# 4. calculate hourly reference evapotranspiration
# 5. aggregate hourly estimates and compare with pan evaporation observations
#
import os, datetime, pickle
from pyhspf.preprocessing import ClimateProcessor, ETCalculator
from shapefile import Reader
# output directory for data files
output = 'HSPF_data'
if not os.path.isdir(output): os.mkdir(output)
# start and end dates for data download
start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(2010, 1, 1)
# use the "subbasin_catchments" shapefile to define the data processing area
filename = 'subbasin_catchments'
if not os.path.isfile(filename + '.shp'):
print('error: file {} does not exist!'.format(filename))
raise
# make an instance of the ClimateProcessor to fetch the climate data
processor = ClimateProcessor()
# the Penman-Monteith Equation requires temperature, humidity of dewpoint,
# wind speed, and solar radiation, which can be obtained from the processor
processor.download_shapefile(filename, start, end, output, space = 0.)
# let's get the daily tmin, tmax, dewpoint, and wind speed from GSOD
tmax = processor.aggregate('GSOD', 'tmax', start, end)
tmin = processor.aggregate('GSOD', 'tmin', start, end)
dewt = processor.aggregate('GSOD', 'dewpoint', start, end)
wind = processor.aggregate('GSOD', 'wind', start, end)
# let's use the hourly METSTAT data from the NSRDB for solar radiation
solar = processor.aggregate('NSRDB', 'metstat', start, end)
# aggregate the hourly solar data to daily for plotting
dsolar = [sum(solar[i:i+24]) / 24 for i in range(0, 24 * (end-start).days, 24)]
# the ETCalculator class uses the Penman-Monteith Equation to estimate
# evapotranspiration time series; make an instance to use
calculator = ETCalculator()
# some of the parameters in the Penman-Monteith Equation depend on the
# geographic location so let's use the information in the shapefile to
# provide the average longitude, latitude, and elevation
sf = Reader(filename)
# make a list of the fields for each shape
fields = [f[0] for f in sf.fields]
# get the area, centroid and elevation of each shape
areas = [r[fields.index('AreaSqKm') - 1] for r in sf.records()]
xs = [r[fields.index('CenX') - 1] for r in sf.records()]
ys = [r[fields.index('CenY') - 1] for r in sf.records()]
zs = [r[fields.index('AvgElevM') - 1] for r in sf.records()]
# get the areal-weighted averages
lon = sum([a * x for a, x in zip(areas, xs)]) / sum(areas)
lat = sum([a * y for a, y in zip(areas, ys)]) / sum(areas)
elev = sum([a * z for a, z in zip(areas, zs)]) / sum(areas)
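# Quick numeric check of the area weighting (invented numbers): with two
# subbasins of 30 and 10 km2 at latitudes 40.0 and 44.0, the weighted average
# latitude is (30 * 40.0 + 10 * 44.0) / 40 = 41.0; the same weighting is applied
# to the centroid longitudes and mean elevations above.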
# add the information to the calculator
calculator.add_location(lon, lat, elev)
# add the daily time series to the calculator for plotting
calculator.add_timeseries('tmin', 'daily', start, tmin)
calculator.add_timeseries('tmax', 'daily', start, tmax)
calculator.add_timeseries('dewpoint', 'daily', start, dewt)
calculator.add_timeseries('wind', 'daily', start, wind)
calculator.add_timeseries('solar', 'daily', start, dsolar)
# calculate the hourly temperature time series
hourlytemps = calculator.interpolate_temperatures(start, end)
# assume the values for wind speed and dewpoint are constant throughout the day
hdewt = [v for v in dewt for i in range(24)]
hwind = [v for v in wind for i in range(24)]
# now add the hourly time series to the calculator
calculator.add_timeseries('temperature', 'hourly', start, hourlytemps)
calculator.add_timeseries('dewpoint', 'hourly', start, hdewt)
calculator.add_timeseries('wind', 'hourly', start, hwind)
# the solar radiation data from NSRDB are already hourly
calculator.add_timeseries('solar', 'hourly', start, solar)
# calculate the reference evapotranspiration (RET) time series from the hourly
# and daily Penman-Monteith Equation
calculator.penman_hourly(start, end)
# save the time series for later (i.e., to add to an HSPF Model)
RET = [e for e in calculator.hourly['RET'][1]]
data = start, 60, RET
filename = '{}/hourlyRET'.format(output)
with open(filename, 'wb') as f: pickle.dump(data, f)
# aggregate the hourly to daily and add it to the calculator for plotting
dRET = [sum(RET[i:i+24]) for i in range(0, len(RET), 24)]
calculator.add_timeseries('RET', 'daily', start, dRET)
# parse the GHCND data for pan evaporation observations and store the file
# paths (the ETCalculator will automatically extract the data from the file)
evaporations = []
for k, v in processor.metadata.ghcndstations.items():
if v['evap'] > 0: evaporations.append(k)
# the ETCalculator has a few public plotting methods including a plot similar
# to the first example (daily ET)
filename = '{}/referenceET'.format(output)
# the plotET method has a few optional keyword arguments including a dictionary
# of GHCND stations for comparing the model with pan evaporation data,
# start and end dates, and some other plotting parameters but using
# hourly estimates aggregated to a daily time step
calculator.plotET(stations = evaporations, output = filename, show = True)
# the "dayofyear" plot converts long time series to the water year
# (Oct 1, Year -- Sept 30, Year + 1). these plots provide a useful way to
# examine long-term trends in hydrology of the watershed
filename = '{}/dayofyearET'.format(output)
# note that there appears to be an error in the Upper Marlboro dataset in 2000
calculator.plotdayofyear(stations = evaporations, output = filename,
show = True)
| bsd-3-clause | -7,549,415,752,637,281,000 | 33.772727 | 80 | 0.728758 | false |
pythonprobr/metaprog101 | patterns/strategy/strategy_fn.py | 1 | 2476 |
"""
>>> joe = Customer('John Doe', 0)
>>> ann = Customer('Ann Smith', 1100)
>>> cart = [LineItem('banana', 3, .5),
... LineItem('apple', 10, 1.5),
... LineItem('watermellon', 5, 5.0)]
>>> Order(joe, cart, fidelity_promo)
<Order total: 41.50 due: 41.50>
>>> Order(ann, cart, fidelity_promo)
<Order total: 41.50 due: 39.42>
>>> banana_cart = [LineItem('banana', 30, .5),
... LineItem('apple', 10, 1.5)]
>>> Order(joe, banana_cart, bulk_promo)
<Order total: 30.00 due: 28.50>
>>> long_order = [LineItem(str(code), 1, 1.0) for code in range(10)]
>>> Order(joe, long_order, large_order_promo)
<Order total: 10.00 due: 9.30>
"""
from abc import ABCMeta, abstractmethod
from collections import namedtuple
Customer = namedtuple('Customer', 'name fidelity')
class LineItem:
def __init__(self, product, quantity, price):
self.__dict__.update(locals())
@property
def total(self):
return self.price * self.quantity
class Order:
def __init__(self, customer, cart, promotion=None):
self.customer = customer
self.cart = list(cart)
self.promotion = promotion
@property
def total(self):
try:
return self.__total
except AttributeError:
self.__total = sum(item.quantity * item.price
for item in self.cart)
return self.__total
def due(self):
if self.promotion is None:
discount = 0
else:
discount = self.promotion(self)
return self.total - discount
def __repr__(self):
fmt = '<Order total: {:.2f} due: {:.2f}>'
return fmt.format(self.total, self.due())
# Note: no abstract class
def fidelity_promo(order):
"""5% discount for customers with 1000 or more fidelity points"""
return order.total * .05 if order.customer.fidelity >= 1000 else 0
def bulk_promo(order):
"""10% discount for each LineItem with 20 or more units"""
discount = 0
for item in order.cart:
if item.quantity >= 20:
discount += item.total * .1
return discount
def large_order_promo(order):
"""7% discount for orders with 10 or more distinct items"""
distinct_items = {item.product for item in order.cart}
if len(distinct_items) >= 10:
return order.total * .07
strategies = [fidelity_promo, bulk_promo, large_order_promo]
| mit | -8,179,683,405,351,398,000 | 26.820225 | 72 | 0.581987 | false |
LCAS/spqrel_tools | slu4p/slu_utils.py | 1 | 1032 | def lines_to_list(_file):
with open(_file) as f:
_list = f.readlines()
return [x.strip() for x in _list]
def normalize(sublist):
m = .0
for trans in sublist:
m = m + sublist[trans]
for trans in sublist:
sublist[trans] = sublist[trans] / m
return sublist
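# Illustrative example (made-up hypotheses): normalize({'hello': 2.0, 'yellow': 2.0})
# rescales the scores so they sum to one, returning {'hello': 0.5, 'yellow': 0.5};
# note the dict is modified in place as well as returned.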
def list_to_dict(transcriptions):
d = {}
for asr in transcriptions:
counter = 0
d[asr[0]] = {}
for trans in asr[1]:
counter = counter + 1
d[asr[0]][trans] = counter
return d
def list_to_dict_w_probabilities(transcriptions):
d = {}
for asr in transcriptions:
d[asr[0]] = {}
for trans in asr[1]:
d[asr[0]][trans[0]] = trans[1]
return d
def pick_best(transcriptions):
confidence = 1.1
for asr in transcriptions:
for hypo in transcriptions[asr]:
if transcriptions[asr][hypo] < confidence:
best_hypo = hypo
confidence = transcriptions[asr][hypo]
return best_hypo
| mit | -7,547,638,485,822,024,000 | 23 | 54 | 0.556202 | false |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/ad_group_criterion_approval_status.py | 1 | 1278 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'AdGroupCriterionApprovalStatusEnum',
},
)
class AdGroupCriterionApprovalStatusEnum(proto.Message):
r"""Container for enum describing possible AdGroupCriterion
approval statuses.
"""
class AdGroupCriterionApprovalStatus(proto.Enum):
r"""Enumerates AdGroupCriterion approval statuses."""
UNSPECIFIED = 0
UNKNOWN = 1
APPROVED = 2
DISAPPROVED = 3
PENDING_REVIEW = 4
UNDER_REVIEW = 5
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -6,542,211,844,050,196,000 | 29.428571 | 74 | 0.697183 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/code/browser/tests/test_branchsubscription.py | 1 | 2631 | # Copyright 2009-2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Unit tests for BranchSubscriptions."""
__metaclass__ = type
from lp.app.enums import InformationType
from lp.services.webapp.interfaces import IPrimaryContext
from lp.testing import (
person_logged_in,
TestCaseWithFactory,
)
from lp.testing.layers import DatabaseFunctionalLayer
from lp.testing.views import create_initialized_view
class TestBranchSubscriptionPrimaryContext(TestCaseWithFactory):
# Tests the adaptation of a branch subscription into a primary context.
layer = DatabaseFunctionalLayer
def testPrimaryContext(self):
# The primary context of a branch subscription is the same as the
# primary context of the branch that the subscription is for.
subscription = self.factory.makeBranchSubscription()
self.assertEqual(
IPrimaryContext(subscription).context,
IPrimaryContext(subscription.branch).context)
class TestBranchSubscriptionAddOtherView(TestCaseWithFactory):
layer = DatabaseFunctionalLayer
def test_cannot_subscribe_open_team_to_private_branch(self):
owner = self.factory.makePerson()
branch = self.factory.makeBranch(
information_type=InformationType.USERDATA, owner=owner)
team = self.factory.makeTeam()
form = {
'field.person': team.name,
'field.notification_level': 'NOEMAIL',
'field.max_diff_lines': 'NODIFF',
'field.review_level': 'NOEMAIL',
'field.actions.subscribe_action': 'Subscribe'}
with person_logged_in(owner):
view = create_initialized_view(
branch, '+addsubscriber', principal=owner, form=form)
self.assertContentEqual(
['Open and delegated teams cannot be subscribed to private '
'branches.'], view.errors)
def test_can_subscribe_open_team_to_public_branch(self):
owner = self.factory.makePerson()
branch = self.factory.makeBranch(owner=owner)
team = self.factory.makeTeam()
form = {
'field.person': team.name,
'field.notification_level': 'NOEMAIL',
'field.max_diff_lines': 'NODIFF',
'field.review_level': 'NOEMAIL',
'field.actions.subscribe_action': 'Subscribe'}
with person_logged_in(owner):
view = create_initialized_view(
branch, '+addsubscriber', principal=owner, form=form)
self.assertContentEqual([], view.errors)
| agpl-3.0 | 1,524,930,731,386,648,000 | 38.268657 | 76 | 0.664386 | false |
foobarbazblarg/stayclean | stayclean-2019-may/update-google-chart.py | 1 | 7882 | #!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
import json
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
import datetime
from participantCollection import ParticipantCollection
# Edit Me!
participantFileNames = ['../stayclean-2014-november/participants.txt',
'../stayclean-2014-december/participants.txt',
'../stayclean-2015-january/participants.txt',
'../stayclean-2015-february/participants.txt',
'../stayclean-2015-march/participants.txt',
'../stayclean-2015-april/participants.txt',
'../stayclean-2015-may/participants.txt',
'../stayclean-2015-june/participants.txt',
'../stayclean-2015-july/participants.txt',
'../stayclean-2015-august/participants.txt',
'../stayclean-2015-september/participants.txt',
'../stayclean-2015-october/participants.txt',
'../stayclean-2015-november/participants.txt',
'../stayclean-2015-december/participants.txt',
'../stayclean-2016-january/participants.txt',
'../stayclean-2016-february/participants.txt',
'../stayclean-2016-march/participants.txt',
'../stayclean-2016-april/participants.txt',
'../stayclean-2016-may/participants.txt',
'../stayclean-2016-june/participants.txt',
'../stayclean-2016-july/participants.txt',
'../stayclean-2016-august/participants.txt',
'../stayclean-2016-september/participants.txt',
'../stayclean-2016-october/participants.txt',
'../stayclean-2016-november/participants.txt',
'../stayclean-2016-december/participants.txt',
'../stayclean-2017-january/participants.txt',
'../stayclean-2017-february/participants.txt',
'../stayclean-2017-march/participants.txt',
'../stayclean-2017-april/participants.txt',
'../stayclean-2017-may/participants.txt',
'../stayclean-2017-june/participants.txt',
'../stayclean-2017-july/participants.txt',
'../stayclean-2017-august/participants.txt',
'../stayclean-2017-september/participants.txt',
'../stayclean-2017-october/participants.txt',
'../stayclean-2017-november/participants.txt',
'../stayclean-2017-december/participants.txt',
'../stayclean-2018-january/participants.txt',
'../stayclean-2018-february/participants.txt',
'../stayclean-2018-march/participants.txt',
'../stayclean-2018-april/participants.txt',
'../stayclean-2018-may/participants.txt',
'../stayclean-2018-june/participants.txt',
'../stayclean-2018-july/participants.txt',
'../stayclean-2018-august/participants.txt',
'../stayclean-2018-september/participants.txt',
'../stayclean-2018-october/participants.txt',
'../stayclean-2018-november/participants.txt',
'../stayclean-2018-december/participants.txt',
'../stayclean-2019-january/participants.txt',
'../stayclean-2019-february/participants.txt',
'../stayclean-2019-march/participants.txt',
'../stayclean-2019-april/participants.txt',
'./participants.txt']
sortedRelapseDates = []
for participantFileName in participantFileNames:
participants = ParticipantCollection(fileNameString=participantFileName)
sortedRelapseDates = sortedRelapseDates + participants.allRelapseDates()
sortedRelapseDates.sort()
earliestReportDate = sortedRelapseDates[0]
latestReportDate = sortedRelapseDates[-1]
reportDates = []
numberOfRelapsesPerDate = []
reportDatesAndNumberOfRelapses = {}
dayOfWeekIndexesAndNumberOfInstances = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
reportDate = earliestReportDate
while reportDate <= latestReportDate:
reportDatesAndNumberOfRelapses[reportDate] = 0
# dayOfWeekIndexesAndNumberOfInstances[reportDate.weekday()] = dayOfWeekIndexesAndNumberOfInstances[reportDate.weekday()] + 1
dayOfWeekIndexesAndNumberOfInstances[reportDate.weekday()] += 1
reportDate += datetime.timedelta(days=1)
for relapseDate in sortedRelapseDates:
# reportDatesAndNumberOfRelapses[relapseDate] = reportDatesAndNumberOfRelapses[relapseDate] + 1
reportDatesAndNumberOfRelapses[relapseDate] += 1
dayOfWeekIndexesAndTotalNumberOfRelapses = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
for participantFileName in participantFileNames:
participants = ParticipantCollection(fileNameString=participantFileName)
# print participants.relapseDayOfWeekIndexesAndParticipants()
for index, parts in participants.relapseDayOfWeekIndexesAndParticipants().iteritems():
# dayOfWeekIndexesAndTotalNumberOfRelapses[index] = dayOfWeekIndexesAndTotalNumberOfRelapses[index] + len(parts)
dayOfWeekIndexesAndTotalNumberOfRelapses[index] += len(parts)
dayOfWeekIndexesAndAverageNumberOfRelapses = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
for index, instances in dayOfWeekIndexesAndNumberOfInstances.iteritems():
# dayOfWeekIndexesAndAverageNumberOfRelapses[index] = int(round(float(dayOfWeekIndexesAndTotalNumberOfRelapses[index]) / float(instances)))
dayOfWeekIndexesAndAverageNumberOfRelapses[index] = float(dayOfWeekIndexesAndTotalNumberOfRelapses[index]) / float(instances)
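# Illustrative example (numbers made up, not from the data): if Mondays
# (weekday index 0) occurred 100 times across all tracked months and saw 250
# relapses in total, the average stored for index 0 would be 250 / 100 = 2.5.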
spreadsheetTitle = "StayClean monthly challenge relapse data"
# spreadsheetTitle = "Test spreadsheet"
json_key = json.load(open('../google-oauth-credentials.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'].encode(), scope)
gc = gspread.authorize(credentials)
spreadSheet = None
try:
spreadSheet = gc.open(spreadsheetTitle)
except gspread.exceptions.SpreadsheetNotFound:
print "No spreadsheet with title " + spreadsheetTitle
exit(1)
workSheet = spreadSheet.get_worksheet(0)
columnACells = workSheet.range("A2:A" + str(len(reportDatesAndNumberOfRelapses) + 1))
columnBCells = workSheet.range("B2:B" + str(len(reportDatesAndNumberOfRelapses) + 1))
columnCCells = workSheet.range("C2:C8")
columnDCells = workSheet.range("D2:D8")
reportDate = earliestReportDate
rowIndex = 0
while reportDate <= latestReportDate:
columnACells[rowIndex].value = str(reportDate)
columnBCells[rowIndex].value = str(reportDatesAndNumberOfRelapses[reportDate])
rowIndex += 1
reportDate += datetime.timedelta(days=1)
for weekdayIndex in range(0, 7):
weekdayName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'][weekdayIndex]
# spreadsheetClient.UpdateCell(weekdayIndex + 2,3,weekdayName,spreadsheetId)
# spreadsheetClient.UpdateCell(weekdayIndex + 2,4,str(dayOfWeekIndexesAndAverageNumberOfRelapses[weekdayIndex]),spreadsheetId)
columnCCells[weekdayIndex].value = weekdayName
columnDCells[weekdayIndex].value = str(dayOfWeekIndexesAndAverageNumberOfRelapses[weekdayIndex])
allCells = columnACells + columnBCells + columnCCells + columnDCells
workSheet.update_cells(allCells)
exit(0)
| mit | 6,415,359,784,071,522,000 | 56.955882 | 143 | 0.649454 | false |
nickpowersys/CaaR | caar/pandas_tseries_tools.py | 1 | 5513 | #--------------------------------------------------------------------------------
# Copyright (c) 2012, PyData Development Team
# All rights reserved.
#
# Distributed under the terms of the BSD Simplified License.
#
# The full license is in the PANDAS_LICENSE file, distributed with this software.
#--------------------------------------------------------------------------------
# FROM PANDAS GITHUB REPOSITORY pandas/tseries/tools.py as of commit 31c2e5f,
# from July 19, 2016
# __future__ and future imports are only in caar for PY2/PY3, not pandas
from __future__ import absolute_import, division, print_function
import pandas.compat as compat
from future import standard_library # caar PY2/PY3
standard_library.install_aliases() #
_DATEUTIL_LEXER_SPLIT = None
try:
# Since these are private methods from dateutil, it is safely imported
# here so in case this interface changes, pandas will just fallback
# to not using the functionality
from dateutil.parser import _timelex
if hasattr(_timelex, 'split'):
def _lexer_split_from_str(dt_str):
# The StringIO(str(_)) is for dateutil 2.2 compatibility
return _timelex.split(compat.StringIO(str(dt_str)))
_DATEUTIL_LEXER_SPLIT = _lexer_split_from_str
except (ImportError, AttributeError):
pass
def _guess_datetime_format(dt_str, dayfirst=False,
dt_str_parse=compat.parse_date,
dt_str_split=_DATEUTIL_LEXER_SPLIT):
"""
Guess the datetime format of a given datetime string.
Parameters
----------
dt_str : string, datetime string to guess the format of
dayfirst : boolean, default False
If True parses dates with the day first, eg 20/01/2005
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug).
dt_str_parse : function, defaults to `compat.parse_date` (dateutil)
This function should take in a datetime string and return
a `datetime.datetime` guess that the datetime string represents
dt_str_split : function, defaults to `_DATEUTIL_LEXER_SPLIT` (dateutil)
This function should take in a datetime string and return
a list of strings, the guess of the various specific parts
e.g. '2011/12/30' -> ['2011', '/', '12', '/', '30']
Returns
-------
ret : datetime format string (for `strftime` or `strptime`)
"""
if dt_str_parse is None or dt_str_split is None:
return None
if not isinstance(dt_str, compat.string_types):
return None
day_attribute_and_format = (('day',), '%d', 2)
# attr name, format, padding (if any)
datetime_attrs_to_format = [
(('year', 'month', 'day'), '%Y%m%d', 0),
(('year',), '%Y', 0),
(('month',), '%B', 0),
(('month',), '%b', 0),
(('month',), '%m', 2),
day_attribute_and_format,
(('hour',), '%H', 2),
(('minute',), '%M', 2),
(('second',), '%S', 2),
(('microsecond',), '%f', 6),
(('second', 'microsecond'), '%S.%f', 0),
]
if dayfirst:
datetime_attrs_to_format.remove(day_attribute_and_format)
datetime_attrs_to_format.insert(0, day_attribute_and_format)
try:
parsed_datetime = dt_str_parse(dt_str, dayfirst=dayfirst)
except:
# In case the datetime can't be parsed, its format cannot be guessed
return None
if parsed_datetime is None:
return None
try:
tokens = dt_str_split(dt_str)
except:
# In case the datetime string can't be split, its format cannot
# be guessed
return None
format_guess = [None] * len(tokens)
found_attrs = set()
for attrs, attr_format, padding in datetime_attrs_to_format:
# If a given attribute has been placed in the format string, skip
# over other formats for that same underlying attribute (IE, month
# can be represented in multiple different ways)
if set(attrs) & found_attrs:
continue
if all(getattr(parsed_datetime, attr) is not None for attr in attrs):
for i, token_format in enumerate(format_guess):
token_filled = tokens[i].zfill(padding)
if (token_format is None and
token_filled == parsed_datetime.strftime(attr_format)):
format_guess[i] = attr_format
tokens[i] = token_filled
found_attrs.update(attrs)
break
# Only consider it a valid guess if we have a year, month and day
if len(set(['year', 'month', 'day']) & found_attrs) != 3:
return None
output_format = []
for i, guess in enumerate(format_guess):
if guess is not None:
# Either fill in the format placeholder (like %Y)
output_format.append(guess)
else:
# Or just the token separate (IE, the dashes in "01-01-2013")
try:
# If the token is numeric, then we likely didn't parse it
# properly, so our guess is wrong
float(tokens[i])
return None
except ValueError:
pass
output_format.append(tokens[i])
guessed_format = ''.join(output_format)
# rebuild string, capturing any inferred padding
dt_str = ''.join(tokens)
if parsed_datetime.strftime(guessed_format) == dt_str:
return guessed_format
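# Minimal usage sketch (added for illustration; not part of the original
# pandas module). With dateutil available, guessing a simple ISO-style date
# should recover the matching strftime/strptime pattern.
if __name__ == '__main__':
    # Should print: %Y-%m-%d
    print(_guess_datetime_format('2011-12-30'))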
| bsd-3-clause | -5,352,531,936,608,387,000 | 35.509934 | 81 | 0.584981 | false |
inveniosoftware/invenio-records-rest | invenio_records_rest/serializers/citeproc.py | 1 | 4713 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CSL Citation Formatter serializer for records."""
from __future__ import absolute_import, print_function
import json
import re
from citeproc import Citation, CitationItem, CitationStylesBibliography, \
CitationStylesStyle, formatter
from citeproc.source.bibtex import BibTeX
from citeproc.source.json import CiteProcJSON
from flask import has_request_context, request
from webargs import fields
from webargs.flaskparser import FlaskParser
from ..errors import StyleNotFoundRESTError
try:
from citeproc_styles import get_style_filepath
from citeproc_styles.errors import StyleNotFoundError
except Exception:
import warnings
warnings.warn('citeproc_styles not found. '
'Please install to enable Citeproc Serialization.')
class CiteprocSerializer(object):
"""CSL Citation Formatter serializer for records.
In order to produce a formatted citation of a record through citeproc-py,
we need a CSL-JSON or BibTeX serialized version of it. Since there may be
already an implementation of such a serializer, it can be passed in the
constructor of this class. This serializer has to implement a `serialize`
method that returns the CSL-JSON/BibTeX result.
"""
_default_style = 'harvard1'
"""The `citeproc-py` library supports by default the 'harvard1' style."""
    _default_locale = 'en-US'
    """The `citeproc-py` library uses the 'en-US' locale by default."""
_user_args = {
'style': fields.Str(missing=_default_style),
'locale': fields.Str(missing=_default_locale)
}
"""Arguments for the webargs parser."""
_valid_formats = ('csl', 'bibtex')
"""Supported formats by citeproc-py."""
def __init__(self, serializer, record_format='csl'):
"""Initialize the inner record serializer.
:param serializer: Serializer object that does the record serialization
to a format that `citeproc-py` can process (CSL-JSON or BibTeX).
The object has to implement a `serialize` method that matches the
signature of the `serialize` method of this class.
:param record_format: Format that the serializer produces.
"""
assert record_format in self._valid_formats
assert getattr(serializer, 'serialize', None)
assert callable(getattr(serializer, 'serialize'))
self.serializer = serializer
self.record_format = record_format
@classmethod
def _get_args(cls, **kwargs):
"""Parse style and locale.
Argument location precedence: kwargs > view_args > query
"""
csl_args = {
'style': cls._default_style,
'locale': cls._default_locale
}
if has_request_context():
parser = FlaskParser(locations=('view_args', 'query'))
csl_args.update(parser.parse(cls._user_args, request))
csl_args.update({k: kwargs[k]
for k in ('style', 'locale') if k in kwargs})
try:
csl_args['style'] = get_style_filepath(csl_args['style'].lower())
except StyleNotFoundError:
if has_request_context():
raise StyleNotFoundRESTError(csl_args['style'])
raise
return csl_args
def _get_source(self, data):
"""Get source data object for citeproc-py."""
if self.record_format == 'csl':
return CiteProcJSON([json.loads(data)])
elif self.record_format == 'bibtex':
return BibTeX(data)
    def _clean_result(self, text):
        """Collapse repeated spaces and periods and escape apostrophes."""
text = re.sub(r'\s\s+', ' ', text)
text = re.sub(r'\.\.+', '.', text)
text = text.replace("'", "\\'")
return text
def serialize(self, pid, record, links_factory=None, **kwargs):
"""Serialize a single record.
:param pid: Persistent identifier instance.
:param record: Record instance.
:param links_factory: Factory function for record links.
"""
data = self.serializer.serialize(pid, record, links_factory)
source = self._get_source(data)
style = CitationStylesStyle(validate=False, **self._get_args(**kwargs))
bib = CitationStylesBibliography(style, source, formatter.plain)
citation = Citation([CitationItem(pid.pid_value)])
bib.register(citation)
return self._clean_result(''.join(bib.bibliography()[0]))
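# Illustrative usage sketch (not part of the original module). Assuming a
# hypothetical ``MyCSLJSONSerializer`` whose ``serialize(pid, record, links)``
# returns CSL-JSON for the record, a formatted citation could be produced as:
#
#     serializer = CiteprocSerializer(MyCSLJSONSerializer(), record_format='csl')
#     citation = serializer.serialize(pid, record, style='apa', locale='en-US')
#
# Both ``style`` and ``locale`` fall back to 'harvard1' / 'en-US' when omitted.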
| mit | -8,886,878,848,166,447,000 | 34.704545 | 79 | 0.649268 | false |
Tethik/ericsson-hackathon | server/server/settings.py | 1 | 2167 | """
Django settings for server project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xjch86&*&ha*jph5c_%t_trlm*+aun4g$j*u0g)s9b44#_!g3p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'server.urls'
WSGI_APPLICATION = 'server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| mit | 2,057,896,832,154,065,200 | 22.813187 | 71 | 0.718043 | false |
pragma-/pbot | modules/paren/paren.py | 1 | 2698 | #!/usr/bin/env python
from __future__ import print_function
import sys
from pycparser import c_parser, c_generator, c_ast, plyparser
from pycparser.ply import yacc
with open("paren/stddef") as f:
STDDEF = f.read()
class CParser(c_parser.CParser):
def __init__(self, *a, **kw):
super(CParser, self).__init__(*a, **kw)
self.cparser = yacc.yacc(
module=self,
start='expression',
debug=kw.get('yacc_debug', False),
errorlog=yacc.NullLogger(),
optimize=kw.get('yacc_optimize', True),
tabmodule=kw.get('yacctab', 'yacctab'))
def parse(self, text, filename='', debuglevel=0):
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()]
self._last_yielded_token = None
for name in STDDEF.split('\n'):
if name:
self._add_typedef_name(name, None)
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debuglevel)
class CGenerator(c_generator.CGenerator):
def _is_simple_node(self, n):
return isinstance(n, (c_ast.Constant, c_ast.ID, c_ast.FuncCall))
def visit_UnaryOp(self, n):
# don't parenthesize an operand to sizeof if it's not a type
if n.op == 'sizeof':
if isinstance(n.expr, c_ast.Typename):
return 'sizeof (%s)' % self.visit(n.expr)
else:
return 'sizeof %s' % self._parenthesize_unless_simple(n.expr)
else:
operand = self.visit(n.expr)
if isinstance(n.expr, c_ast.ArrayRef) or not self._is_simple_node(n.expr):
operand = '(%s)' % operand
if n.op in ('p++', 'p--'):
return operand + n.op[1:]
else:
return n.op + operand
def visit_TernaryOp(self, n):
return '({0}) ? ({1}) : ({2})'.format(
self.visit(n.cond),
self.visit(n.iftrue),
self.visit(n.iffalse))
def visit_Assignment(self, n):
return '%s %s %s' % (self.visit(n.lvalue), n.op, self._parenthesize_unless_simple(n.rvalue))
def parenthesize(source):
parser = CParser(yacc_optimize=False)
try:
ast = parser.parse(source, '<input>')
except plyparser.ParseError as e:
print("Error: " + e.args[0])
return
generator = CGenerator()
print(generator.visit(ast))
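# Example (illustrative): given the overridden visit_TernaryOp above,
# parenthesize("a ? b : c") should print "(a) ? (b) : (c)".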
if __name__ == "__main__":
if len(sys.argv) > 1:
parenthesize(' '.join(sys.argv[1:]).rstrip(';'))
elif len(sys.argv) == 1:
print("Usage: paren <expression>")
else:
print('error')
| mpl-2.0 | 2,477,247,791,366,992,000 | 31.119048 | 100 | 0.547072 | false |
emencia/emencia.recipe.patch | setup.py | 1 | 1777 | # -*- coding: utf-8 -*-
"""
This module contains the tool of emencia.recipe.patch
"""
from setuptools import setup, find_packages
version = '0.1'
long_description = (
open('README.rst').read() + '\n'
+ '\n' +
'Download\n'
'********\n'
)
entry_point = 'emencia.recipe.patch:Recipe'
entry_points = {"zc.buildout": ["default = %s" % entry_point]}
tests_require=['zope.testing', 'zc.buildout']
setup(name='emencia.recipe.patch',
version=version,
description="recipe for patching eggs",
long_description=long_description,
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Framework :: Buildout',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: GNU General Public License (GPL)',
],
keywords='buildout recipe patch',
author='Rok Garbas',
author_email='[email protected]',
maintainer='J. David Ibanez',
maintainer_email='[email protected]',
url='http://github.com/emencia/emencia.recipe.patch',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['emencia', 'emencia.recipe'],
include_package_data=True,
install_requires=['setuptools',
'zc.buildout',
# -*- Extra requirements: -*-
'zc.recipe.egg',
],
tests_require=tests_require,
extras_require=dict(tests=tests_require),
test_suite = 'emencia.recipe.patch.tests.test_docs.test_suite',
entry_points=entry_points,
zip_safe = True,
)
| gpl-2.0 | 6,887,943,318,520,764,000 | 33.173077 | 83 | 0.598199 | false |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/types/account_budget_proposal_service.py | 1 | 5066 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v7.resources.types import account_budget_proposal
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.services',
marshal='google.ads.googleads.v7',
manifest={
'GetAccountBudgetProposalRequest',
'MutateAccountBudgetProposalRequest',
'AccountBudgetProposalOperation',
'MutateAccountBudgetProposalResponse',
'MutateAccountBudgetProposalResult',
},
)
class GetAccountBudgetProposalRequest(proto.Message):
r"""Request message for
[AccountBudgetProposalService.GetAccountBudgetProposal][google.ads.googleads.v7.services.AccountBudgetProposalService.GetAccountBudgetProposal].
Attributes:
resource_name (str):
            Required. The resource name of the account-level
            budget proposal to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
class MutateAccountBudgetProposalRequest(proto.Message):
r"""Request message for
[AccountBudgetProposalService.MutateAccountBudgetProposal][google.ads.googleads.v7.services.AccountBudgetProposalService.MutateAccountBudgetProposal].
Attributes:
customer_id (str):
Required. The ID of the customer.
operation (google.ads.googleads.v7.services.types.AccountBudgetProposalOperation):
Required. The operation to perform on an
individual account-level budget proposal.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
"""
customer_id = proto.Field(
proto.STRING,
number=1,
)
operation = proto.Field(
proto.MESSAGE,
number=2,
message='AccountBudgetProposalOperation',
)
validate_only = proto.Field(
proto.BOOL,
number=3,
)
class AccountBudgetProposalOperation(proto.Message):
    r"""A single operation to propose the creation of a new account-level
    budget or edit/end/remove an existing one.
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which budget fields
are modified. While budgets may be modified,
proposals that propose such modifications are
final. Therefore, update operations are not
supported for proposals.
Proposals that modify budgets have the 'update'
proposal type. Specifying a mask for any other
proposal type is considered an error.
create (google.ads.googleads.v7.resources.types.AccountBudgetProposal):
Create operation: A new proposal to create a
new budget, edit an existing budget, end an
actively running budget, or remove an approved
budget scheduled to start in the future.
No resource name is expected for the new
proposal.
remove (str):
Remove operation: A resource name for the removed proposal
is expected, in this format:
``customers/{customer_id}/accountBudgetProposals/{account_budget_proposal_id}``
A request may be cancelled iff it is pending.
"""
update_mask = proto.Field(
proto.MESSAGE,
number=3,
message=field_mask_pb2.FieldMask,
)
create = proto.Field(
proto.MESSAGE,
number=2,
oneof='operation',
message=account_budget_proposal.AccountBudgetProposal,
)
remove = proto.Field(
proto.STRING,
number=1,
oneof='operation',
)
class MutateAccountBudgetProposalResponse(proto.Message):
r"""Response message for account-level budget mutate operations.
Attributes:
result (google.ads.googleads.v7.services.types.MutateAccountBudgetProposalResult):
The result of the mutate.
"""
result = proto.Field(
proto.MESSAGE,
number=2,
message='MutateAccountBudgetProposalResult',
)
class MutateAccountBudgetProposalResult(proto.Message):
r"""The result for the account budget proposal mutate.
Attributes:
resource_name (str):
Returned for successful operations.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
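# Illustrative sketch (added for demonstration; the resource names and IDs
# below are made up): building a proposal-removal request with the message
# types defined above.
if __name__ == '__main__':
    operation = AccountBudgetProposalOperation(
        remove='customers/1234567890/accountBudgetProposals/111')
    request = MutateAccountBudgetProposalRequest(
        customer_id='1234567890', operation=operation, validate_only=True)
    print(request.customer_id, request.operation.remove)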
| apache-2.0 | 4,730,186,862,866,976,000 | 31.683871 | 154 | 0.671141 | false |
cheeseywhiz/cheeseywhiz | http/data/csv-vis.py | 1 | 3063 | #!/usr/bin/env python3
import csv
import sys
import matplotlib.pyplot as plt
from config import data_sets, fontdict
try:
sys.argv[1]
if sys.argv[1] not in data_sets:
raise IndexError
except IndexError as error:
keys = '\n'.join(key for key in data_sets)
print(f'Data sets:\n{keys}\nPut in arg #1')
sys.exit(1)
data_set = data_sets[sys.argv[1]]
# allowing for None end chars
if data_set['str-end-chars'] is not None:
data_set['str-end-chars'] *= -1
with open(data_set['file-location']) as file:
# for processing huge files
csv.field_size_limit(sys.maxsize)
# you can unpack a list: no tupling required here
raw_data = list(csv.reader(file))
print('raw_data')
# headers from data[0] so far
# strip MULTIPOLYGON ((( ))) from coordinates string
# remove headers row [0]
formatted_data = [
(
row[data_set['label-index']].capitalize(),
row[data_set['data-index']][
data_set['str-start-chars']:data_set['str-end-chars']
]
)
for row in raw_data[1:]
]
print('formatted_data')
# mo county data pairs coords differently
if data_set == data_sets['mo-counties']:
formatted_data = [
(label, coords.replace(',', ' '))
for label, coords in formatted_data
]
# split up numbers to furthur work with
split_coords = [
(label, coords_str.split(' '))
for label, coords_str in formatted_data
]
print('split_coords')
# turn strings into floats by trimming off traiing characters if necessary
def float_recur(str, n=1):
if n > 1000: # Or else it causes stack overflow (???)
return None # Also good for debugging
try:
return float(str)
except Exception:
return float_recur(str[:-1], n=n + 1)
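# For example (illustrative), float_recur('38.123)') keeps stripping trailing
# characters until float() succeeds and returns 38.123.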
float_coords = [
(label, [float_recur(coord) for coord in coords_str])
for label, coords_str in split_coords
]
print('float_coords')
# throw pairs of consecutive lat/longs together in a single tuple
def combine(list):
for i in range(len(list)):
if not i % 2:
yield list[i], list[i + 1]
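# e.g. (illustrative) list(combine([1.0, 2.0, 3.0, 4.0])) == [(1.0, 2.0), (3.0, 4.0)]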
coord_pairs = [
(label, [i for i in combine(coords)])
for label, coords in float_coords
]
print('coord_pairs')
# calculate the center of the area to place the label
def center(points: list):
# filter out None values from combine() generator
points = [
(x, y)
for x, y in points
if not (x is None or y is None)
]
def avg(list):
return sum(list) / len(list)
x, y = zip(*points)
return avg(x), avg(y)
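# e.g. (illustrative) center([(0.0, 0.0), (2.0, 0.0), (2.0, 2.0), (0.0, 2.0)]) == (1.0, 1.0)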
label_geom_center = [
(label, coords, center(coords))
for label, coords in coord_pairs
]
print('label_geom_center')
# convert pairs of coordinates into lists of lats and longs
boundaries = [
(label, zip(*coords), center)
for label, coords, center in label_geom_center
]
print('boundaries')
# plot the data
for label, boundary, center in boundaries:
plt.plot(*boundary)
if data_set['show-labels']:
plt.text(*center, label, fontdict=fontdict)
print('showing plot')
plt.show()
print('done')
| mit | -5,690,825,025,149,385,000 | 23.309524 | 74 | 0.63859 | false |
dandxy89/ImageModels | KerasLayers/Custom_layers.py | 1 | 1441 | from keras.engine import Layer, InputSpec
from keras import initializations
from keras import backend as K
class LRN2D(Layer):
def __init__(self, alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
if n % 2 == 0:
raise NotImplementedError(
"LRN2D only works with odd n. n provided: " + str(n))
super(LRN2D, self).__init__(**kwargs)
self.alpha = alpha
self.k = k
self.beta = beta
self.n = n
def get_output(self, train):
X = self.get_input(train)
b, ch, r, c = K.shape(X)
half_n = self.n // 2
input_sqr = K.square(X)
extra_channels = K.zeros((b, ch + 2 * half_n, r, c))
input_sqr = K.concatenate([extra_channels[:, :half_n, :, :],
input_sqr,
extra_channels[:, half_n + ch:, :, :]],
axis=1)
scale = self.k
for i in range(self.n):
scale += self.alpha * input_sqr[:, i:i + ch, :, :]
scale = scale ** self.beta
return X / scale
def get_config(self):
config = {"name": self.__class__.__name__,
"alpha": self.alpha,
"k": self.k,
"beta": self.beta,
"n": self.n}
base_config = super(LRN2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
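# Illustrative usage sketch (not from the original repo): with the old
# Keras 1.x functional API this layer is typically dropped in right after a
# convolution, mirroring AlexNet-style local response normalization, e.g.
#
#     x = Convolution2D(64, 3, 3, activation='relu')(x)
#     x = LRN2D(alpha=1e-4, k=2, beta=0.75, n=5)(x)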
| apache-2.0 | -2,740,478,046,143,277,000 | 31.75 | 74 | 0.473282 | false |
anntzer/scikit-learn | sklearn/ensemble/_weight_boosting.py | 2 | 43115 | """Weight Boosting.
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The `BaseWeightBoosting` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- :class:`~sklearn.ensemble.AdaBoostClassifier` implements adaptive boosting
(AdaBoost-SAMME) for classification problems.
- :class:`~sklearn.ensemble.AdaBoostRegressor` implements adaptive boosting
(AdaBoost.R2) for regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.special import xlogy
from ._base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_classifier, is_regressor
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..utils import check_random_state, _safe_indexing
from ..utils.extmath import softmax
from ..utils.extmath import stable_cumsum
from ..metrics import accuracy_score, r2_score
from ..utils.validation import check_is_fitted
from ..utils.validation import _check_sample_weight
from ..utils.validation import has_fit_parameter
from ..utils.validation import _num_samples
from ..utils.validation import _deprecate_positional_args
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(BaseEnsemble, metaclass=ABCMeta):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None, *,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super().__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def _check_X(self, X):
# Only called to validate X in non-fit methods, therefore reset=False
return self._validate_data(
X, accept_sparse=['csr', 'csc'], ensure_2d=True, allow_nd=True,
dtype=None, reset=False)
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
X, y = self._validate_data(X, y,
accept_sparse=['csr', 'csc'],
ensure_2d=True,
allow_nd=True,
dtype=None,
y_numeric=is_regressor(self))
sample_weight = _check_sample_weight(sample_weight, X, np.float64)
sample_weight /= sample_weight.sum()
if np.any(sample_weight < 0):
raise ValueError("sample_weight cannot contain negative weights")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
        # Initialization of the random number instance that will be used to
# generate a seed at each iteration
random_state = check_random_state(self.random_state)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight,
random_state)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
The target values (class labels).
sample_weight : array-like of shape (n_samples,)
The current sample weights.
random_state : RandomState
The current random number generator
Returns
-------
sample_weight : array-like of shape (n_samples,) or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
Labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Yields
------
z : float
"""
X = self._check_X(X)
for y_pred in self.staged_predict(X):
if is_classifier(self):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
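    # Illustrative note (not part of the scikit-learn docstring): staged_score
    # can be used to pick the boosting iteration that generalises best, e.g.
    #
    #     clf = AdaBoostClassifier(n_estimators=200).fit(X_train, y_train)
    #     test_scores = list(clf.staged_score(X_test, y_test))
    #     best_n_estimators = int(np.argmax(test_scores)) + 1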
@property
def feature_importances_(self):
"""The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
Returns
-------
feature_importances_ : ndarray of shape (n_features,)
The feature importances.
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError as e:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute") from e
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
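# Worked example (illustrative): for n_classes = 3 and a predicted probability
# row [0.7, 0.2, 0.1], the expression above yields roughly
# 2 * (log(p) - mean(log(p))) = [2.13, -0.37, -1.76], which sums to zero as
# required by the SAMME.R symmetric class coding.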
class AdaBoostClassifier(ClassifierMixin, BaseWeightBoosting):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
.. versionadded:: 0.14
Parameters
----------
base_estimator : object, default=None
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper
``classes_`` and ``n_classes_`` attributes. If ``None``, then
the base estimator is :class:`~sklearn.tree.DecisionTreeClassifier`
initialized with `max_depth=1`.
n_estimators : int, default=50
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, default=1.
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, default='SAMME.R'
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, default=None
Controls the random seed given at each `base_estimator` at each
boosting iteration.
Thus, it is only used when `base_estimator` exposes a `random_state`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : ndarray of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : ndarray of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances if supported by the
``base_estimator`` (when based on decision trees).
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
See Also
--------
AdaBoostRegressor : An AdaBoost regressor that begins by fitting a
regressor on the original dataset and then fits additional copies of
the regressor on the same dataset but where the weights of instances
are adjusted according to the error of the current prediction.
GradientBoostingClassifier : GB builds an additive model in a forward
stage-wise fashion. Regression trees are fit on the negative gradient
of the binomial or multinomial deviance loss function. Binary
classification is a special case where only a single regression tree is
induced.
sklearn.tree.DecisionTreeClassifier : A non-parametric supervised learning
method used for classification.
Creates a model that predicts the value of a target variable by
learning simple decision rules inferred from the data features.
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
Examples
--------
>>> from sklearn.ensemble import AdaBoostClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=1000, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = AdaBoostClassifier(n_estimators=100, random_state=0)
>>> clf.fit(X, y)
AdaBoostClassifier(n_estimators=100, random_state=0)
>>> clf.predict([[0, 0, 0, 0]])
array([1])
>>> clf.score(X, y)
0.983...
"""
@_deprecate_positional_args
def __init__(self,
base_estimator=None, *,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super().__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
The target values (class labels).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Fitted estimator.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super().fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super()._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values (class labels).
sample_weight : array-like of shape (n_samples,)
The current sample weights.
random_state : RandomState instance
The RandomState instance used if the base estimator accepts a
`random_state` attribute.
Returns
-------
sample_weight : array-like of shape (n_samples,) or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight, random_state)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight,
random_state)
def _boost_real(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator(random_state=random_state)
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
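        # For example (illustrative), with n_classes = 3 and true class index 1
        # the coding row is [-0.5, 1.0, -0.5].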
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* ((n_classes - 1.) / n_classes)
* xlogy(y_coding, y_predict_proba).sum(axis=1))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator(random_state=random_state)
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
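        # For example (illustrative), a 25% weighted error with 3 classes gives
        # learning_rate * (log(3) + log(2)) ~= 1.79 * learning_rate.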
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
(sample_weight > 0))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted classes.
"""
X = self._check_X(X)
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted classes.
"""
X = self._check_X(X)
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
        score : ndarray of shape (n_samples, k)
            The decision function of the input samples. The order of
            outputs is the same as that of the :term:`classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self)
X = self._check_X(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
score : generator of ndarray of shape (n_samples, k)
The decision function of the input samples. The order of
            outputs is the same as that of the :term:`classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self)
X = self._check_X(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
@staticmethod
def _compute_proba_from_decision(decision, n_classes):
"""Compute probabilities from the decision function.
This is based eq. (4) of [1] where:
p(y=c|X) = exp((1 / K-1) f_c(X)) / sum_k(exp((1 / K-1) f_k(X)))
= softmax((1 / K-1) * f(X))
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost",
2009.
"""
if n_classes == 2:
decision = np.vstack([-decision, decision]).T / 2
else:
decision /= (n_classes - 1)
return softmax(decision, copy=False)
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of
            outputs is the same as that of the :term:`classes_` attribute.
"""
check_is_fitted(self)
X = self._check_X(X)
n_classes = self.n_classes_
if n_classes == 1:
return np.ones((_num_samples(X), 1))
decision = self.decision_function(X)
return self._compute_proba_from_decision(decision, n_classes)
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
        ------
p : generator of ndarray of shape (n_samples,)
The class probabilities of the input samples. The order of
            outputs is the same as that of the :term:`classes_` attribute.
"""
X = self._check_X(X)
n_classes = self.n_classes_
for decision in self.staged_decision_function(X):
yield self._compute_proba_from_decision(decision, n_classes)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of
            outputs is the same as that of the :term:`classes_` attribute.
"""
X = self._check_X(X)
return np.log(self.predict_proba(X))
class AdaBoostRegressor(RegressorMixin, BaseWeightBoosting):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
.. versionadded:: 0.14
Parameters
----------
base_estimator : object, default=None
The base estimator from which the boosted ensemble is built.
If ``None``, then the base estimator is
:class:`~sklearn.tree.DecisionTreeRegressor` initialized with
`max_depth=3`.
n_estimators : int, default=50
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, default=1.
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, default='linear'
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, default=None
Controls the random seed given at each `base_estimator` at each
boosting iteration.
Thus, it is only used when `base_estimator` exposes a `random_state`.
In addition, it controls the bootstrap of the weights used to train the
`base_estimator` at each boosting iteration.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
    estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : ndarray of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : ndarray of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances if supported by the
``base_estimator`` (when based on decision trees).
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
Examples
--------
>>> from sklearn.ensemble import AdaBoostRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=4, n_informative=2,
... random_state=0, shuffle=False)
>>> regr = AdaBoostRegressor(random_state=0, n_estimators=100)
>>> regr.fit(X, y)
AdaBoostRegressor(n_estimators=100, random_state=0)
>>> regr.predict([[0, 0, 0, 0]])
array([4.7972...])
>>> regr.score(X, y)
0.9771...
See Also
--------
AdaBoostClassifier, GradientBoostingRegressor,
sklearn.tree.DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
@_deprecate_positional_args
def __init__(self,
base_estimator=None, *,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super().__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
The target values (real numbers).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super().fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super()._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,)
The current sample weights.
random_state : RandomState
The RandomState instance used if the base estimator accepts a
`random_state` attribute.
            Controls also the bootstrap of the weights (sampling with
            replacement) used to train the weak learner.
Returns
-------
sample_weight : array-like of shape (n_samples,) or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator(random_state=random_state)
# Weighted sampling of the training set with replacement
bootstrap_idx = random_state.choice(
np.arange(_num_samples(X)), size=_num_samples(X), replace=True,
p=sample_weight
)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
X_ = _safe_indexing(X, bootstrap_idx)
y_ = _safe_indexing(y, bootstrap_idx)
estimator.fit(X_, y_)
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
sample_mask = sample_weight > 0
masked_sample_weight = sample_weight[sample_mask]
masked_error_vector = error_vect[sample_mask]
error_max = masked_error_vector.max()
if error_max != 0:
masked_error_vector /= error_max
if self.loss == 'square':
masked_error_vector **= 2
elif self.loss == 'exponential':
masked_error_vector = 1. - np.exp(-masked_error_vector)
# Calculate the average loss
estimator_error = (masked_sample_weight * masked_error_vector).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
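        # For illustration (hypothetical numbers, not part of the original code):
        # with estimator_error = 0.2 and learning_rate = 1., beta = 0.2 / 0.8 = 0.25
        # and estimator_weight = log(1 / 0.25) ~= 1.386, so estimators with a lower
        # average loss receive a larger say in the weighted-median prediction.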
        if iboost != self.n_estimators - 1:
sample_weight[sample_mask] *= np.power(
beta, (1. - masked_error_vector) * self.learning_rate
)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
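        # For illustration (hypothetical values): if one sample has sorted predictions
        # [1.0, 2.0, 5.0] with estimator weights [0.2, 0.5, 0.3] in that order, the
        # cumulative weights are [0.2, 0.7, 1.0]; half the total is 0.5, so argmax
        # picks index 1 and the weighted-median prediction is 2.0.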
median_estimators = sorted_idx[np.arange(_num_samples(X)), median_idx]
# Return median predictions
return predictions[np.arange(_num_samples(X)), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted regression values.
"""
check_is_fitted(self)
X = self._check_X(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
Yields
-------
y : generator of ndarray of shape (n_samples,)
The predicted regression values.
"""
check_is_fitted(self)
X = self._check_X(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
| bsd-3-clause | -255,894,120,014,860,350 | 35.631266 | 79 | 0.597449 | false |
spcui/virt-test | libvirt/tests/src/virsh_cmd/domain/virsh_setmem.py | 1 | 9363 | import re
import logging
import time
from autotest.client.shared import error
from virttest import virsh, utils_libvirtd
def run_virsh_setmem(test, params, env):
"""
Test command: virsh setmem.
    1) Prepare vm environment.
    2) Handle params.
    3) Prepare libvirtd status.
    4) Run test command and wait for the current memory to stabilize.
    5) Recover environment.
    6) Check result.
TODO: support new libvirt with more options.
"""
def vm_proc_meminfo(session):
proc_meminfo = session.cmd_output("cat /proc/meminfo")
# verify format and units are expected
return int(re.search(r'MemTotal:\s+(\d+)\s+kB', proc_meminfo).group(1))
def make_domref(domarg, vm_ref, domid, vm_name, domuuid):
# Specify domain as argument or parameter
if domarg == "yes":
dom_darg_key = "domainarg"
else:
dom_darg_key = "domain"
# How to reference domain
if vm_ref == "domid":
dom_darg_value = domid
elif vm_ref == "domname":
dom_darg_value = vm_name
elif vm_ref == "domuuid":
dom_darg_value = domuuid
elif vm_ref == "none":
dom_darg_value = None
elif vm_ref == "emptystring":
dom_darg_value = '""'
else: # stick in value directly
dom_darg_value = vm_ref
return {dom_darg_key: dom_darg_value}
def make_sizeref(sizearg, mem_ref, original_mem):
if sizearg == "yes":
size_darg_key = "sizearg"
else:
size_darg_key = "size"
if mem_ref == "halfless":
size_darg_value = "%d" % (original_mem / 2)
elif mem_ref == "halfmore":
size_darg_value = "%d" % int(original_mem * 1.5) # no fraction
elif mem_ref == "same":
size_darg_value = "%d" % original_mem
elif mem_ref == "emptystring":
size_darg_value = '""'
elif mem_ref == "zero":
size_darg_value = "0"
elif mem_ref == "toosmall":
size_darg_value = "1024"
elif mem_ref == "toobig":
size_darg_value = "1099511627776" # (KiB) One Petabyte
elif mem_ref == "none":
size_darg_value = None
else: # stick in value directly
size_darg_value = mem_ref
return {size_darg_key: size_darg_value}
def is_in_range(actual, expected, error_percent):
deviation = 100 - (100 * (float(actual) / float(expected)))
logging.debug("Deviation: %0.2f%%" % float(deviation))
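        # For illustration (hypothetical numbers): deviation measures how far actual
        # falls below expected, in percent; actual=900 kB against expected=1000 kB
        # gives deviation = 100 - 90 = 10, which only passes if error_percent >= 10.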
return float(deviation) <= float(error_percent)
def is_old_libvirt():
regex = r'\s+\[--size\]\s+'
return bool(not virsh.has_command_help_match('setmem', regex))
def print_debug_stats(original_inside_mem, original_outside_mem,
test_inside_mem, test_outside_mem,
expected_mem, delta_percentage):
dbgmsg = ("Original inside mem : %d KiB\n"
"Expected inside mem : %d KiB\n"
"Actual inside mem : %d KiB\n"
"Inside mem deviation : %0.2f%%\n"
"Original outside mem : %d KiB\n"
"Expected outside mem : %d KiB\n"
"Actual outside mem : %d KiB\n"
"Outside mem deviation: %0.2f%%\n"
"Acceptable deviation %0.2f%%" % (
original_inside_mem,
expected_mem,
test_inside_mem,
100 -
(100 * (float(test_inside_mem) / float(expected_mem))),
original_outside_mem,
expected_mem,
test_outside_mem,
100 -
(100 * (float(test_outside_mem) / float(expected_mem))),
float(delta_percentage)))
for dbgline in dbgmsg.splitlines():
logging.debug(dbgline)
# MAIN TEST CODE ###
# Process cartesian parameters
vm_ref = params.get("setmem_vm_ref", "")
mem_ref = params.get("setmem_mem_ref", "")
flags = params.get("setmem_flags", "")
status_error = params.get("status_error", "no")
old_libvirt_fail = params.get("setmem_old_libvirt_fail", "no")
quiesce_delay = int(params.get("setmem_quiesce_delay", "1"))
domarg = params.get("setmem_domarg", "no")
sizearg = params.get("setmem_sizearg", "no")
libvirt = params.get("libvirt", "on")
delta_percentage = float(params.get("setmem_delta_per", "10"))
start_vm = params.get("start_vm", "yes")
vm_name = params.get("main_vm")
paused_after_start_vm = "yes" == params.get("paused_after_start_vm", "no")
# Gather environment parameters
vm = env.get_vm(params["main_vm"])
if start_vm == "yes":
if paused_after_start_vm:
vm.resume()
session = vm.wait_for_login()
original_inside_mem = vm_proc_meminfo(session)
session.close()
if paused_after_start_vm:
vm.pause()
else:
session = None
# Retrieve known mem value, convert into kilobytes
original_inside_mem = int(params.get("mem", "1024")) * 1024
original_outside_mem = vm.get_used_mem()
domid = vm.get_id()
domuuid = vm.get_uuid()
uri = vm.connect_uri
old_libvirt = is_old_libvirt()
if old_libvirt:
logging.info("Running test on older libvirt")
use_kilobytes = True
else:
logging.info("Running test on newer libvirt")
use_kilobytes = False
# Argument pattern is complex, build with dargs
dargs = {'flagstr': flags,
'use_kilobytes': use_kilobytes,
'uri': uri, 'ignore_status': True, "debug": True}
dargs.update(make_domref(domarg, vm_ref, domid, vm_name, domuuid))
dargs.update(make_sizeref(sizearg, mem_ref, original_outside_mem))
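    # For illustration (hypothetical values): with domarg and sizearg set to "yes",
    # dargs could end up as {'flagstr': '', 'use_kilobytes': False, 'uri': uri,
    # 'ignore_status': True, 'debug': True, 'domainarg': 'vm1', 'sizearg': '524288'},
    # which is then passed straight to virsh.setmem(**dargs) below.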
# Prepare libvirtd status
if libvirt == "off":
utils_libvirtd.libvirtd_stop()
else: # make sure it's running
utils_libvirtd.libvirtd_restart()
if status_error == "yes" or old_libvirt_fail == "yes":
logging.info("Error Test: Expecting an error to occur!")
result = virsh.setmem(**dargs)
status = result.exit_status
# Recover libvirtd status
if libvirt == "off":
utils_libvirtd.libvirtd_start()
    if status == 0:
logging.info(
"Waiting %d seconds for VM memory to settle", quiesce_delay)
        # It takes time for the kernel to settle on the new memory amount and
        # the number of clean pages at any moment is not predictable, so it is
        # extremely difficult to determine quiescence; sleeping one second per
        # error percent is a reasonable option.
time.sleep(quiesce_delay)
# Gather stats if not running error test
if status_error == "no" and old_libvirt_fail == "no":
if vm.state() == "shut off":
vm.start()
# Make sure it's never paused
vm.resume()
session = vm.wait_for_login()
# Actual results
test_inside_mem = vm_proc_meminfo(session)
session.close()
test_outside_mem = vm.get_used_mem()
# Expected results for both inside and outside
if sizearg == "yes":
expected_mem = int(dargs["sizearg"])
else:
expected_mem = int(dargs["size"])
print_debug_stats(original_inside_mem, original_outside_mem,
test_inside_mem, test_outside_mem,
expected_mem, delta_percentage)
        if status == 0: # Restore original memory
restore_status = virsh.setmem(domainarg=vm_name,
sizearg=original_inside_mem,
ignore_status=True).exit_status
            if restore_status != 0:
logging.warning("Failed to restore VM's original memory to %s KiB"
% original_inside_mem)
else:
# virsh setmem failed, no need to restore
pass
# Don't care about memory comparison on error test
if status_error == "no" and old_libvirt_fail == "no":
outside_in_range = is_in_range(test_outside_mem, expected_mem,
delta_percentage)
inside_in_range = is_in_range(test_inside_mem, expected_mem,
delta_percentage)
        if status != 0 or not outside_in_range or not inside_in_range:
msg = "test conditions not met: "
            if status != 0:
msg += "Non-zero virsh setmem exit code. " # maybe multiple
if not outside_in_range: # errors
msg += "Outside memory deviated. "
if not inside_in_range:
msg += "Inside memory deviated. "
raise error.TestFail(msg)
return # Normal test passed
else: # Verify an error test resulted in error
        if status == 0:
raise error.TestFail("Error test did not result in an error")
else: # status != 0
if not old_libvirt: # new libvirt should not have returned error
raise error.TestFail("Newer libvirt failed when it should not")
else:
# Test passes for old_libvirt is True
pass
| gpl-2.0 | 7,478,082,938,550,316,000 | 37.060976 | 79 | 0.555484 | false |
roderickmackenzie/opvdm | gui/monitor_dir.py | 1 | 3651 | # Organic Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for organic solar cells.
# Copyright (C) 2012 Roderick C. I. MacKenzie
#
# [email protected]
# www.opvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import pygtk
pygtk.require('2.0')
import gtk
import sys
import os
import shutil
import threading
import gobject
import multiprocessing
import glob
import socket
from time import sleep
from win_lin import running_on_linux
import subprocess
from util import gui_print_path
if running_on_linux()==True:
import pyinotify
else:
import win32file
import win32con
FILE_LIST_DIRECTORY = 0x0001
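# Note: FILE_LIST_DIRECTORY is the access right passed to win32file.CreateFile in
# _FooThread.run() so the directory handle can be watched with ReadDirectoryChangesW
# on Windows; on Linux the equivalent watching is done with pyinotify instead.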
class _IdleObject(gobject.GObject):
def __init__(self):
gobject.GObject.__init__(self)
def emit(self, *args):
gobject.idle_add(gobject.GObject.emit,self,*args)
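# Note: _IdleObject routes emit() through gobject.idle_add so that signals fired from
# the watcher thread below are delivered in the GTK main loop rather than from the
# background thread, which is the usual way to keep GUI updates thread-safe.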
class _FooThread(threading.Thread, _IdleObject):
__gsignals__ = {
"file_changed": (
gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [])
}
def __init__(self, *args):
threading.Thread.__init__(self)
_IdleObject.__init__(self)
self.notifier=False
def onChange(self,ev):
if running_on_linux()==True:
file_name=os.path.basename(ev.pathname)
else:
file_name=os.path.basename(ev)
file_name=file_name.rstrip()
self.thread_data[0]=file_name
print "thread: file changed"
self.emit("file_changed")
def set_watch_path(self,path,thread_data):
self.watch_path=path
self.thread_data=thread_data
def run(self):
if running_on_linux()==True:
print "thread: start"
wm = pyinotify.WatchManager()
			print "watching path",self.watch_path
ret=wm.add_watch(self.watch_path, pyinotify.IN_CLOSE_WRITE, self.onChange,False,False)
print ret
			print "thread: start notifier",self.notifier
self.notifier = pyinotify.Notifier(wm)
try:
while 1:
self.notifier.process_events()
if self.notifier.check_events():
self.notifier.read_events()
#self.notifier.loop()
except:
print "error in notify",sys.exc_info()[0]
else:
hDir = win32file.CreateFile (self.watch_path,FILE_LIST_DIRECTORY,win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE | win32con.FILE_SHARE_DELETE,None,win32con.OPEN_EXISTING,win32con.FILE_FLAG_BACKUP_SEMANTICS,None)
while 1:
results = win32file.ReadDirectoryChangesW (hDir,1024,True,
win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
win32con.FILE_NOTIFY_CHANGE_DIR_NAME |
win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |
win32con.FILE_NOTIFY_CHANGE_SIZE |
win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |
win32con.FILE_NOTIFY_CHANGE_SECURITY,
None,
None)
for action, file in results:
full_filename = os.path.join (self.watch_path, file)
self.onChange(full_filename)
def stop(self):
print "thread: stop been called",threading.currentThread()
if running_on_linux()==True:
self.notifier.stop()
self.notifier=False
print "thread:I have shutdown the notifyer",threading.currentThread()
| gpl-2.0 | -5,030,337,655,141,456,000 | 28.92623 | 219 | 0.72172 | false |
fernandog/Sick-Beard | sickbeard/webserve.py | 1 | 149286 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import time
import urllib
import re
import threading
import datetime
import random
import locale
from Cheetah.Template import Template
import cherrypy.lib
import sickbeard
from sickbeard import config, sab
from sickbeard import clients
from sickbeard import history, notifiers, processTV
from sickbeard import ui
from sickbeard import logger, helpers, exceptions, classes, db
from sickbeard import encodingKludge as ek
from sickbeard import search_queue
from sickbeard import image_cache
from sickbeard import naming
from sickbeard import scene_exceptions
from sickbeard import subtitles
from sickbeard.providers import newznab
from sickbeard.common import Quality, Overview, statusStrings, qualityPresetStrings
from sickbeard.common import SNATCHED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED
from sickbeard.common import SD, HD720p, HD1080p
from sickbeard.exceptions import ex
from sickbeard.webapi import Api
from lib.tvdb_api import tvdb_api
from lib.dateutil import tz
import subliminal
import network_timezones
try:
import json
except ImportError:
from lib import simplejson as json
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from sickbeard import browser
class PageTemplate (Template):
def __init__(self, *args, **KWs):
# KWs['file'] = os.path.join(sickbeard.PROG_DIR, "data/interfaces/default/", KWs['file'])
KWs['file'] = os.path.join(sickbeard.PROG_DIR, "gui/"+sickbeard.GUI_NAME+"/interfaces/default/", KWs['file'])
super(PageTemplate, self).__init__(*args, **KWs)
self.sbRoot = sickbeard.WEB_ROOT
self.sbHttpPort = sickbeard.WEB_PORT
self.sbHttpsPort = sickbeard.WEB_PORT
self.sbHttpsEnabled = sickbeard.ENABLE_HTTPS
if cherrypy.request.headers['Host'][0] == '[':
self.sbHost = re.match("^\[.*\]", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0)
else:
self.sbHost = re.match("^[^:]+", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0)
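        # For illustration: the two branches above strip any port from the Host header,
        # e.g. "example.com:8081" becomes "example.com" and "[::1]:8081" becomes "[::1]".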
self.projectHomePage = "http://code.google.com/p/sickbeard/"
if sickbeard.NZBS and sickbeard.NZBS_UID and sickbeard.NZBS_HASH:
logger.log(u"NZBs.org has been replaced, please check the config to configure the new provider!", logger.ERROR)
ui.notifications.error("NZBs.org Config Update", "NZBs.org has a new site. Please <a href=\""+sickbeard.WEB_ROOT+"/config/providers\">update your config</a> with the api key from <a href=\"http://nzbs.org/login\">http://nzbs.org</a> and then disable the old NZBs.org provider.")
if "X-Forwarded-Host" in cherrypy.request.headers:
self.sbHost = cherrypy.request.headers['X-Forwarded-Host']
if "X-Forwarded-Port" in cherrypy.request.headers:
self.sbHttpPort = cherrypy.request.headers['X-Forwarded-Port']
self.sbHttpsPort = self.sbHttpPort
if "X-Forwarded-Proto" in cherrypy.request.headers:
self.sbHttpsEnabled = True if cherrypy.request.headers['X-Forwarded-Proto'] == 'https' else False
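        # For illustration (hypothetical reverse-proxy setup): a frontend such as nginx
        # might send X-Forwarded-Host: tv.example.com, X-Forwarded-Port: 443 and
        # X-Forwarded-Proto: https, so links rendered by the templates point at the
        # proxy rather than at the internal CherryPy address.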
logPageTitle = 'Logs & Errors'
if len(classes.ErrorViewer.errors):
logPageTitle += ' ('+str(len(classes.ErrorViewer.errors))+')'
self.logPageTitle = logPageTitle
self.sbPID = str(sickbeard.PID)
self.menu = [
{ 'title': 'Home', 'key': 'home' },
{ 'title': 'Coming Episodes', 'key': 'comingEpisodes' },
{ 'title': 'History', 'key': 'history' },
{ 'title': 'Manage', 'key': 'manage' },
{ 'title': 'Config', 'key': 'config' },
{ 'title': logPageTitle, 'key': 'errorlogs' },
]
def redirect(abspath, *args, **KWs):
assert abspath[0] == '/'
raise cherrypy.HTTPRedirect(sickbeard.WEB_ROOT + abspath, *args, **KWs)
class TVDBWebUI:
def __init__(self, config, log=None):
self.config = config
self.log = log
def selectSeries(self, allSeries):
searchList = ",".join([x['id'] for x in allSeries])
showDirList = ""
for curShowDir in self.config['_showDir']:
showDirList += "showDir="+curShowDir+"&"
redirect("/home/addShows/addShow?" + showDirList + "seriesList=" + searchList)
def _munge(string):
return unicode(string).encode('utf-8', 'xmlcharrefreplace')
def _genericMessage(subject, message):
t = PageTemplate(file="genericMessage.tmpl")
t.submenu = HomeMenu()
t.subject = subject
t.message = message
return _munge(t)
def _getEpisode(show, season, episode):
if show == None or season == None or episode == None:
return "Invalid parameters"
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return "Show not in show list"
epObj = showObj.getEpisode(int(season), int(episode))
if epObj == None:
return "Episode couldn't be retrieved"
return epObj
ManageMenu = [
{ 'title': 'Backlog Overview', 'path': 'manage/backlogOverview' },
{ 'title': 'Manage Searches', 'path': 'manage/manageSearches' },
{ 'title': 'Episode Status Management', 'path': 'manage/episodeStatuses' },
]
if sickbeard.USE_SUBTITLES:
ManageMenu.append({ 'title': 'Missed Subtitle Management', 'path': 'manage/subtitleMissed' })
class ManageSearches:
@cherrypy.expose
def index(self):
t = PageTemplate(file="manage_manageSearches.tmpl")
#t.backlogPI = sickbeard.backlogSearchScheduler.action.getProgressIndicator()
t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused() #@UndefinedVariable
t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() #@UndefinedVariable
t.searchStatus = sickbeard.currentSearchScheduler.action.amActive #@UndefinedVariable
t.submenu = ManageMenu
return _munge(t)
@cherrypy.expose
def forceSearch(self):
# force it to run the next time it looks
result = sickbeard.currentSearchScheduler.forceRun()
if result:
logger.log(u"Search forced")
ui.notifications.message('Episode search started',
'Note: RSS feeds may not be updated if retrieved recently')
redirect("/manage/manageSearches")
@cherrypy.expose
def pauseBacklog(self, paused=None):
if paused == "1":
sickbeard.searchQueueScheduler.action.pause_backlog() #@UndefinedVariable
else:
sickbeard.searchQueueScheduler.action.unpause_backlog() #@UndefinedVariable
redirect("/manage/manageSearches")
@cherrypy.expose
def forceVersionCheck(self):
# force a check to see if there is a new version
result = sickbeard.versionCheckScheduler.action.check_for_new_version(force=True) #@UndefinedVariable
if result:
logger.log(u"Forcing version check")
redirect("/manage/manageSearches")
class Manage:
manageSearches = ManageSearches()
@cherrypy.expose
def index(self):
t = PageTemplate(file="manage.tmpl")
t.submenu = ManageMenu
return _munge(t)
@cherrypy.expose
def showEpisodeStatuses(self, tvdb_id, whichStatus):
myDB = db.DBConnection()
status_list = [int(whichStatus)]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER
cur_show_results = myDB.select("SELECT season, episode, name FROM tv_episodes WHERE showid = ? AND season != 0 AND status IN ("+','.join(['?']*len(status_list))+")", [int(tvdb_id)] + status_list)
result = {}
for cur_result in cur_show_results:
cur_season = int(cur_result["season"])
cur_episode = int(cur_result["episode"])
if cur_season not in result:
result[cur_season] = {}
result[cur_season][cur_episode] = cur_result["name"]
return json.dumps(result)
@cherrypy.expose
def episodeStatuses(self, whichStatus=None):
if whichStatus:
whichStatus = int(whichStatus)
status_list = [whichStatus]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER
else:
status_list = []
t = PageTemplate(file="manage_episodeStatuses.tmpl")
t.submenu = ManageMenu
t.whichStatus = whichStatus
# if we have no status then this is as far as we need to go
if not status_list:
return _munge(t)
myDB = db.DBConnection()
status_results = myDB.select("SELECT show_name, tv_shows.tvdb_id as tvdb_id FROM tv_episodes, tv_shows WHERE tv_episodes.status IN ("+','.join(['?']*len(status_list))+") AND season != 0 AND tv_episodes.showid = tv_shows.tvdb_id ORDER BY show_name", status_list)
ep_counts = {}
show_names = {}
sorted_show_ids = []
for cur_status_result in status_results:
cur_tvdb_id = int(cur_status_result["tvdb_id"])
if cur_tvdb_id not in ep_counts:
ep_counts[cur_tvdb_id] = 1
else:
ep_counts[cur_tvdb_id] += 1
show_names[cur_tvdb_id] = cur_status_result["show_name"]
if cur_tvdb_id not in sorted_show_ids:
sorted_show_ids.append(cur_tvdb_id)
t.show_names = show_names
t.ep_counts = ep_counts
t.sorted_show_ids = sorted_show_ids
return _munge(t)
@cherrypy.expose
def changeEpisodeStatuses(self, oldStatus, newStatus, *args, **kwargs):
status_list = [int(oldStatus)]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER
to_change = {}
# make a list of all shows and their associated args
for arg in kwargs:
tvdb_id, what = arg.split('-')
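            # For illustration: the posted checkbox names look like
            # "<tvdb_id>-<season>x<episode>" or "<tvdb_id>-all"; a hypothetical
            # "75760-3x12" splits into tvdb_id "75760" and what "3x12".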
# we don't care about unchecked checkboxes
if kwargs[arg] != 'on':
continue
if tvdb_id not in to_change:
to_change[tvdb_id] = []
to_change[tvdb_id].append(what)
myDB = db.DBConnection()
for cur_tvdb_id in to_change:
# get a list of all the eps we want to change if they just said "all"
if 'all' in to_change[cur_tvdb_id]:
all_eps_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE status IN ("+','.join(['?']*len(status_list))+") AND season != 0 AND showid = ?", status_list + [cur_tvdb_id])
all_eps = [str(x["season"])+'x'+str(x["episode"]) for x in all_eps_results]
to_change[cur_tvdb_id] = all_eps
Home().setStatus(cur_tvdb_id, '|'.join(to_change[cur_tvdb_id]), newStatus, direct=True)
redirect('/manage/episodeStatuses')
@cherrypy.expose
def showSubtitleMissed(self, tvdb_id, whichSubs):
myDB = db.DBConnection()
cur_show_results = myDB.select("SELECT season, episode, name, subtitles FROM tv_episodes WHERE showid = ? AND season != 0 AND status LIKE '%4'", [int(tvdb_id)])
result = {}
for cur_result in cur_show_results:
if whichSubs == 'all':
if len(set(cur_result["subtitles"].split(',')).intersection(set(subtitles.wantedLanguages()))) >= len(subtitles.wantedLanguages()):
continue
elif whichSubs in cur_result["subtitles"].split(','):
continue
cur_season = int(cur_result["season"])
cur_episode = int(cur_result["episode"])
if cur_season not in result:
result[cur_season] = {}
if cur_episode not in result[cur_season]:
result[cur_season][cur_episode] = {}
result[cur_season][cur_episode]["name"] = cur_result["name"]
result[cur_season][cur_episode]["subtitles"] = ",".join(subliminal.language.Language(subtitle).alpha2 for subtitle in cur_result["subtitles"].split(',')) if not cur_result["subtitles"] == '' else ''
return json.dumps(result)
@cherrypy.expose
def subtitleMissed(self, whichSubs=None):
t = PageTemplate(file="manage_subtitleMissed.tmpl")
t.submenu = ManageMenu
t.whichSubs = whichSubs
if not whichSubs:
return _munge(t)
myDB = db.DBConnection()
status_results = myDB.select("SELECT show_name, tv_shows.tvdb_id as tvdb_id, tv_episodes.subtitles subtitles FROM tv_episodes, tv_shows WHERE tv_shows.subtitles = 1 AND tv_episodes.status LIKE '%4' AND tv_episodes.season != 0 AND tv_episodes.showid = tv_shows.tvdb_id ORDER BY show_name")
ep_counts = {}
show_names = {}
sorted_show_ids = []
for cur_status_result in status_results:
if whichSubs == 'all':
if len(set(cur_status_result["subtitles"].split(',')).intersection(set(subtitles.wantedLanguages()))) >= len(subtitles.wantedLanguages()):
continue
elif whichSubs in cur_status_result["subtitles"].split(','):
continue
cur_tvdb_id = int(cur_status_result["tvdb_id"])
if cur_tvdb_id not in ep_counts:
ep_counts[cur_tvdb_id] = 1
else:
ep_counts[cur_tvdb_id] += 1
show_names[cur_tvdb_id] = cur_status_result["show_name"]
if cur_tvdb_id not in sorted_show_ids:
sorted_show_ids.append(cur_tvdb_id)
t.show_names = show_names
t.ep_counts = ep_counts
t.sorted_show_ids = sorted_show_ids
return _munge(t)
@cherrypy.expose
def downloadSubtitleMissed(self, *args, **kwargs):
to_download = {}
# make a list of all shows and their associated args
for arg in kwargs:
tvdb_id, what = arg.split('-')
# we don't care about unchecked checkboxes
if kwargs[arg] != 'on':
continue
if tvdb_id not in to_download:
to_download[tvdb_id] = []
to_download[tvdb_id].append(what)
for cur_tvdb_id in to_download:
# get a list of all the eps we want to download subtitles if they just said "all"
if 'all' in to_download[cur_tvdb_id]:
myDB = db.DBConnection()
all_eps_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE status LIKE '%4' AND season != 0 AND showid = ?", [cur_tvdb_id])
to_download[cur_tvdb_id] = [str(x["season"])+'x'+str(x["episode"]) for x in all_eps_results]
for epResult in to_download[cur_tvdb_id]:
                season, episode = epResult.split('x')
show = sickbeard.helpers.findCertainShow(sickbeard.showList, int(cur_tvdb_id))
subtitles = show.getEpisode(int(season), int(episode)).downloadSubtitles()
if sickbeard.SUBTITLES_DIR:
for video in subtitles:
subs_new_path = ek.ek(os.path.join, os.path.dirname(video.path), sickbeard.SUBTITLES_DIR)
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
helpers.chmodAsParent(new_file_path)
else:
for video in subtitles:
for subtitle in subtitles.get(video):
helpers.chmodAsParent(subtitle.path)
redirect('/manage/subtitleMissed')
@cherrypy.expose
def backlogShow(self, tvdb_id):
show_obj = helpers.findCertainShow(sickbeard.showList, int(tvdb_id))
if show_obj:
sickbeard.backlogSearchScheduler.action.searchBacklog([show_obj]) #@UndefinedVariable
redirect("/manage/backlogOverview")
@cherrypy.expose
def backlogOverview(self):
t = PageTemplate(file="manage_backlogOverview.tmpl")
t.submenu = ManageMenu
myDB = db.DBConnection()
showCounts = {}
showCats = {}
showSQLResults = {}
for curShow in sickbeard.showList:
epCounts = {}
epCats = {}
epCounts[Overview.SKIPPED] = 0
epCounts[Overview.WANTED] = 0
epCounts[Overview.QUAL] = 0
epCounts[Overview.GOOD] = 0
epCounts[Overview.UNAIRED] = 0
epCounts[Overview.SNATCHED] = 0
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC", [curShow.tvdbid])
for curResult in sqlResults:
curEpCat = curShow.getOverview(int(curResult["status"]))
epCats[str(curResult["season"]) + "x" + str(curResult["episode"])] = curEpCat
epCounts[curEpCat] += 1
showCounts[curShow.tvdbid] = epCounts
showCats[curShow.tvdbid] = epCats
showSQLResults[curShow.tvdbid] = sqlResults
t.showCounts = showCounts
t.showCats = showCats
t.showSQLResults = showSQLResults
return _munge(t)
@cherrypy.expose
def massEdit(self, toEdit=None):
t = PageTemplate(file="manage_massEdit.tmpl")
t.submenu = ManageMenu
if not toEdit:
redirect("/manage")
showIDs = toEdit.split("|")
showList = []
for curID in showIDs:
curID = int(curID)
showObj = helpers.findCertainShow(sickbeard.showList, curID)
if showObj:
showList.append(showObj)
flatten_folders_all_same = True
last_flatten_folders = None
paused_all_same = True
last_paused = None
quality_all_same = True
last_quality = None
subtitles_all_same = True
last_subtitles = None
root_dir_list = []
for curShow in showList:
cur_root_dir = ek.ek(os.path.dirname, curShow._location)
if cur_root_dir not in root_dir_list:
root_dir_list.append(cur_root_dir)
# if we know they're not all the same then no point even bothering
if paused_all_same:
# if we had a value already and this value is different then they're not all the same
if last_paused not in (curShow.paused, None):
paused_all_same = False
else:
last_paused = curShow.paused
if flatten_folders_all_same:
if last_flatten_folders not in (None, curShow.flatten_folders):
flatten_folders_all_same = False
else:
last_flatten_folders = curShow.flatten_folders
if quality_all_same:
if last_quality not in (None, curShow.quality):
quality_all_same = False
else:
last_quality = curShow.quality
if subtitles_all_same:
if last_subtitles not in (None, curShow.subtitles):
subtitles_all_same = False
else:
last_subtitles = curShow.subtitles
t.showList = toEdit
t.paused_value = last_paused if paused_all_same else None
t.flatten_folders_value = last_flatten_folders if flatten_folders_all_same else None
t.quality_value = last_quality if quality_all_same else None
t.subtitles_value = last_subtitles if subtitles_all_same else None
t.root_dir_list = root_dir_list
return _munge(t)
@cherrypy.expose
def massEditSubmit(self, paused=None, flatten_folders=None, quality_preset=False, subtitles=None,
anyQualities=[], bestQualities=[], toEdit=None, *args, **kwargs):
dir_map = {}
for cur_arg in kwargs:
if not cur_arg.startswith('orig_root_dir_'):
continue
which_index = cur_arg.replace('orig_root_dir_', '')
end_dir = kwargs['new_root_dir_'+which_index]
dir_map[kwargs[cur_arg]] = end_dir
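        # For illustration (hypothetical paths): kwargs such as
        # {'orig_root_dir_0': '/mnt/tv', 'new_root_dir_0': '/srv/tv'} produce
        # dir_map == {'/mnt/tv': '/srv/tv'}, which is used below to relocate show dirs.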
showIDs = toEdit.split("|")
errors = []
for curShow in showIDs:
curErrors = []
showObj = helpers.findCertainShow(sickbeard.showList, int(curShow))
if not showObj:
continue
cur_root_dir = ek.ek(os.path.dirname, showObj._location)
cur_show_dir = ek.ek(os.path.basename, showObj._location)
if cur_root_dir in dir_map and cur_root_dir != dir_map[cur_root_dir]:
new_show_dir = ek.ek(os.path.join, dir_map[cur_root_dir], cur_show_dir)
logger.log(u"For show "+showObj.name+" changing dir from "+showObj._location+" to "+new_show_dir)
else:
new_show_dir = showObj._location
if paused == 'keep':
new_paused = showObj.paused
else:
new_paused = True if paused == 'enable' else False
new_paused = 'on' if new_paused else 'off'
if flatten_folders == 'keep':
new_flatten_folders = showObj.flatten_folders
else:
new_flatten_folders = True if flatten_folders == 'enable' else False
new_flatten_folders = 'on' if new_flatten_folders else 'off'
if subtitles == 'keep':
new_subtitles = showObj.subtitles
else:
new_subtitles = True if subtitles == 'enable' else False
new_subtitles = 'on' if new_subtitles else 'off'
if quality_preset == 'keep':
anyQualities, bestQualities = Quality.splitQuality(showObj.quality)
exceptions_list = []
curErrors += Home().editShow(curShow, new_show_dir, anyQualities, bestQualities, exceptions_list, new_flatten_folders, new_paused, subtitles=new_subtitles, directCall=True)
if curErrors:
logger.log(u"Errors: "+str(curErrors), logger.ERROR)
errors.append('<b>%s:</b>\n<ul>' % showObj.name + ' '.join(['<li>%s</li>' % error for error in curErrors]) + "</ul>")
if len(errors) > 0:
ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"),
" ".join(errors))
redirect("/manage")
@cherrypy.expose
def massUpdate(self, toUpdate=None, toRefresh=None, toRename=None, toDelete=None, toMetadata=None, toSubtitle=None):
if toUpdate != None:
toUpdate = toUpdate.split('|')
else:
toUpdate = []
if toRefresh != None:
toRefresh = toRefresh.split('|')
else:
toRefresh = []
if toRename != None:
toRename = toRename.split('|')
else:
toRename = []
if toSubtitle != None:
toSubtitle = toSubtitle.split('|')
else:
toSubtitle = []
if toDelete != None:
toDelete = toDelete.split('|')
else:
toDelete = []
if toMetadata != None:
toMetadata = toMetadata.split('|')
else:
toMetadata = []
errors = []
refreshes = []
updates = []
renames = []
subtitles = []
for curShowID in set(toUpdate+toRefresh+toRename+toSubtitle+toDelete+toMetadata):
if curShowID == '':
continue
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(curShowID))
if showObj == None:
continue
if curShowID in toDelete:
showObj.deleteShow()
# don't do anything else if it's being deleted
continue
if curShowID in toUpdate:
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, True) #@UndefinedVariable
updates.append(showObj.name)
except exceptions.CantUpdateException, e:
errors.append("Unable to update show "+showObj.name+": "+ex(e))
# don't bother refreshing shows that were updated anyway
if curShowID in toRefresh and curShowID not in toUpdate:
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
refreshes.append(showObj.name)
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh show "+showObj.name+": "+ex(e))
if curShowID in toRename:
sickbeard.showQueueScheduler.action.renameShowEpisodes(showObj) #@UndefinedVariable
renames.append(showObj.name)
if curShowID in toSubtitle:
sickbeard.showQueueScheduler.action.downloadSubtitles(showObj) #@UndefinedVariable
subtitles.append(showObj.name)
if len(errors) > 0:
ui.notifications.error("Errors encountered",
'<br >\n'.join(errors))
messageDetail = ""
if len(updates) > 0:
messageDetail += "<br /><b>Updates</b><br /><ul><li>"
messageDetail += "</li><li>".join(updates)
messageDetail += "</li></ul>"
if len(refreshes) > 0:
messageDetail += "<br /><b>Refreshes</b><br /><ul><li>"
messageDetail += "</li><li>".join(refreshes)
messageDetail += "</li></ul>"
if len(renames) > 0:
messageDetail += "<br /><b>Renames</b><br /><ul><li>"
messageDetail += "</li><li>".join(renames)
messageDetail += "</li></ul>"
if len(subtitles) > 0:
messageDetail += "<br /><b>Subtitles</b><br /><ul><li>"
messageDetail += "</li><li>".join(subtitles)
messageDetail += "</li></ul>"
if len(updates+refreshes+renames+subtitles) > 0:
ui.notifications.message("The following actions were queued:",
messageDetail)
redirect("/manage")
class History:
@cherrypy.expose
def index(self, limit=100):
myDB = db.DBConnection()
# sqlResults = myDB.select("SELECT h.*, show_name, name FROM history h, tv_shows s, tv_episodes e WHERE h.showid=s.tvdb_id AND h.showid=e.showid AND h.season=e.season AND h.episode=e.episode ORDER BY date DESC LIMIT "+str(numPerPage*(p-1))+", "+str(numPerPage))
if limit == "0":
sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id ORDER BY date DESC")
else:
sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id ORDER BY date DESC LIMIT ?", [limit])
t = PageTemplate(file="history.tmpl")
t.historyResults = sqlResults
t.limit = limit
t.submenu = [
{ 'title': 'Clear History', 'path': 'history/clearHistory' },
{ 'title': 'Trim History', 'path': 'history/trimHistory' },
]
return _munge(t)
@cherrypy.expose
def clearHistory(self):
myDB = db.DBConnection()
myDB.action("DELETE FROM history WHERE 1=1")
ui.notifications.message('History cleared')
redirect("/history")
@cherrypy.expose
def trimHistory(self):
myDB = db.DBConnection()
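        # For illustration (assuming history.dateFormat is the usual %Y%m%d%H%M%S):
        # the cutoff below renders as a numeric stamp such as 20130215103000, so any
        # history row whose date column sorts below "30 days ago" is deleted.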
myDB.action("DELETE FROM history WHERE date < "+str((datetime.datetime.today()-datetime.timedelta(days=30)).strftime(history.dateFormat)))
        ui.notifications.message('Removed history entries older than 30 days')
redirect("/history")
ConfigMenu = [
{ 'title': 'General', 'path': 'config/general/' },
{ 'title': 'Search Settings', 'path': 'config/search/' },
{ 'title': 'Search Providers', 'path': 'config/providers/' },
{ 'title': 'Subtitles Settings','path': 'config/subtitles/' },
{ 'title': 'Post Processing', 'path': 'config/postProcessing/' },
{ 'title': 'Notifications', 'path': 'config/notifications/' },
]
class ConfigGeneral:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_general.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveRootDirs(self, rootDirString=None):
sickbeard.ROOT_DIRS = rootDirString
@cherrypy.expose
def saveAddShowDefaults(self, defaultFlattenFolders, defaultStatus, anyQualities, bestQualities, subtitles):
if anyQualities:
anyQualities = anyQualities.split(',')
else:
anyQualities = []
if bestQualities:
bestQualities = bestQualities.split(',')
else:
bestQualities = []
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
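        # Note (hedged): combineQualities packs both lists into a single integer bitmask
        # (the "best" qualities occupy the upper bits in Sick Beard), so one
        # QUALITY_DEFAULT value can carry the initial/archive quality pair.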
sickbeard.STATUS_DEFAULT = int(defaultStatus)
sickbeard.QUALITY_DEFAULT = int(newQuality)
if defaultFlattenFolders == "true":
defaultFlattenFolders = 1
else:
defaultFlattenFolders = 0
sickbeard.FLATTEN_FOLDERS_DEFAULT = int(defaultFlattenFolders)
if subtitles == "true":
subtitles = 1
else:
subtitles = 0
sickbeard.SUBTITLES_DEFAULT = int(subtitles)
@cherrypy.expose
def generateKey(self):
""" Return a new randomized API_KEY
"""
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Create some values to seed md5
t = str(time.time())
r = str(random.random())
# Create the md5 instance and give it the current time
m = md5(t)
# Update the md5 instance with the random variable
m.update(r)
# Return a hex digest of the md5, eg 49f68a5c8493ec2c0bf489821c21fc3b
        logger.log(u"New API key generated")
return m.hexdigest()
@cherrypy.expose
def saveGeneral(self, log_dir=None, web_port=None, web_log=None, web_ipv6=None,
update_shows_on_start=None, launch_browser=None, web_username=None, use_api=None, api_key=None,
web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None, sort_article=None):
results = []
if web_ipv6 == "on":
web_ipv6 = 1
else:
web_ipv6 = 0
if web_log == "on":
web_log = 1
else:
web_log = 0
if update_shows_on_start == "on":
update_shows_on_start = 1
else:
update_shows_on_start = 0
if sort_article == "on":
sort_article = 1
else:
sort_article = 0
if launch_browser == "on":
launch_browser = 1
else:
launch_browser = 0
if version_notify == "on":
version_notify = 1
else:
version_notify = 0
if not config.change_LOG_DIR(log_dir):
results += ["Unable to create directory " + os.path.normpath(log_dir) + ", log dir not changed."]
sickbeard.UPDATE_SHOWS_ON_START = update_shows_on_start
sickbeard.LAUNCH_BROWSER = launch_browser
sickbeard.SORT_ARTICLE = sort_article
sickbeard.WEB_PORT = int(web_port)
sickbeard.WEB_IPV6 = web_ipv6
sickbeard.WEB_LOG = web_log
sickbeard.WEB_USERNAME = web_username
sickbeard.WEB_PASSWORD = web_password
if use_api == "on":
use_api = 1
else:
use_api = 0
sickbeard.USE_API = use_api
sickbeard.API_KEY = api_key
if enable_https == "on":
enable_https = 1
else:
enable_https = 0
sickbeard.ENABLE_HTTPS = enable_https
if not config.change_HTTPS_CERT(https_cert):
results += ["Unable to create directory " + os.path.normpath(https_cert) + ", https cert dir not changed."]
if not config.change_HTTPS_KEY(https_key):
results += ["Unable to create directory " + os.path.normpath(https_key) + ", https key dir not changed."]
config.change_VERSION_NOTIFY(version_notify)
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/general/")
class ConfigSearch:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_search.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveSearch(self, use_nzbs=None, use_torrents=None, nzb_dir=None, sab_username=None, sab_password=None,
sab_apikey=None, sab_category=None, sab_host=None, nzbget_password=None, nzbget_category=None, nzbget_host=None,
nzb_method=None, torrent_method=None, usenet_retention=None, search_frequency=None, download_propers=None, allow_high_priority=None,
torrent_dir=None, torrent_username=None, torrent_password=None, torrent_host=None, torrent_label=None, torrent_path=None,
torrent_ratio=None, torrent_paused=None, torrent_high_bandwidth=None, ignore_words=None):
results = []
if not config.change_NZB_DIR(nzb_dir):
results += ["Unable to create directory " + os.path.normpath(nzb_dir) + ", dir not changed."]
if not config.change_TORRENT_DIR(torrent_dir):
results += ["Unable to create directory " + os.path.normpath(torrent_dir) + ", dir not changed."]
config.change_SEARCH_FREQUENCY(search_frequency)
if download_propers == "on":
download_propers = 1
else:
download_propers = 0
if allow_high_priority == "on":
allow_high_priority = 1
else:
allow_high_priority = 0
if use_nzbs == "on":
use_nzbs = 1
else:
use_nzbs = 0
if use_torrents == "on":
use_torrents = 1
else:
use_torrents = 0
if usenet_retention == None:
usenet_retention = 200
if ignore_words == None:
ignore_words = ""
sickbeard.USE_NZBS = use_nzbs
sickbeard.USE_TORRENTS = use_torrents
sickbeard.NZB_METHOD = nzb_method
sickbeard.TORRENT_METHOD = torrent_method
sickbeard.USENET_RETENTION = int(usenet_retention)
sickbeard.IGNORE_WORDS = ignore_words
sickbeard.DOWNLOAD_PROPERS = download_propers
sickbeard.ALLOW_HIGH_PRIORITY = allow_high_priority
sickbeard.SAB_USERNAME = sab_username
sickbeard.SAB_PASSWORD = sab_password
sickbeard.SAB_APIKEY = sab_apikey.strip()
sickbeard.SAB_CATEGORY = sab_category
if sab_host and not re.match('https?://.*', sab_host):
sab_host = 'http://' + sab_host
if not sab_host.endswith('/'):
sab_host = sab_host + '/'
sickbeard.SAB_HOST = sab_host
sickbeard.NZBGET_PASSWORD = nzbget_password
sickbeard.NZBGET_CATEGORY = nzbget_category
sickbeard.NZBGET_HOST = nzbget_host
sickbeard.TORRENT_USERNAME = torrent_username
sickbeard.TORRENT_PASSWORD = torrent_password
sickbeard.TORRENT_LABEL = torrent_label
sickbeard.TORRENT_PATH = torrent_path
sickbeard.TORRENT_RATIO = torrent_ratio
if torrent_paused == "on":
torrent_paused = 1
else:
torrent_paused = 0
sickbeard.TORRENT_PAUSED = torrent_paused
if torrent_high_bandwidth == "on":
torrent_high_bandwidth = 1
else:
torrent_high_bandwidth = 0
sickbeard.TORRENT_HIGH_BANDWIDTH = torrent_high_bandwidth
if torrent_host and not re.match('https?://.*', torrent_host):
torrent_host = 'http://' + torrent_host
if not torrent_host.endswith('/'):
torrent_host = torrent_host + '/'
sickbeard.TORRENT_HOST = torrent_host
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/search/")
class ConfigPostProcessing:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_postProcessing.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def savePostProcessing(self, naming_pattern=None, naming_multi_ep=None,
xbmc_data=None, mediabrowser_data=None, synology_data=None, sony_ps3_data=None, wdtv_data=None, tivo_data=None, mede8er_data=None,
use_banner=None, keep_processed_dir=None, process_automatically=None, rename_episodes=None,
move_associated_files=None, tv_download_dir=None, naming_custom_abd=None, naming_abd_pattern=None, naming_strip_year=None):
results = []
if not config.change_TV_DOWNLOAD_DIR(tv_download_dir):
results += ["Unable to create directory " + os.path.normpath(tv_download_dir) + ", dir not changed."]
if use_banner == "on":
use_banner = 1
else:
use_banner = 0
if process_automatically == "on":
process_automatically = 1
else:
process_automatically = 0
if rename_episodes == "on":
rename_episodes = 1
else:
rename_episodes = 0
if keep_processed_dir == "on":
keep_processed_dir = 1
else:
keep_processed_dir = 0
if move_associated_files == "on":
move_associated_files = 1
else:
move_associated_files = 0
if naming_custom_abd == "on":
naming_custom_abd = 1
else:
naming_custom_abd = 0
if naming_strip_year == "on":
naming_strip_year = 1
else:
naming_strip_year = 0
sickbeard.PROCESS_AUTOMATICALLY = process_automatically
sickbeard.KEEP_PROCESSED_DIR = keep_processed_dir
sickbeard.RENAME_EPISODES = rename_episodes
sickbeard.MOVE_ASSOCIATED_FILES = move_associated_files
sickbeard.NAMING_CUSTOM_ABD = naming_custom_abd
sickbeard.NAMING_STRIP_YEAR = naming_strip_year
sickbeard.metadata_provider_dict['XBMC'].set_config(xbmc_data)
sickbeard.metadata_provider_dict['MediaBrowser'].set_config(mediabrowser_data)
sickbeard.metadata_provider_dict['Synology'].set_config(synology_data)
sickbeard.metadata_provider_dict['Sony PS3'].set_config(sony_ps3_data)
sickbeard.metadata_provider_dict['WDTV'].set_config(wdtv_data)
sickbeard.metadata_provider_dict['TIVO'].set_config(tivo_data)
sickbeard.metadata_provider_dict['Mede8er'].set_config(mede8er_data)
if self.isNamingValid(naming_pattern, naming_multi_ep) != "invalid":
sickbeard.NAMING_PATTERN = naming_pattern
sickbeard.NAMING_MULTI_EP = int(naming_multi_ep)
sickbeard.NAMING_FORCE_FOLDERS = naming.check_force_season_folders()
else:
results.append("You tried saving an invalid naming config, not saving your naming settings")
if self.isNamingValid(naming_abd_pattern, None, True) != "invalid":
sickbeard.NAMING_ABD_PATTERN = naming_abd_pattern
else:
results.append("You tried saving an invalid air-by-date naming config, not saving your air-by-date settings")
sickbeard.USE_BANNER = use_banner
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/postProcessing/")
@cherrypy.expose
def testNaming(self, pattern=None, multi=None, abd=False):
if multi != None:
multi = int(multi)
result = naming.test_name(pattern, multi, abd)
result = ek.ek(os.path.join, result['dir'], result['name'])
return result
@cherrypy.expose
def isNamingValid(self, pattern=None, multi=None, abd=False):
if pattern == None:
return "invalid"
# air by date shows just need one check, we don't need to worry about season folders
if abd:
is_valid = naming.check_valid_abd_naming(pattern)
require_season_folders = False
else:
# check validity of single and multi ep cases for the whole path
is_valid = naming.check_valid_naming(pattern, multi)
# check validity of single and multi ep cases for only the file name
require_season_folders = naming.check_force_season_folders(pattern, multi)
if is_valid and not require_season_folders:
return "valid"
elif is_valid and require_season_folders:
return "seasonfolders"
else:
return "invalid"
class ConfigProviders:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_providers.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def canAddNewznabProvider(self, name):
if not name:
return json.dumps({'error': 'Invalid name specified'})
providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
tempProvider = newznab.NewznabProvider(name, '')
if tempProvider.getID() in providerDict:
return json.dumps({'error': 'Exists as '+providerDict[tempProvider.getID()].name})
else:
return json.dumps({'success': tempProvider.getID()})
@cherrypy.expose
def saveNewznabProvider(self, name, url, key=''):
if not name or not url:
return '0'
if not url.endswith('/'):
url = url + '/'
providerDict = dict(zip([x.name for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
if name in providerDict:
if not providerDict[name].default:
providerDict[name].name = name
providerDict[name].url = url
providerDict[name].key = key
return providerDict[name].getID() + '|' + providerDict[name].configStr()
else:
newProvider = newznab.NewznabProvider(name, url, key)
sickbeard.newznabProviderList.append(newProvider)
return newProvider.getID() + '|' + newProvider.configStr()
@cherrypy.expose
def deleteNewznabProvider(self, id):
providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
if id not in providerDict or providerDict[id].default:
return '0'
# delete it from the list
sickbeard.newznabProviderList.remove(providerDict[id])
if id in sickbeard.PROVIDER_ORDER:
sickbeard.PROVIDER_ORDER.remove(id)
return '1'
@cherrypy.expose
def saveProviders(self, nzbmatrix_username=None, nzbmatrix_apikey=None,
nzbs_r_us_uid=None, nzbs_r_us_hash=None, newznab_string='',
omgwtfnzbs_uid=None, omgwtfnzbs_key=None,
tvtorrents_digest=None, tvtorrents_hash=None,
btn_api_key=None,
dtt_norar = None, dtt_single = None,
thepiratebay_trusted=None, thepiratebay_proxy=None, thepiratebay_proxy_url=None,
torrentleech_username=None, torrentleech_password=None,
iptorrents_username=None, iptorrents_password=None, iptorrents_freeleech=None,
kat_trusted = None, kat_verified = None,
newzbin_username=None, newzbin_password=None,
provider_order=None):
results = []
provider_str_list = provider_order.split()
provider_list = []
newznabProviderDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
finishedNames = []
# add all the newznab info we got into our list
if newznab_string:
for curNewznabProviderStr in newznab_string.split('!!!'):
if not curNewznabProviderStr:
continue
curName, curURL, curKey = curNewznabProviderStr.split('|')
newProvider = newznab.NewznabProvider(curName, curURL, curKey)
curID = newProvider.getID()
# if it already exists then update it
if curID in newznabProviderDict:
newznabProviderDict[curID].name = curName
newznabProviderDict[curID].url = curURL
newznabProviderDict[curID].key = curKey
else:
sickbeard.newznabProviderList.append(newProvider)
finishedNames.append(curID)
        # delete anything that is missing (iterate over a copy since we remove items)
        for curProvider in sickbeard.newznabProviderList[:]:
            if curProvider.getID() not in finishedNames:
                sickbeard.newznabProviderList.remove(curProvider)
# do the enable/disable
for curProviderStr in provider_str_list:
curProvider, curEnabled = curProviderStr.split(':')
curEnabled = int(curEnabled)
provider_list.append(curProvider)
if curProvider == 'nzbs_r_us':
sickbeard.NZBSRUS = curEnabled
elif curProvider == 'nzbs_org_old':
sickbeard.NZBS = curEnabled
elif curProvider == 'nzbmatrix':
sickbeard.NZBMATRIX = curEnabled
elif curProvider == 'newzbin':
sickbeard.NEWZBIN = curEnabled
elif curProvider == 'bin_req':
sickbeard.BINREQ = curEnabled
elif curProvider == 'womble_s_index':
sickbeard.WOMBLE = curEnabled
elif curProvider == 'nzbx':
sickbeard.NZBX = curEnabled
elif curProvider == 'omgwtfnzbs':
sickbeard.OMGWTFNZBS = curEnabled
elif curProvider == 'ezrss':
sickbeard.EZRSS = curEnabled
elif curProvider == 'tvtorrents':
sickbeard.TVTORRENTS = curEnabled
elif curProvider == 'torrentleech':
sickbeard.TORRENTLEECH = curEnabled
elif curProvider == 'btn':
sickbeard.BTN = curEnabled
elif curProvider in newznabProviderDict:
newznabProviderDict[curProvider].enabled = bool(curEnabled)
elif curProvider == 'dailytvtorrents':
sickbeard.DTT = curEnabled
elif curProvider == 'thepiratebay':
sickbeard.THEPIRATEBAY = curEnabled
elif curProvider == 'iptorrents':
sickbeard.IPTORRENTS = curEnabled
elif curProvider == 'kickasstorrents':
sickbeard.KAT = curEnabled
else:
logger.log(u"don't know what "+curProvider+" is, skipping")
sickbeard.TVTORRENTS_DIGEST = tvtorrents_digest.strip()
sickbeard.TVTORRENTS_HASH = tvtorrents_hash.strip()
sickbeard.BTN_API_KEY = btn_api_key.strip()
if dtt_norar == "on":
dtt_norar = 1
else:
dtt_norar = 0
sickbeard.DTT_NORAR = dtt_norar
if dtt_single == "on":
dtt_single = 1
else:
dtt_single = 0
sickbeard.DTT_SINGLE = dtt_single
if thepiratebay_trusted == "on":
thepiratebay_trusted = 1
else:
thepiratebay_trusted = 0
sickbeard.THEPIRATEBAY_TRUSTED = thepiratebay_trusted
if thepiratebay_proxy == "on":
thepiratebay_proxy = 1
sickbeard.THEPIRATEBAY_PROXY_URL = thepiratebay_proxy_url.strip()
else:
thepiratebay_proxy = 0
sickbeard.THEPIRATEBAY_PROXY_URL = ""
sickbeard.THEPIRATEBAY_PROXY = thepiratebay_proxy
sickbeard.TORRENTLEECH_USERNAME = torrentleech_username
sickbeard.TORRENTLEECH_PASSWORD = torrentleech_password
sickbeard.IPTORRENTS_USERNAME = iptorrents_username.strip()
sickbeard.IPTORRENTS_PASSWORD = iptorrents_password.strip()
if iptorrents_freeleech == "on":
iptorrents_freeleech = 1
else:
iptorrents_freeleech = 0
sickbeard.IPTORRENTS_FREELEECH = iptorrents_freeleech
if kat_trusted == "on":
kat_trusted = 1
else:
kat_trusted = 0
sickbeard.KAT_TRUSTED = kat_trusted
if kat_verified == "on":
kat_verified = 1
else:
kat_verified = 0
sickbeard.KAT_VERIFIED = kat_verified
sickbeard.NZBSRUS_UID = nzbs_r_us_uid.strip()
sickbeard.NZBSRUS_HASH = nzbs_r_us_hash.strip()
sickbeard.OMGWTFNZBS_UID = omgwtfnzbs_uid.strip()
sickbeard.OMGWTFNZBS_KEY = omgwtfnzbs_key.strip()
sickbeard.PROVIDER_ORDER = provider_list
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/providers/")
class ConfigNotifications:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_notifications.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveNotifications(self, use_xbmc=None, xbmc_notify_onsnatch=None, xbmc_notify_ondownload=None, xbmc_notify_onsubtitledownload=None, xbmc_update_onlyfirst=None,
xbmc_update_library=None, xbmc_update_full=None, xbmc_host=None, xbmc_username=None, xbmc_password=None,
use_plex=None, plex_notify_onsnatch=None, plex_notify_ondownload=None, plex_notify_onsubtitledownload=None, plex_update_library=None,
plex_server_host=None, plex_host=None, plex_username=None, plex_password=None,
use_growl=None, growl_notify_onsnatch=None, growl_notify_ondownload=None, growl_notify_onsubtitledownload=None, growl_host=None, growl_password=None,
use_prowl=None, prowl_notify_onsnatch=None, prowl_notify_ondownload=None, prowl_notify_onsubtitledownload=None, prowl_api=None, prowl_priority=0,
use_twitter=None, twitter_notify_onsnatch=None, twitter_notify_ondownload=None, twitter_notify_onsubtitledownload=None,
use_notifo=None, notifo_notify_onsnatch=None, notifo_notify_ondownload=None, notifo_notify_onsubtitledownload=None, notifo_username=None, notifo_apisecret=None,
use_boxcar=None, boxcar_notify_onsnatch=None, boxcar_notify_ondownload=None, boxcar_notify_onsubtitledownload=None, boxcar_username=None,
use_pushover=None, pushover_notify_onsnatch=None, pushover_notify_ondownload=None, pushover_notify_onsubtitledownload=None, pushover_userkey=None,
use_libnotify=None, libnotify_notify_onsnatch=None, libnotify_notify_ondownload=None, libnotify_notify_onsubtitledownload=None,
use_nmj=None, nmj_host=None, nmj_database=None, nmj_mount=None, use_synoindex=None,
use_nmjv2=None, nmjv2_host=None, nmjv2_dbloc=None, nmjv2_database=None,
use_trakt=None, trakt_username=None, trakt_password=None, trakt_api=None, trakt_remove_watchlist=None, trakt_use_watchlist=None, trakt_method_add=None, trakt_start_paused=None,
use_synologynotifier=None, synologynotifier_notify_onsnatch=None, synologynotifier_notify_ondownload=None, synologynotifier_notify_onsubtitledownload=None,
use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None, pytivo_notify_onsubtitledownload=None, pytivo_update_library=None,
pytivo_host=None, pytivo_share_name=None, pytivo_tivo_name=None,
use_nma=None, nma_notify_onsnatch=None, nma_notify_ondownload=None, nma_notify_onsubtitledownload=None, nma_api=None, nma_priority=0,
use_pushalot=None, pushalot_notify_onsnatch=None, pushalot_notify_ondownload=None, pushalot_notify_onsubtitledownload=None, pushalot_authorizationtoken=None,
use_email=None, email_notify_onsnatch=None, email_notify_ondownload=None, email_notify_onsubtitledownload=None, email_host=None, email_port=25, email_from=None,
email_tls=None, email_user=None, email_password=None, email_list=None, email_show_list=None, email_show=None ):
results = []
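        # HTML checkboxes submit "on" when ticked and are omitted otherwise, so normalize every flag to 1/0 before saving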
if xbmc_notify_onsnatch == "on":
xbmc_notify_onsnatch = 1
else:
xbmc_notify_onsnatch = 0
if xbmc_notify_ondownload == "on":
xbmc_notify_ondownload = 1
else:
xbmc_notify_ondownload = 0
if xbmc_notify_onsubtitledownload == "on":
xbmc_notify_onsubtitledownload = 1
else:
xbmc_notify_onsubtitledownload = 0
if xbmc_update_library == "on":
xbmc_update_library = 1
else:
xbmc_update_library = 0
if xbmc_update_full == "on":
xbmc_update_full = 1
else:
xbmc_update_full = 0
if xbmc_update_onlyfirst == "on":
xbmc_update_onlyfirst = 1
else:
xbmc_update_onlyfirst = 0
if use_xbmc == "on":
use_xbmc = 1
else:
use_xbmc = 0
if plex_update_library == "on":
plex_update_library = 1
else:
plex_update_library = 0
if plex_notify_onsnatch == "on":
plex_notify_onsnatch = 1
else:
plex_notify_onsnatch = 0
if plex_notify_ondownload == "on":
plex_notify_ondownload = 1
else:
plex_notify_ondownload = 0
if plex_notify_onsubtitledownload == "on":
plex_notify_onsubtitledownload = 1
else:
plex_notify_onsubtitledownload = 0
if use_plex == "on":
use_plex = 1
else:
use_plex = 0
if growl_notify_onsnatch == "on":
growl_notify_onsnatch = 1
else:
growl_notify_onsnatch = 0
if growl_notify_ondownload == "on":
growl_notify_ondownload = 1
else:
growl_notify_ondownload = 0
if growl_notify_onsubtitledownload == "on":
growl_notify_onsubtitledownload = 1
else:
growl_notify_onsubtitledownload = 0
if use_growl == "on":
use_growl = 1
else:
use_growl = 0
if prowl_notify_onsnatch == "on":
prowl_notify_onsnatch = 1
else:
prowl_notify_onsnatch = 0
if prowl_notify_ondownload == "on":
prowl_notify_ondownload = 1
else:
prowl_notify_ondownload = 0
if prowl_notify_onsubtitledownload == "on":
prowl_notify_onsubtitledownload = 1
else:
prowl_notify_onsubtitledownload = 0
if use_prowl == "on":
use_prowl = 1
else:
use_prowl = 0
if twitter_notify_onsnatch == "on":
twitter_notify_onsnatch = 1
else:
twitter_notify_onsnatch = 0
if twitter_notify_ondownload == "on":
twitter_notify_ondownload = 1
else:
twitter_notify_ondownload = 0
if twitter_notify_onsubtitledownload == "on":
twitter_notify_onsubtitledownload = 1
else:
twitter_notify_onsubtitledownload = 0
if use_twitter == "on":
use_twitter = 1
else:
use_twitter = 0
if notifo_notify_onsnatch == "on":
notifo_notify_onsnatch = 1
else:
notifo_notify_onsnatch = 0
if notifo_notify_ondownload == "on":
notifo_notify_ondownload = 1
else:
notifo_notify_ondownload = 0
if notifo_notify_onsubtitledownload == "on":
notifo_notify_onsubtitledownload = 1
else:
notifo_notify_onsubtitledownload = 0
if use_notifo == "on":
use_notifo = 1
else:
use_notifo = 0
if boxcar_notify_onsnatch == "on":
boxcar_notify_onsnatch = 1
else:
boxcar_notify_onsnatch = 0
if boxcar_notify_ondownload == "on":
boxcar_notify_ondownload = 1
else:
boxcar_notify_ondownload = 0
if boxcar_notify_onsubtitledownload == "on":
boxcar_notify_onsubtitledownload = 1
else:
boxcar_notify_onsubtitledownload = 0
if use_boxcar == "on":
use_boxcar = 1
else:
use_boxcar = 0
if pushover_notify_onsnatch == "on":
pushover_notify_onsnatch = 1
else:
pushover_notify_onsnatch = 0
if pushover_notify_ondownload == "on":
pushover_notify_ondownload = 1
else:
pushover_notify_ondownload = 0
if pushover_notify_onsubtitledownload == "on":
pushover_notify_onsubtitledownload = 1
else:
pushover_notify_onsubtitledownload = 0
if use_pushover == "on":
use_pushover = 1
else:
use_pushover = 0
if use_nmj == "on":
use_nmj = 1
else:
use_nmj = 0
if use_nmjv2 == "on":
use_nmjv2 = 1
else:
use_nmjv2 = 0
if use_synoindex == "on":
use_synoindex = 1
else:
use_synoindex = 0
if use_synologynotifier == "on":
use_synologynotifier = 1
else:
use_synologynotifier = 0
if synologynotifier_notify_onsnatch == "on":
synologynotifier_notify_onsnatch = 1
else:
synologynotifier_notify_onsnatch = 0
if synologynotifier_notify_ondownload == "on":
synologynotifier_notify_ondownload = 1
else:
synologynotifier_notify_ondownload = 0
if synologynotifier_notify_onsubtitledownload == "on":
synologynotifier_notify_onsubtitledownload = 1
else:
synologynotifier_notify_onsubtitledownload = 0
if use_trakt == "on":
use_trakt = 1
else:
use_trakt = 0
if trakt_remove_watchlist == "on":
trakt_remove_watchlist = 1
else:
trakt_remove_watchlist = 0
if trakt_use_watchlist == "on":
trakt_use_watchlist = 1
else:
trakt_use_watchlist = 0
if trakt_start_paused == "on":
trakt_start_paused = 1
else:
trakt_start_paused = 0
if email_notify_onsnatch == "on":
email_notify_onsnatch = 1
else:
email_notify_onsnatch = 0
if email_notify_ondownload == "on":
email_notify_ondownload = 1
else:
email_notify_ondownload = 0
if email_notify_onsubtitledownload == "on":
email_notify_onsubtitledownload = 1
else:
email_notify_onsubtitledownload = 0
if use_email == "on":
use_email = 1
else:
use_email = 0
if email_tls == "on":
email_tls = 1
else:
email_tls = 0
# Update per show notifications, if provided
        if email_show and int(email_show) >= 0:
mydb = db.DBConnection()
mydb.action("UPDATE tv_shows SET notify_list = ? WHERE show_id = ?", (email_show_list, int(email_show)))
if use_pytivo == "on":
use_pytivo = 1
else:
use_pytivo = 0
if pytivo_notify_onsnatch == "on":
pytivo_notify_onsnatch = 1
else:
pytivo_notify_onsnatch = 0
if pytivo_notify_ondownload == "on":
pytivo_notify_ondownload = 1
else:
pytivo_notify_ondownload = 0
if pytivo_notify_onsubtitledownload == "on":
pytivo_notify_onsubtitledownload = 1
else:
pytivo_notify_onsubtitledownload = 0
if pytivo_update_library == "on":
pytivo_update_library = 1
else:
pytivo_update_library = 0
if use_nma == "on":
use_nma = 1
else:
use_nma = 0
if nma_notify_onsnatch == "on":
nma_notify_onsnatch = 1
else:
nma_notify_onsnatch = 0
if nma_notify_ondownload == "on":
nma_notify_ondownload = 1
else:
nma_notify_ondownload = 0
if nma_notify_onsubtitledownload == "on":
nma_notify_onsubtitledownload = 1
else:
nma_notify_onsubtitledownload = 0
if use_pushalot == "on":
use_pushalot = 1
else:
use_pushalot = 0
if pushalot_notify_onsnatch == "on":
pushalot_notify_onsnatch = 1
else:
pushalot_notify_onsnatch = 0
if pushalot_notify_ondownload == "on":
pushalot_notify_ondownload = 1
else:
pushalot_notify_ondownload = 0
if pushalot_notify_onsubtitledownload == "on":
pushalot_notify_onsubtitledownload = 1
else:
pushalot_notify_onsubtitledownload = 0
sickbeard.USE_XBMC = use_xbmc
sickbeard.XBMC_NOTIFY_ONSNATCH = xbmc_notify_onsnatch
sickbeard.XBMC_NOTIFY_ONDOWNLOAD = xbmc_notify_ondownload
sickbeard.XBMC_NOTIFY_ONSUBTITLEDOWNLOAD = xbmc_notify_onsubtitledownload
sickbeard.XBMC_UPDATE_LIBRARY = xbmc_update_library
sickbeard.XBMC_UPDATE_FULL = xbmc_update_full
sickbeard.XBMC_UPDATE_ONLYFIRST = xbmc_update_onlyfirst
sickbeard.XBMC_HOST = xbmc_host
sickbeard.XBMC_USERNAME = xbmc_username
sickbeard.XBMC_PASSWORD = xbmc_password
sickbeard.USE_PLEX = use_plex
sickbeard.PLEX_NOTIFY_ONSNATCH = plex_notify_onsnatch
sickbeard.PLEX_NOTIFY_ONDOWNLOAD = plex_notify_ondownload
sickbeard.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD = plex_notify_onsubtitledownload
sickbeard.PLEX_UPDATE_LIBRARY = plex_update_library
sickbeard.PLEX_HOST = plex_host
sickbeard.PLEX_SERVER_HOST = plex_server_host
sickbeard.PLEX_USERNAME = plex_username
sickbeard.PLEX_PASSWORD = plex_password
sickbeard.USE_GROWL = use_growl
sickbeard.GROWL_NOTIFY_ONSNATCH = growl_notify_onsnatch
sickbeard.GROWL_NOTIFY_ONDOWNLOAD = growl_notify_ondownload
sickbeard.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD = growl_notify_onsubtitledownload
sickbeard.GROWL_HOST = growl_host
sickbeard.GROWL_PASSWORD = growl_password
sickbeard.USE_PROWL = use_prowl
sickbeard.PROWL_NOTIFY_ONSNATCH = prowl_notify_onsnatch
sickbeard.PROWL_NOTIFY_ONDOWNLOAD = prowl_notify_ondownload
sickbeard.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD = prowl_notify_onsubtitledownload
sickbeard.PROWL_API = prowl_api
sickbeard.PROWL_PRIORITY = prowl_priority
sickbeard.USE_TWITTER = use_twitter
sickbeard.TWITTER_NOTIFY_ONSNATCH = twitter_notify_onsnatch
sickbeard.TWITTER_NOTIFY_ONDOWNLOAD = twitter_notify_ondownload
sickbeard.TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD = twitter_notify_onsubtitledownload
sickbeard.USE_NOTIFO = use_notifo
sickbeard.NOTIFO_NOTIFY_ONSNATCH = notifo_notify_onsnatch
sickbeard.NOTIFO_NOTIFY_ONDOWNLOAD = notifo_notify_ondownload
sickbeard.NOTIFO_NOTIFY_ONSUBTITLEDOWNLOAD = notifo_notify_onsubtitledownload
sickbeard.NOTIFO_USERNAME = notifo_username
sickbeard.NOTIFO_APISECRET = notifo_apisecret
sickbeard.USE_BOXCAR = use_boxcar
sickbeard.BOXCAR_NOTIFY_ONSNATCH = boxcar_notify_onsnatch
sickbeard.BOXCAR_NOTIFY_ONDOWNLOAD = boxcar_notify_ondownload
sickbeard.BOXCAR_NOTIFY_ONSUBTITLEDOWNLOAD = boxcar_notify_onsubtitledownload
sickbeard.BOXCAR_USERNAME = boxcar_username
sickbeard.USE_PUSHOVER = use_pushover
sickbeard.PUSHOVER_NOTIFY_ONSNATCH = pushover_notify_onsnatch
sickbeard.PUSHOVER_NOTIFY_ONDOWNLOAD = pushover_notify_ondownload
sickbeard.PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = pushover_notify_onsubtitledownload
sickbeard.PUSHOVER_USERKEY = pushover_userkey
sickbeard.USE_LIBNOTIFY = use_libnotify == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH = libnotify_notify_onsnatch == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD = libnotify_notify_ondownload == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD = libnotify_notify_onsubtitledownload == "on"
sickbeard.USE_NMJ = use_nmj
sickbeard.NMJ_HOST = nmj_host
sickbeard.NMJ_DATABASE = nmj_database
sickbeard.NMJ_MOUNT = nmj_mount
sickbeard.USE_NMJv2 = use_nmjv2
sickbeard.NMJv2_HOST = nmjv2_host
sickbeard.NMJv2_DATABASE = nmjv2_database
sickbeard.NMJv2_DBLOC = nmjv2_dbloc
sickbeard.USE_SYNOINDEX = use_synoindex
sickbeard.USE_SYNOLOGYNOTIFIER = use_synologynotifier
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH = synologynotifier_notify_onsnatch
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD = synologynotifier_notify_ondownload
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = synologynotifier_notify_onsubtitledownload
sickbeard.USE_TRAKT = use_trakt
sickbeard.TRAKT_USERNAME = trakt_username
sickbeard.TRAKT_PASSWORD = trakt_password
sickbeard.TRAKT_API = trakt_api
sickbeard.TRAKT_REMOVE_WATCHLIST = trakt_remove_watchlist
sickbeard.TRAKT_USE_WATCHLIST = trakt_use_watchlist
sickbeard.TRAKT_METHOD_ADD = trakt_method_add
sickbeard.TRAKT_START_PAUSED = trakt_start_paused
sickbeard.USE_EMAIL = use_email
sickbeard.EMAIL_NOTIFY_ONSNATCH = email_notify_onsnatch
sickbeard.EMAIL_NOTIFY_ONDOWNLOAD = email_notify_ondownload
        sickbeard.EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD = email_notify_onsubtitledownload
sickbeard.EMAIL_HOST = email_host
sickbeard.EMAIL_PORT = email_port
sickbeard.EMAIL_FROM = email_from
sickbeard.EMAIL_TLS = email_tls
sickbeard.EMAIL_USER = email_user
sickbeard.EMAIL_PASSWORD = email_password
sickbeard.EMAIL_LIST = email_list
sickbeard.USE_PYTIVO = use_pytivo
        # the pyTivo flags were already normalized to 1/0 above, so store them directly
        sickbeard.PYTIVO_NOTIFY_ONSNATCH = pytivo_notify_onsnatch
        sickbeard.PYTIVO_NOTIFY_ONDOWNLOAD = pytivo_notify_ondownload
        sickbeard.PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD = pytivo_notify_onsubtitledownload
sickbeard.PYTIVO_UPDATE_LIBRARY = pytivo_update_library
sickbeard.PYTIVO_HOST = pytivo_host
sickbeard.PYTIVO_SHARE_NAME = pytivo_share_name
sickbeard.PYTIVO_TIVO_NAME = pytivo_tivo_name
sickbeard.USE_NMA = use_nma
sickbeard.NMA_NOTIFY_ONSNATCH = nma_notify_onsnatch
sickbeard.NMA_NOTIFY_ONDOWNLOAD = nma_notify_ondownload
sickbeard.NMA_NOTIFY_ONSUBTITLEDOWNLOAD = nma_notify_onsubtitledownload
sickbeard.NMA_API = nma_api
sickbeard.NMA_PRIORITY = nma_priority
sickbeard.USE_PUSHALOT = use_pushalot
sickbeard.PUSHALOT_NOTIFY_ONSNATCH = pushalot_notify_onsnatch
sickbeard.PUSHALOT_NOTIFY_ONDOWNLOAD = pushalot_notify_ondownload
sickbeard.PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD = pushalot_notify_onsubtitledownload
sickbeard.PUSHALOT_AUTHORIZATIONTOKEN = pushalot_authorizationtoken
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/notifications/")
class ConfigSubtitles:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_subtitles.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveSubtitles(self, use_subtitles=None, subtitles_plugins=None, subtitles_languages=None, subtitles_dir=None, service_order=None, subtitles_history=None):
results = []
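        # enabling subtitles (re)starts the subtitle finder thread; disabling it asks the running thread to stop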
if use_subtitles == "on":
use_subtitles = 1
if sickbeard.subtitlesFinderScheduler.thread == None or not sickbeard.subtitlesFinderScheduler.thread.isAlive():
sickbeard.subtitlesFinderScheduler.initThread()
else:
use_subtitles = 0
sickbeard.subtitlesFinderScheduler.abort = True
logger.log(u"Waiting for the SUBTITLESFINDER thread to exit")
try:
sickbeard.subtitlesFinderScheduler.thread.join(5)
except:
pass
if subtitles_history == "on":
subtitles_history = 1
else:
subtitles_history = 0
sickbeard.USE_SUBTITLES = use_subtitles
sickbeard.SUBTITLES_LANGUAGES = [lang.alpha2 for lang in subtitles.isValidLanguage(subtitles_languages.replace(' ', '').split(','))] if subtitles_languages != '' else ''
sickbeard.SUBTITLES_DIR = subtitles_dir
sickbeard.SUBTITLES_HISTORY = subtitles_history
# Subtitles services
services_str_list = service_order.split()
subtitles_services_list = []
subtitles_services_enabled = []
for curServiceStr in services_str_list:
curService, curEnabled = curServiceStr.split(':')
subtitles_services_list.append(curService)
subtitles_services_enabled.append(int(curEnabled))
sickbeard.SUBTITLES_SERVICES_LIST = subtitles_services_list
sickbeard.SUBTITLES_SERVICES_ENABLED = subtitles_services_enabled
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/subtitles/")
class Config:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config.tmpl")
t.submenu = ConfigMenu
t.hasChangeLog = False
# read changelog file, if present, and format it to be used for tooltip
data = []
if os.path.isfile('CHANGELOG.txt'):
t.hasChangeLog = True
f = ek.ek(open, 'CHANGELOG.txt')
data = f.readlines()
f.close()
numLines = 0
finalData = []
for x in data:
x = x.decode('utf-8').replace('\n', '<br/>')
                # why 3? we want to skip to the actual changelog details
if numLines > 3:
finalData.append(x)
numLines += 1
result = "".join(finalData)
t.logLines = re.escape(result)
return _munge(t)
general = ConfigGeneral()
search = ConfigSearch()
postProcessing = ConfigPostProcessing()
providers = ConfigProviders()
notifications = ConfigNotifications()
subtitles = ConfigSubtitles()
def haveXBMC():
return sickbeard.USE_XBMC and sickbeard.XBMC_UPDATE_LIBRARY
def havePLEX():
return sickbeard.USE_PLEX and sickbeard.PLEX_UPDATE_LIBRARY
def HomeMenu():
return [
{ 'title': 'Add Shows', 'path': 'home/addShows/', },
{ 'title': 'Manual Post-Processing', 'path': 'home/postprocess/' },
{ 'title': 'Update XBMC', 'path': 'home/updateXBMC/', 'requires': haveXBMC },
{ 'title': 'Update Plex', 'path': 'home/updatePLEX/', 'requires': havePLEX },
{ 'title': 'Restart', 'path': 'home/restart/?pid='+str(sickbeard.PID), 'confirm': True },
{ 'title': 'Shutdown', 'path': 'home/shutdown/?pid='+str(sickbeard.PID), 'confirm': True },
]
class HomePostProcess:
@cherrypy.expose
def index(self):
t = PageTemplate(file="home_postprocess.tmpl")
t.submenu = HomeMenu()
return _munge(t)
@cherrypy.expose
def processEpisode(self, dir=None, nzbName=None, jobName=None, quiet=None):
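        # manual post-processing entry point: "dir" is the folder to process and quiet=1 returns the raw result instead of the HTML page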
if not dir:
redirect("/home/postprocess")
else:
result = processTV.processDir(dir, nzbName)
if quiet != None and int(quiet) == 1:
return result
result = result.replace("\n","<br />\n")
return _genericMessage("Postprocessing results", result)
class NewHomeAddShows:
@cherrypy.expose
def index(self):
t = PageTemplate(file="home_addShows.tmpl")
t.submenu = HomeMenu()
return _munge(t)
@cherrypy.expose
def getTVDBLanguages(self):
result = tvdb_api.Tvdb().config['valid_languages']
# Make sure list is sorted alphabetically but 'en' is in front
if 'en' in result:
del result[result.index('en')]
result.sort()
result.insert(0, 'en')
return json.dumps({'results': result})
@cherrypy.expose
def sanitizeFileName(self, name):
return helpers.sanitizeFileName(name)
@cherrypy.expose
def searchTVDBForShowName(self, name, lang="en"):
if not lang or lang == 'null':
lang = "en"
baseURL = "http://thetvdb.com/api/GetSeries.php?"
nameUTF8 = name.encode('utf-8')
logger.log(u"Trying to find Show on thetvdb.com with: " + nameUTF8.decode('utf-8'), logger.DEBUG)
# Use each word in the show's name as a possible search term
keywords = nameUTF8.split(' ')
# Insert the whole show's name as the first search term so best results are first
# ex: keywords = ['Some Show Name', 'Some', 'Show', 'Name']
if len(keywords) > 1:
keywords.insert(0, nameUTF8)
# Query the TVDB for each search term and build the list of results
results = []
for searchTerm in keywords:
params = {'seriesname': searchTerm,
'language': lang}
finalURL = baseURL + urllib.urlencode(params)
logger.log(u"Searching for Show with searchterm: \'" + searchTerm.decode('utf-8') + u"\' on URL " + finalURL, logger.DEBUG)
urlData = helpers.getURL(finalURL)
if urlData is None:
# When urlData is None, trouble connecting to TVDB, don't try the rest of the keywords
logger.log(u"Unable to get URL: " + finalURL, logger.ERROR)
break
else:
try:
seriesXML = etree.ElementTree(etree.XML(urlData))
series = seriesXML.getiterator('Series')
except Exception, e:
# use finalURL in log, because urlData can be too much information
logger.log(u"Unable to parse XML for some reason: " + ex(e) + " from XML: " + finalURL, logger.ERROR)
                    series = []
# add each result to our list
for curSeries in series:
tvdb_id = int(curSeries.findtext('seriesid'))
# don't add duplicates
if tvdb_id in [x[0] for x in results]:
continue
results.append((tvdb_id, curSeries.findtext('SeriesName'), curSeries.findtext('FirstAired')))
lang_id = tvdb_api.Tvdb().config['langabbv_to_id'][lang]
return json.dumps({'results': results, 'langid': lang_id})
@cherrypy.expose
def massAddTable(self, rootDir=None):
t = PageTemplate(file="home_massAddTable.tmpl")
t.submenu = HomeMenu()
myDB = db.DBConnection()
if not rootDir:
return "No folders selected."
elif type(rootDir) != list:
root_dirs = [rootDir]
else:
root_dirs = rootDir
root_dirs = [urllib.unquote_plus(x) for x in root_dirs]
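        # the first element of ROOT_DIRS is the index of the default root dir; float that dir to the front of the list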
default_index = int(sickbeard.ROOT_DIRS.split('|')[0])
if len(root_dirs) > default_index:
tmp = root_dirs[default_index]
if tmp in root_dirs:
root_dirs.remove(tmp)
root_dirs = [tmp]+root_dirs
dir_list = []
for root_dir in root_dirs:
try:
file_list = ek.ek(os.listdir, root_dir)
except:
continue
for cur_file in file_list:
cur_path = ek.ek(os.path.normpath, ek.ek(os.path.join, root_dir, cur_file))
if not ek.ek(os.path.isdir, cur_path):
continue
cur_dir = {
'dir': cur_path,
'display_dir': '<b>'+ek.ek(os.path.dirname, cur_path)+os.sep+'</b>'+ek.ek(os.path.basename, cur_path),
}
                # see if the folder is already in the DB as a show location
dirResults = myDB.select("SELECT * FROM tv_shows WHERE location = ?", [cur_path])
if dirResults:
cur_dir['added_already'] = True
else:
cur_dir['added_already'] = False
dir_list.append(cur_dir)
tvdb_id = ''
show_name = ''
for cur_provider in sickbeard.metadata_provider_dict.values():
(tvdb_id, show_name) = cur_provider.retrieveShowMetadata(cur_path)
if tvdb_id and show_name:
break
cur_dir['existing_info'] = (tvdb_id, show_name)
if tvdb_id and helpers.findCertainShow(sickbeard.showList, tvdb_id):
cur_dir['added_already'] = True
t.dirList = dir_list
return _munge(t)
@cherrypy.expose
def newShow(self, show_to_add=None, other_shows=None):
"""
Display the new show page which collects a tvdb id, folder, and extra options and
posts them to addNewShow
"""
t = PageTemplate(file="home_newShow.tmpl")
t.submenu = HomeMenu()
show_dir, tvdb_id, show_name = self.split_extra_show(show_to_add)
if tvdb_id and show_name:
use_provided_info = True
else:
use_provided_info = False
# tell the template whether we're giving it show name & TVDB ID
t.use_provided_info = use_provided_info
# use the given show_dir for the tvdb search if available
if not show_dir:
t.default_show_name = ''
elif not show_name:
t.default_show_name = ek.ek(os.path.basename, ek.ek(os.path.normpath, show_dir)).replace('.',' ')
else:
t.default_show_name = show_name
# carry a list of other dirs if given
if not other_shows:
other_shows = []
elif type(other_shows) != list:
other_shows = [other_shows]
if use_provided_info:
t.provided_tvdb_id = tvdb_id
t.provided_tvdb_name = show_name
t.provided_show_dir = show_dir
t.other_shows = other_shows
return _munge(t)
@cherrypy.expose
def addNewShow(self, whichSeries=None, tvdbLang="en", rootDir=None, defaultStatus=None,
anyQualities=None, bestQualities=None, flatten_folders=None, subtitles=None,
fullShowPath=None, other_shows=None, skipShow=None):
"""
Receive tvdb id, dir, and other options and create a show from them. If extra show dirs are
provided then it forwards back to newShow, if not it goes to /home.
"""
# grab our list of other dirs if given
if not other_shows:
other_shows = []
elif type(other_shows) != list:
other_shows = [other_shows]
def finishAddShow():
# if there are no extra shows then go home
if not other_shows:
redirect('/home')
# peel off the next one
next_show_dir = other_shows[0]
rest_of_show_dirs = other_shows[1:]
# go to add the next show
return self.newShow(next_show_dir, rest_of_show_dirs)
# if we're skipping then behave accordingly
if skipShow:
return finishAddShow()
# sanity check on our inputs
if (not rootDir and not fullShowPath) or not whichSeries:
return "Missing params, no tvdb id or folder:"+repr(whichSeries)+" and "+repr(rootDir)+"/"+repr(fullShowPath)
# figure out what show we're adding and where
series_pieces = whichSeries.partition('|')
        # str.partition() always returns a 3-tuple, so check that the '|' separator was actually found
        if not series_pieces[1]:
            return "Error with show selection."
tvdb_id = int(series_pieces[0])
show_name = series_pieces[2]
# use the whole path if it's given, or else append the show name to the root dir to get the full show path
if fullShowPath:
show_dir = ek.ek(os.path.normpath, fullShowPath)
else:
show_dir = ek.ek(os.path.join, rootDir, helpers.sanitizeFileName(show_name))
# blanket policy - if the dir exists you should have used "add existing show" numbnuts
if ek.ek(os.path.isdir, show_dir) and not fullShowPath:
ui.notifications.error("Unable to add show", "Folder "+show_dir+" exists already")
redirect('/home/addShows/existingShows')
# don't create show dir if config says not to
if sickbeard.ADD_SHOWS_WO_DIR:
logger.log(u"Skipping initial creation of "+show_dir+" due to config.ini setting")
else:
dir_exists = helpers.makeDir(show_dir)
if not dir_exists:
logger.log(u"Unable to create the folder "+show_dir+", can't add the show", logger.ERROR)
ui.notifications.error("Unable to add show", "Unable to create the folder "+show_dir+", can't add the show")
redirect("/home")
else:
helpers.chmodAsParent(show_dir)
# prepare the inputs for passing along
if flatten_folders == "on":
flatten_folders = 1
else:
flatten_folders = 0
if subtitles == "on":
subtitles = 1
else:
subtitles = 0
if not anyQualities:
anyQualities = []
if not bestQualities:
bestQualities = []
if type(anyQualities) != list:
anyQualities = [anyQualities]
if type(bestQualities) != list:
bestQualities = [bestQualities]
newQuality = Quality.combineQualities(map(helpers.tryInt, anyQualities), map(helpers.tryInt, bestQualities))
# add the show
sickbeard.showQueueScheduler.action.addShow(tvdb_id, show_dir, int(defaultStatus), newQuality, flatten_folders, subtitles, tvdbLang) #@UndefinedVariable
ui.notifications.message('Show added', 'Adding the specified show into '+show_dir)
return finishAddShow()
@cherrypy.expose
def existingShows(self):
"""
Prints out the page to add existing shows from a root dir
"""
t = PageTemplate(file="home_addExistingShow.tmpl")
t.submenu = HomeMenu()
return _munge(t)
def split_extra_show(self, extra_show):
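        # extra_show is encoded as "dir|tvdb_id|show_name"; the show name itself may contain '|' so re-join the tail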
if not extra_show:
return (None, None, None)
split_vals = extra_show.split('|')
if len(split_vals) < 3:
return (extra_show, None, None)
show_dir = split_vals[0]
tvdb_id = split_vals[1]
show_name = '|'.join(split_vals[2:])
return (show_dir, tvdb_id, show_name)
@cherrypy.expose
def addExistingShows(self, shows_to_add=None, promptForSettings=None):
"""
Receives a dir list and add them. Adds the ones with given TVDB IDs first, then forwards
along to the newShow page.
"""
# grab a list of other shows to add, if provided
if not shows_to_add:
shows_to_add = []
elif type(shows_to_add) != list:
shows_to_add = [shows_to_add]
shows_to_add = [urllib.unquote_plus(x) for x in shows_to_add]
if promptForSettings == "on":
promptForSettings = 1
else:
promptForSettings = 0
tvdb_id_given = []
dirs_only = []
# separate all the ones with TVDB IDs
for cur_dir in shows_to_add:
if not '|' in cur_dir:
dirs_only.append(cur_dir)
else:
show_dir, tvdb_id, show_name = self.split_extra_show(cur_dir)
if not show_dir or not tvdb_id or not show_name:
continue
tvdb_id_given.append((show_dir, int(tvdb_id), show_name))
# if they want me to prompt for settings then I will just carry on to the newShow page
if promptForSettings and shows_to_add:
return self.newShow(shows_to_add[0], shows_to_add[1:])
# if they don't want me to prompt for settings then I can just add all the nfo shows now
num_added = 0
for cur_show in tvdb_id_given:
show_dir, tvdb_id, show_name = cur_show
# add the show
sickbeard.showQueueScheduler.action.addShow(tvdb_id, show_dir, SKIPPED, sickbeard.QUALITY_DEFAULT, sickbeard.FLATTEN_FOLDERS_DEFAULT, sickbeard.SUBTITLES_DEFAULT) #@UndefinedVariable
num_added += 1
if num_added:
ui.notifications.message("Shows Added", "Automatically added "+str(num_added)+" from their existing metadata files")
# if we're done then go home
if not dirs_only:
redirect('/home')
# for the remaining shows we need to prompt for each one, so forward this on to the newShow page
return self.newShow(dirs_only[0], dirs_only[1:])
ErrorLogsMenu = [
{ 'title': 'Clear Errors', 'path': 'errorlogs/clearerrors' },
#{ 'title': 'View Log', 'path': 'errorlogs/viewlog' },
]
class ErrorLogs:
@cherrypy.expose
def index(self):
t = PageTemplate(file="errorlogs.tmpl")
t.submenu = ErrorLogsMenu
return _munge(t)
@cherrypy.expose
def clearerrors(self):
classes.ErrorViewer.clear()
redirect("/errorlogs")
@cherrypy.expose
def viewlog(self, minLevel=logger.MESSAGE, maxLines=500):
t = PageTemplate(file="viewlogs.tmpl")
t.submenu = ErrorLogsMenu
minLevel = int(minLevel)
data = []
if os.path.isfile(logger.sb_log_instance.log_file):
f = ek.ek(open, logger.sb_log_instance.log_file)
data = f.readlines()
f.close()
        regex = r"^(\w{3})\.?-(\d\d)\s*(\d\d)\:(\d\d):(\d\d)\s*([A-Z]+)\s*(.+?)\s*\:\:\s*(.*)$"
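        # group 6 is the log level; lines that don't match the pattern are treated as continuations of the previous matching entry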
finalData = []
numLines = 0
lastLine = False
        numToShow = min(int(maxLines), len(data))
for x in reversed(data):
x = x.decode('utf-8', 'replace')
match = re.match(regex, x)
if match:
level = match.group(6)
if level not in logger.reverseNames:
lastLine = False
continue
if logger.reverseNames[level] >= minLevel:
lastLine = True
finalData.append(x)
else:
lastLine = False
continue
elif lastLine:
finalData.append("AA"+x)
numLines += 1
if numLines >= numToShow:
break
result = "".join(finalData)
t.logLines = result
t.minLevel = minLevel
return _munge(t)
class Home:
@cherrypy.expose
def is_alive(self, *args, **kwargs):
if 'callback' in kwargs and '_' in kwargs:
callback, _ = kwargs['callback'], kwargs['_']
else:
return "Error: Unsupported Request. Send jsonp request with 'callback' variable in the query stiring."
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
cherrypy.response.headers['Content-Type'] = 'text/javascript'
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
cherrypy.response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
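        # respond with the current PID wrapped in the supplied JSONP callback so the caller can check whether Sick Beard is up (and which PID answered)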
if sickbeard.started:
return callback+'('+json.dumps({"msg": str(sickbeard.PID)})+');'
else:
return callback+'('+json.dumps({"msg": "nope"})+');'
@cherrypy.expose
def index(self):
t = PageTemplate(file="home.tmpl")
t.submenu = HomeMenu()
return _munge(t)
addShows = NewHomeAddShows()
postprocess = HomePostProcess()
@cherrypy.expose
def testSABnzbd(self, host=None, username=None, password=None, apikey=None):
if not host.endswith("/"):
host = host + "/"
connection, accesMsg = sab.getSabAccesMethod(host, username, password, apikey)
if connection:
authed, authMsg = sab.testAuthentication(host, username, password, apikey) #@UnusedVariable
if authed:
return "Success. Connected and authenticated"
else:
return "Authentication failed. SABnzbd expects '"+accesMsg+"' as authentication method"
else:
return "Unable to connect to host"
@cherrypy.expose
def testTorrent(self, torrent_method=None, host=None, username=None, password=None):
if not host.endswith("/"):
host = host + "/"
client = clients.getClientIstance(torrent_method)
connection, accesMsg = client(host, username, password).testAuthentication()
return accesMsg
@cherrypy.expose
def testGrowl(self, host=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.growl_notifier.test_notify(host, password)
if password==None or password=='':
pw_append = ''
else:
pw_append = " with password: " + password
if result:
return "Registered and Tested growl successfully "+urllib.unquote_plus(host)+pw_append
else:
return "Registration and Testing of growl failed "+urllib.unquote_plus(host)+pw_append
@cherrypy.expose
def testProwl(self, prowl_api=None, prowl_priority=0):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.prowl_notifier.test_notify(prowl_api, prowl_priority)
if result:
return "Test prowl notice sent successfully"
else:
return "Test prowl notice failed"
@cherrypy.expose
def testNotifo(self, username=None, apisecret=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.notifo_notifier.test_notify(username, apisecret)
if result:
return "Notifo notification succeeded. Check your Notifo clients to make sure it worked"
else:
return "Error sending Notifo notification"
@cherrypy.expose
def testBoxcar(self, username=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.boxcar_notifier.test_notify(username)
if result:
return "Boxcar notification succeeded. Check your Boxcar clients to make sure it worked"
else:
return "Error sending Boxcar notification"
@cherrypy.expose
def testPushover(self, userKey=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushover_notifier.test_notify(userKey)
if result:
return "Pushover notification succeeded. Check your Pushover clients to make sure it worked"
else:
return "Error sending Pushover notification"
@cherrypy.expose
def twitterStep1(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
return notifiers.twitter_notifier._get_authorization()
@cherrypy.expose
def twitterStep2(self, key):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.twitter_notifier._get_credentials(key)
logger.log(u"result: "+str(result))
if result:
return "Key verification successful"
else:
return "Unable to verify key"
@cherrypy.expose
def testTwitter(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.twitter_notifier.test_notify()
if result:
return "Tweet successful, check your twitter to make sure it worked"
else:
return "Error sending tweet"
@cherrypy.expose
def testXBMC(self, host=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
finalResult = ''
for curHost in [x.strip() for x in host.split(",")]:
curResult = notifiers.xbmc_notifier.test_notify(urllib.unquote_plus(curHost), username, password)
if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]:
finalResult += "Test XBMC notice sent successfully to " + urllib.unquote_plus(curHost)
else:
finalResult += "Test XBMC notice failed to " + urllib.unquote_plus(curHost)
finalResult += "<br />\n"
return finalResult
@cherrypy.expose
def testPLEX(self, host=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
finalResult = ''
for curHost in [x.strip() for x in host.split(",")]:
curResult = notifiers.plex_notifier.test_notify(urllib.unquote_plus(curHost), username, password)
if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]:
finalResult += "Test Plex notice sent successfully to " + urllib.unquote_plus(curHost)
else:
finalResult += "Test Plex notice failed to " + urllib.unquote_plus(curHost)
finalResult += "<br />\n"
return finalResult
@cherrypy.expose
def testLibnotify(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
if notifiers.libnotify_notifier.test_notify():
return "Tried sending desktop notification via libnotify"
else:
return notifiers.libnotify.diagnose()
@cherrypy.expose
def testNMJ(self, host=None, database=None, mount=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmj_notifier.test_notify(urllib.unquote_plus(host), database, mount)
if result:
return "Successfull started the scan update"
else:
return "Test failed to start the scan update"
@cherrypy.expose
def settingsNMJ(self, host=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmj_notifier.notify_settings(urllib.unquote_plus(host))
if result:
return '{"message": "Got settings from %(host)s", "database": "%(database)s", "mount": "%(mount)s"}' % {"host": host, "database": sickbeard.NMJ_DATABASE, "mount": sickbeard.NMJ_MOUNT}
else:
return '{"message": "Failed! Make sure your Popcorn is on and NMJ is running. (see Log & Errors -> Debug for detailed info)", "database": "", "mount": ""}'
@cherrypy.expose
def testNMJv2(self, host=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmjv2_notifier.test_notify(urllib.unquote_plus(host))
if result:
return "Test notice sent successfully to " + urllib.unquote_plus(host)
else:
return "Test notice failed to " + urllib.unquote_plus(host)
@cherrypy.expose
def settingsNMJv2(self, host=None, dbloc=None,instance=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmjv2_notifier.notify_settings(urllib.unquote_plus(host),dbloc,instance)
if result:
return '{"message": "NMJ Database found at: %(host)s", "database": "%(database)s"}' % {"host": host, "database": sickbeard.NMJv2_DATABASE}
else:
return '{"message": "Unable to find NMJ Database at location: %(dbloc)s. Is the right location selected and PCH running?", "database": ""}' % {"dbloc": dbloc}
@cherrypy.expose
def testTrakt(self, api=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.trakt_notifier.test_notify(api, username, password)
if result:
return "Test notice sent successfully to Trakt"
else:
return "Test notice failed to Trakt"
@cherrypy.expose
def loadShowNotifyLists(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
mydb = db.DBConnection()
rows = mydb.select("SELECT show_id, show_name, notify_list FROM tv_shows")
data = {}
size = 0
for r in rows:
data[r['show_id']] = {'id': r['show_id'], 'name': r['show_name'], 'list': r['notify_list']}
size += 1
data['_size'] = size
return json.dumps(data)
@cherrypy.expose
def testEmail(self, host=None, port=None, smtp_from=None, use_tls=None, user=None, pwd=None, to=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
if notifiers.email_notifier.test_notify(host, port, smtp_from, use_tls, user, pwd, to):
return 'Test email sent successfully! Check inbox.'
else:
return 'ERROR: %s' % notifiers.email_notifier.last_err
@cherrypy.expose
def testNMA(self, nma_api=None, nma_priority=0):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nma_notifier.test_notify(nma_api, nma_priority)
if result:
return "Test NMA notice sent successfully"
else:
return "Test NMA notice failed"
@cherrypy.expose
def testPushalot(self, authorizationToken=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushalot_notifier.test_notify(authorizationToken)
if result:
return "Pushalot notification succeeded. Check your Pushalot clients to make sure it worked"
else:
return "Error sending Pushalot notification"
@cherrypy.expose
def shutdown(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
threading.Timer(2, sickbeard.invoke_shutdown).start()
title = "Shutting down"
message = "Sick Beard is shutting down..."
return _genericMessage(title, message)
@cherrypy.expose
def restart(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
t = PageTemplate(file="restart.tmpl")
t.submenu = HomeMenu()
# do a soft restart
threading.Timer(2, sickbeard.invoke_restart, [False]).start()
return _munge(t)
@cherrypy.expose
def update(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
updated = sickbeard.versionCheckScheduler.action.update() #@UndefinedVariable
if updated:
# do a hard restart
threading.Timer(2, sickbeard.invoke_restart, [False]).start()
t = PageTemplate(file="restart_bare.tmpl")
return _munge(t)
else:
return _genericMessage("Update Failed","Update wasn't successful, not restarting. Check your log for more information.")
@cherrypy.expose
def displayShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
else:
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.tvdbid)
myDB = db.DBConnection()
seasonResults = myDB.select(
"SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season desc",
[showObj.tvdbid]
)
sqlResults = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC",
[showObj.tvdbid]
)
t = PageTemplate(file="displayShow.tmpl")
t.submenu = [ { 'title': 'Edit', 'path': 'home/editShow?show=%d'%showObj.tvdbid } ]
try:
t.showLoc = (showObj.location, True)
except sickbeard.exceptions.ShowDirNotFoundException:
t.showLoc = (showObj._location, False)
show_message = ''
if sickbeard.showQueueScheduler.action.isBeingAdded(showObj): #@UndefinedVariable
show_message = 'This show is in the process of being downloaded from theTVDB.com - the info below is incomplete.'
elif sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
show_message = 'The information below is in the process of being updated.'
elif sickbeard.showQueueScheduler.action.isBeingRefreshed(showObj): #@UndefinedVariable
show_message = 'The episodes below are currently being refreshed from disk'
elif sickbeard.showQueueScheduler.action.isBeingSubtitled(showObj): #@UndefinedVariable
show_message = 'Currently downloading subtitles for this show'
elif sickbeard.showQueueScheduler.action.isInRefreshQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued to be refreshed.'
elif sickbeard.showQueueScheduler.action.isInUpdateQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued and awaiting an update.'
elif sickbeard.showQueueScheduler.action.isInSubtitleQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued and awaiting subtitles download.'
if not sickbeard.showQueueScheduler.action.isBeingAdded(showObj): #@UndefinedVariable
if not sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
t.submenu.append({ 'title': 'Delete', 'path': 'home/deleteShow?show=%d'%showObj.tvdbid, 'confirm': True })
t.submenu.append({ 'title': 'Re-scan files', 'path': 'home/refreshShow?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'Force Full Update', 'path': 'home/updateShow?show=%d&force=1'%showObj.tvdbid })
t.submenu.append({ 'title': 'Update show in XBMC', 'path': 'home/updateXBMC?showName=%s'%urllib.quote_plus(showObj.name.encode('utf-8')), 'requires': haveXBMC })
t.submenu.append({ 'title': 'Preview Rename', 'path': 'home/testRename?show=%d'%showObj.tvdbid })
if sickbeard.USE_SUBTITLES and not sickbeard.showQueueScheduler.action.isBeingSubtitled(showObj) and showObj.subtitles:
t.submenu.append({ 'title': 'Download Subtitles', 'path': 'home/subtitleShow?show=%d'%showObj.tvdbid })
t.show = showObj
t.sqlResults = sqlResults
t.seasonResults = seasonResults
t.show_message = show_message
epCounts = {}
epCats = {}
epCounts[Overview.SKIPPED] = 0
epCounts[Overview.WANTED] = 0
epCounts[Overview.QUAL] = 0
epCounts[Overview.GOOD] = 0
epCounts[Overview.UNAIRED] = 0
epCounts[Overview.SNATCHED] = 0
for curResult in sqlResults:
curEpCat = showObj.getOverview(int(curResult["status"]))
epCats[str(curResult["season"])+"x"+str(curResult["episode"])] = curEpCat
epCounts[curEpCat] += 1
def titler(x):
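            # strip a leading "a ", "an " or "the " for sorting unless the user opted to keep articles (SORT_ARTICLE)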
if not x or sickbeard.SORT_ARTICLE:
return x
if x.lower().startswith('a '):
x = x[2:]
if x.lower().startswith('an '):
x = x[3:]
elif x.lower().startswith('the '):
x = x[4:]
return x
t.sortedShowList = sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name)))
t.epCounts = epCounts
t.epCats = epCats
return _munge(t)
@cherrypy.expose
def plotDetails(self, show, season, episode):
result = db.DBConnection().action("SELECT description FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", (show, season, episode)).fetchone()
return result['description'] if result else 'Episode not found.'
@cherrypy.expose
def editShow(self, show=None, location=None, anyQualities=[], bestQualities=[], exceptions_list=[], flatten_folders=None, paused=None, directCall=False, air_by_date=None, tvdbLang=None, subtitles=None):
if show == None:
errString = "Invalid show ID: "+str(show)
if directCall:
return [errString]
else:
return _genericMessage("Error", errString)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
errString = "Unable to find the specified show: "+str(show)
if directCall:
return [errString]
else:
return _genericMessage("Error", errString)
showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.tvdbid)
if not location and not anyQualities and not bestQualities and not flatten_folders:
t = PageTemplate(file="editShow.tmpl")
t.submenu = HomeMenu()
with showObj.lock:
t.show = showObj
return _munge(t)
if flatten_folders == "on":
flatten_folders = 1
else:
flatten_folders = 0
logger.log(u"flatten folders: "+str(flatten_folders))
if paused == "on":
paused = 1
else:
paused = 0
if air_by_date == "on":
air_by_date = 1
else:
air_by_date = 0
if subtitles == "on":
subtitles = 1
else:
subtitles = 0
if tvdbLang and tvdbLang in tvdb_api.Tvdb().config['valid_languages']:
tvdb_lang = tvdbLang
else:
tvdb_lang = showObj.lang
# if we changed the language then kick off an update
if tvdb_lang == showObj.lang:
do_update = False
else:
do_update = True
if type(anyQualities) != list:
anyQualities = [anyQualities]
if type(bestQualities) != list:
bestQualities = [bestQualities]
if type(exceptions_list) != list:
exceptions_list = [exceptions_list]
#If directCall from mass_edit_update no scene exceptions handling
if directCall:
do_update_exceptions = False
else:
if set(exceptions_list) == set(showObj.exceptions):
do_update_exceptions = False
else:
do_update_exceptions = True
errors = []
with showObj.lock:
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
showObj.quality = newQuality
# reversed for now
if bool(showObj.flatten_folders) != bool(flatten_folders):
showObj.flatten_folders = flatten_folders
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh this show: "+ex(e))
showObj.paused = paused
showObj.air_by_date = air_by_date
showObj.subtitles = subtitles
showObj.lang = tvdb_lang
# if we change location clear the db of episodes, change it, write to db, and rescan
if os.path.normpath(showObj._location) != os.path.normpath(location):
logger.log(os.path.normpath(showObj._location)+" != "+os.path.normpath(location), logger.DEBUG)
if not ek.ek(os.path.isdir, location):
errors.append("New location <tt>%s</tt> does not exist" % location)
# don't bother if we're going to update anyway
elif not do_update:
# change it
try:
showObj.location = location
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh this show:"+ex(e))
# grab updated info from TVDB
#showObj.loadEpisodesFromTVDB()
# rescan the episodes in the new folder
except exceptions.NoNFOException:
errors.append("The folder at <tt>%s</tt> doesn't contain a tvshow.nfo - copy your files to that folder before you change the directory in Sick Beard." % location)
# save it to the DB
showObj.saveToDB()
# force the update
if do_update:
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, True) #@UndefinedVariable
time.sleep(1)
except exceptions.CantUpdateException, e:
errors.append("Unable to force an update on the show.")
if do_update_exceptions:
try:
scene_exceptions.update_scene_exceptions(showObj.tvdbid, exceptions_list) #@UndefinedVariable
time.sleep(1)
except exceptions.CantUpdateException, e:
errors.append("Unable to force an update on scene exceptions of the show.")
if directCall:
return errors
if len(errors) > 0:
ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"),
'<ul>' + '\n'.join(['<li>%s</li>' % error for error in errors]) + "</ul>")
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def deleteShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
if sickbeard.showQueueScheduler.action.isBeingAdded(showObj) or sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
return _genericMessage("Error", "Shows can't be deleted while they're being added or updated.")
showObj.deleteShow()
ui.notifications.message('<b>%s</b> has been deleted' % showObj.name)
redirect("/home")
@cherrypy.expose
def refreshShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# force the update from the DB
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
ui.notifications.error("Unable to refresh this show.",
ex(e))
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def updateShow(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# force the update
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, bool(force)) #@UndefinedVariable
except exceptions.CantUpdateException, e:
ui.notifications.error("Unable to update this show.",
ex(e))
# just give it some time
time.sleep(3)
redirect("/home/displayShow?show=" + str(showObj.tvdbid))
@cherrypy.expose
def subtitleShow(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# search and download subtitles
sickbeard.showQueueScheduler.action.downloadSubtitles(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def updateXBMC(self, showName=None):
if sickbeard.XBMC_UPDATE_ONLYFIRST:
# only send update to first host in the list -- workaround for xbmc sql backend users
host = sickbeard.XBMC_HOST.split(",")[0].strip()
else:
host = sickbeard.XBMC_HOST
if notifiers.xbmc_notifier.update_library(showName=showName):
ui.notifications.message("Library update command sent to XBMC host(s): " + host)
else:
ui.notifications.error("Unable to contact one or more XBMC host(s): " + host)
redirect('/home')
@cherrypy.expose
def updatePLEX(self):
if notifiers.plex_notifier.update_library():
ui.notifications.message("Library update command sent to Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST)
else:
ui.notifications.error("Unable to contact Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST)
redirect('/home')
@cherrypy.expose
def setStatus(self, show=None, eps=None, status=None, direct=False):
if show == None or eps == None or status == None:
errMsg = "You must specify a show and at least one episode"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
if not statusStrings.has_key(int(status)):
errMsg = "Invalid status"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
errMsg = "Error", "Show not in show list"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
segment_list = []
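        # remember which season (or air-by-date month) each newly-wanted episode belongs to so a backlog search can be queued per segment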
if eps != None:
for curEp in eps.split('|'):
logger.log(u"Attempting to set status on episode "+curEp+" to "+status, logger.DEBUG)
                epInfo = curEp.split('x')
                epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1]))
                # make sure the episode was actually retrieved before using it
                if epObj == None:
                    return _genericMessage("Error", "Episode couldn't be retrieved")
                if int(status) == WANTED:
                    # figure out what segment the episode is in and remember it so we can backlog it
                    if epObj.show.air_by_date:
                        ep_segment = str(epObj.airdate)[:7]
                    else:
                        ep_segment = epObj.season
                    if ep_segment not in segment_list:
                        segment_list.append(ep_segment)
with epObj.lock:
# don't let them mess up UNAIRED episodes
if epObj.status == UNAIRED:
logger.log(u"Refusing to change status of "+curEp+" because it is UNAIRED", logger.ERROR)
continue
if int(status) in Quality.DOWNLOADED and epObj.status not in Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.DOWNLOADED + [IGNORED] and not ek.ek(os.path.isfile, epObj.location):
logger.log(u"Refusing to change status of "+curEp+" to DOWNLOADED because it's not SNATCHED/DOWNLOADED", logger.ERROR)
continue
epObj.status = int(status)
epObj.saveToDB()
msg = "Backlog was automatically started for the following seasons of <b>"+showObj.name+"</b>:<br />"
for cur_segment in segment_list:
msg += "<li>Season "+str(cur_segment)+"</li>"
logger.log(u"Sending backlog for "+showObj.name+" season "+str(cur_segment)+" because some eps were set to wanted")
cur_backlog_queue_item = search_queue.BacklogQueueItem(showObj, cur_segment)
sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item) #@UndefinedVariable
msg += "</ul>"
if segment_list:
ui.notifications.message("Backlog started", msg)
if direct:
return json.dumps({'result': 'success'})
else:
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def testRename(self, show=None):
if show == None:
return _genericMessage("Error", "You must specify a show")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
try:
show_loc = showObj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
ep_obj_rename_list = []
ep_obj_list = showObj.getAllEpisodes(has_location=True)
for cur_ep_obj in ep_obj_list:
# Only want to rename if we have a location
if cur_ep_obj.location:
if cur_ep_obj.relatedEps:
# do we have one of multi-episodes in the rename list already
have_already = False
for cur_related_ep in cur_ep_obj.relatedEps + [cur_ep_obj]:
if cur_related_ep in ep_obj_rename_list:
have_already = True
break
if not have_already:
ep_obj_rename_list.append(cur_ep_obj)
else:
ep_obj_rename_list.append(cur_ep_obj)
if ep_obj_rename_list:
# present season DESC episode DESC on screen
ep_obj_rename_list.reverse()
t = PageTemplate(file="testRename.tmpl")
t.submenu = [{'title': 'Edit', 'path': 'home/editShow?show=%d' % showObj.tvdbid}]
t.ep_obj_list = ep_obj_rename_list
t.show = showObj
return _munge(t)
@cherrypy.expose
def doRename(self, show=None, eps=None):
if show == None or eps == None:
errMsg = "You must specify a show and at least one episode"
return _genericMessage("Error", errMsg)
show_obj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if show_obj == None:
errMsg = "Error", "Show not in show list"
return _genericMessage("Error", errMsg)
try:
show_loc = show_obj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
myDB = db.DBConnection()
if eps == None:
redirect("/home/displayShow?show=" + show)
for curEp in eps.split('|'):
epInfo = curEp.split('x')
# this is probably the worst possible way to deal with double eps but I've kinda painted myself into a corner here with this stupid database
ep_result = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ? AND 5=5", [show, epInfo[0], epInfo[1]])
if not ep_result:
logger.log(u"Unable to find an episode for "+curEp+", skipping", logger.WARNING)
continue
related_eps_result = myDB.select("SELECT * FROM tv_episodes WHERE location = ? AND episode != ?", [ep_result[0]["location"], epInfo[1]])
root_ep_obj = show_obj.getEpisode(int(epInfo[0]), int(epInfo[1]))
for cur_related_ep in related_eps_result:
related_ep_obj = show_obj.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"]))
if related_ep_obj not in root_ep_obj.relatedEps:
root_ep_obj.relatedEps.append(related_ep_obj)
root_ep_obj.rename()
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def searchEpisode(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# make a queue item for it and put it on the queue
ep_queue_item = search_queue.ManualSearchQueueItem(ep_obj)
sickbeard.searchQueueScheduler.action.add_item(ep_queue_item) #@UndefinedVariable
# wait until the queue item tells us whether it worked or not
while ep_queue_item.success == None: #@UndefinedVariable
time.sleep(1)
# return the correct json value
if ep_queue_item.success:
#Find the quality class for the episode
quality_class = Quality.qualityStrings[Quality.UNKNOWN]
ep_status, ep_quality = Quality.splitCompositeStatus(ep_obj.status)
for x in (SD, HD720p, HD1080p):
if ep_quality in Quality.splitQuality(x)[0]:
quality_class = qualityPresetStrings[x]
break
return json.dumps({'result': statusStrings[ep_obj.status],
'quality': quality_class
})
return json.dumps({'result': 'failure'})
@cherrypy.expose
def searchEpisodeSubtitles(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# try do download subtitles for that episode
previous_subtitles = ep_obj.subtitles
try:
subtitles = ep_obj.downloadSubtitles()
if sickbeard.SUBTITLES_DIR:
for video in subtitles:
subs_new_path = ek.ek(os.path.join, os.path.dirname(video.path), sickbeard.SUBTITLES_DIR)
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
helpers.chmodAsParent(new_file_path)
else:
for video in subtitles:
for subtitle in subtitles.get(video):
helpers.chmodAsParent(subtitle.path)
except:
return json.dumps({'result': 'failure'})
# return the correct json value
if previous_subtitles != ep_obj.subtitles:
status = 'New subtitles downloaded: %s' % ' '.join(["<img src='"+sickbeard.WEB_ROOT+"/images/flags/"+subliminal.language.Language(x).alpha2+".png' alt='"+subliminal.language.Language(x).name+"'/>" for x in sorted(list(set(ep_obj.subtitles).difference(previous_subtitles)))])
else:
status = 'No subtitles downloaded'
ui.notifications.message('Subtitles Search', status)
return json.dumps({'result': status, 'subtitles': ','.join([x for x in ep_obj.subtitles])})
@cherrypy.expose
def mergeEpisodeSubtitles(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# try do merge subtitles for that episode
try:
ep_obj.mergeSubtitles()
except Exception as e:
return json.dumps({'result': 'failure', 'exception': str(e)})
# return the correct json value
status = 'Subtitles merged successfully '
ui.notifications.message('Merge Subtitles', status)
return json.dumps({'result': 'ok'})
class UI:
@cherrypy.expose
def add_message(self):
ui.notifications.message('Test 1', 'This is test number 1')
ui.notifications.error('Test 2', 'This is test number 2')
return "ok"
@cherrypy.expose
def get_messages(self):
messages = {}
cur_notification_num = 1
for cur_notification in ui.notifications.get_notifications():
messages['notification-'+str(cur_notification_num)] = {'title': cur_notification.title,
'message': cur_notification.message,
'type': cur_notification.type}
cur_notification_num += 1
return json.dumps(messages)
class WebInterface:
@cherrypy.expose
def index(self):
redirect("/home")
@cherrypy.expose
def showPoster(self, show=None, which=None):
#Redirect initial poster/banner thumb to default images
if which[0:6] == 'poster':
default_image_name = 'poster.png'
else:
default_image_name = 'banner.png'
default_image_path = ek.ek(os.path.join, sickbeard.PROG_DIR, 'gui', 'slick', 'images', default_image_name)
if show is None:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
else:
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
cache_obj = image_cache.ImageCache()
if which == 'poster':
image_file_name = cache_obj.poster_path(showObj.tvdbid)
if which == 'poster_thumb':
image_file_name = cache_obj.poster_thumb_path(showObj.tvdbid)
if which == 'banner':
image_file_name = cache_obj.banner_path(showObj.tvdbid)
if which == 'banner_thumb':
image_file_name = cache_obj.banner_thumb_path(showObj.tvdbid)
if ek.ek(os.path.isfile, image_file_name):
return cherrypy.lib.static.serve_file(image_file_name, content_type="image/jpeg")
else:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
@cherrypy.expose
def setHomeLayout(self, layout):
if layout not in ('poster', 'banner', 'simple'):
layout = 'poster'
sickbeard.HOME_LAYOUT = layout
redirect("/home")
@cherrypy.expose
def toggleDisplayShowSpecials(self, show):
sickbeard.DISPLAY_SHOW_SPECIALS = not sickbeard.DISPLAY_SHOW_SPECIALS
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def setComingEpsLayout(self, layout):
if layout not in ('poster', 'banner', 'list'):
layout = 'banner'
sickbeard.COMING_EPS_LAYOUT = layout
redirect("/comingEpisodes")
@cherrypy.expose
def toggleComingEpsDisplayPaused(self):
sickbeard.COMING_EPS_DISPLAY_PAUSED = not sickbeard.COMING_EPS_DISPLAY_PAUSED
redirect("/comingEpisodes")
@cherrypy.expose
def setComingEpsSort(self, sort):
if sort not in ('date', 'network', 'show'):
sort = 'date'
sickbeard.COMING_EPS_SORT = sort
redirect("/comingEpisodes")
@cherrypy.expose
def comingEpisodes(self, layout="None"):
# get local timezone and load network timezones
sb_timezone = tz.tzlocal()
network_dict = network_timezones.load_network_dict()
myDB = db.DBConnection()
today1 = datetime.date.today()
today = today1.toordinal()
next_week1 = (datetime.date.today() + datetime.timedelta(days=7))
next_week = next_week1.toordinal()
recently = (datetime.date.today() - datetime.timedelta(days=sickbeard.COMING_EPS_MISSED_RANGE)).toordinal()
done_show_list = []
qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED]
sql_results1 = myDB.select("SELECT *, 0 as localtime, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate < ? AND tv_shows.tvdb_id = tv_episodes.showid AND tv_episodes.status NOT IN ("+','.join(['?']*len(qualList))+")", [today, next_week] + qualList)
for cur_result in sql_results1:
done_show_list.append(helpers.tryInt(cur_result["showid"]))
more_sql_results = myDB.select("SELECT *, tv_shows.status as show_status FROM tv_episodes outer_eps, tv_shows WHERE season != 0 AND showid NOT IN ("+','.join(['?']*len(done_show_list))+") AND tv_shows.tvdb_id = outer_eps.showid AND airdate IN (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? AND inner_eps.status NOT IN ("+','.join(['?']*len(Quality.DOWNLOADED+Quality.SNATCHED))+") ORDER BY inner_eps.airdate ASC LIMIT 1)", done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED)
sql_results1 += more_sql_results
more_sql_results = myDB.select("SELECT *, 0 as localtime, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.tvdb_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN ("+','.join(['?']*len(qualList))+")", [today, recently, WANTED] + qualList)
sql_results1 += more_sql_results
# sort by localtime
sorts = {
'date': (lambda x, y: cmp(x["localtime"], y["localtime"])),
'show': (lambda a, b: cmp((a["show_name"], a["localtime"]), (b["show_name"], b["localtime"]))),
'network': (lambda a, b: cmp((a["network"], a["localtime"]), (b["network"], b["localtime"]))),
}
# make a dict out of the sql results
sql_results = [dict(row) for row in sql_results1]
# regex to parse time (12/24 hour format)
time_regex = re.compile(r"(\d{1,2}):(\d{2,2})( [PA]M)?\b", flags=re.IGNORECASE)
# add localtime to the dict
for index, item in enumerate(sql_results1):
mo = time_regex.search(item['airs'])
if mo != None and len(mo.groups()) >= 2:
try:
hr = helpers.tryInt(mo.group(1))
m = helpers.tryInt(mo.group(2))
ap = mo.group(3)
# convert am/pm to 24 hour clock
if ap != None:
if ap.lower() == u" pm" and hr != 12:
hr += 12
elif ap.lower() == u" am" and hr == 12:
hr -= 12
except:
hr = 0
m = 0
else:
hr = 0
m = 0
if hr < 0 or hr > 23 or m < 0 or m > 59:
hr = 0
m = 0
te = datetime.datetime.fromordinal(helpers.tryInt(item['airdate']))
foreign_timezone = network_timezones.get_network_timezone(item['network'], network_dict, sb_timezone)
            foreign_naive = datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=foreign_timezone)
sql_results[index]['localtime'] = foreign_naive.astimezone(sb_timezone)
#Normalize/Format the Airing Time
try:
locale.setlocale(locale.LC_TIME, 'us_US')
sql_results[index]['localtime_string'] = sql_results[index]['localtime'].strftime("%A %H:%M %p")
locale.setlocale(locale.LC_ALL, '') #Reseting to default locale
except:
sql_results[index]['localtime_string'] = sql_results[index]['localtime'].strftime("%A %H:%M %p")
sql_results.sort(sorts[sickbeard.COMING_EPS_SORT])
t = PageTemplate(file="comingEpisodes.tmpl")
# paused_item = { 'title': '', 'path': 'toggleComingEpsDisplayPaused' }
# paused_item['title'] = 'Hide Paused' if sickbeard.COMING_EPS_DISPLAY_PAUSED else 'Show Paused'
paused_item = { 'title': 'View Paused:', 'path': {'': ''} }
paused_item['path'] = {'Hide': 'toggleComingEpsDisplayPaused'} if sickbeard.COMING_EPS_DISPLAY_PAUSED else {'Show': 'toggleComingEpsDisplayPaused'}
t.submenu = [
{ 'title': 'Sort by:', 'path': {'Date': 'setComingEpsSort/?sort=date',
'Show': 'setComingEpsSort/?sort=show',
'Network': 'setComingEpsSort/?sort=network',
}},
{ 'title': 'Layout:', 'path': {'Banner': 'setComingEpsLayout/?layout=banner',
'Poster': 'setComingEpsLayout/?layout=poster',
'List': 'setComingEpsLayout/?layout=list',
}},
paused_item,
]
t.next_week = datetime.datetime.combine(next_week1, datetime.time(tzinfo=sb_timezone))
t.today = datetime.datetime.now().replace(tzinfo=sb_timezone)
t.sql_results = sql_results
# Allow local overriding of layout parameter
if layout and layout in ('poster', 'banner', 'list'):
t.layout = layout
else:
t.layout = sickbeard.COMING_EPS_LAYOUT
return _munge(t)
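    # Illustrative sketch (not part of the original handlers): comingEpisodes() above
    # converts an "HH:MM AM/PM" style airs string to 24-hour values inline. A minimal
    # standalone version of that conversion could look like the helper below. The
    # helper name is an assumption for clarity; nothing else in this class calls it.
    def _airs_to_24h(self, airs):
        mo = re.search(r"(\d{1,2}):(\d{2})( [PA]M)?\b", airs, flags=re.IGNORECASE)
        if not mo:
            return 0, 0
        hr, m, ap = int(mo.group(1)), int(mo.group(2)), mo.group(3)
        if ap:
            # convert am/pm to a 24 hour clock, mirroring the inline logic above
            if ap.strip().lower() == 'pm' and hr != 12:
                hr += 12
            elif ap.strip().lower() == 'am' and hr == 12:
                hr = 0
        if not (0 <= hr <= 23 and 0 <= m <= 59):
            hr, m = 0, 0
        return hr, m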
# Raw iCalendar implementation by Pedro Jose Pereira Vieito (@pvieito).
#
    # iCalendar (iCal) - Standard RFC 5545 <http://tools.ietf.org/html/rfc5545>
# Works with iCloud, Google Calendar and Outlook.
@cherrypy.expose
def calendar(self):
""" Provides a subscribeable URL for iCal subscriptions
"""
logger.log(u"Receiving iCal request from %s" % cherrypy.request.remote.ip)
poster_url = cherrypy.url().replace('ical', '')
time_re = re.compile('([0-9]{1,2})\:([0-9]{2})(\ |)([AM|am|PM|pm]{2})')
# Create a iCal string
ical = 'BEGIN:VCALENDAR\n'
ical += 'VERSION:2.0\n'
ical += 'PRODID://Sick-Beard Upcoming Episodes//\n'
# Get shows info
myDB = db.DBConnection()
# Limit dates
past_date = (datetime.date.today() + datetime.timedelta(weeks=-52)).toordinal()
future_date = (datetime.date.today() + datetime.timedelta(weeks=52)).toordinal()
# Get all the shows that are not paused and are currently on air (from kjoconnor Fork)
calendar_shows = myDB.select("SELECT show_name, tvdb_id, network, airs, runtime FROM tv_shows WHERE status = 'Continuing' AND paused != '1'")
for show in calendar_shows:
# Get all episodes of this show airing between today and next month
episode_list = myDB.select("SELECT tvdbid, name, season, episode, description, airdate FROM tv_episodes WHERE airdate >= ? AND airdate < ? AND showid = ?", (past_date, future_date, int(show["tvdb_id"])))
for episode in episode_list:
# Get local timezone and load network timezones
local_zone = tz.tzlocal()
try:
network_zone = network_timezones.get_network_timezone(show['network'], network_timezones.load_network_dict(), local_zone)
except:
# Dummy network_zone for exceptions
network_zone = None
# Get the air date and time
air_date = datetime.datetime.fromordinal(int(episode['airdate']))
air_time = re.compile('([0-9]{1,2})\:([0-9]{2})(\ |)([AM|am|PM|pm]{2})').search(show["airs"])
# Parse out the air time
try:
if (air_time.group(4).lower() == 'pm' and int(air_time.group(1)) == 12):
t = datetime.time(12, int(air_time.group(2)), 0, tzinfo=network_zone)
elif (air_time.group(4).lower() == 'pm'):
t = datetime.time((int(air_time.group(1)) + 12), int(air_time.group(2)), 0, tzinfo=network_zone)
elif (air_time.group(4).lower() == 'am' and int(air_time.group(1)) == 12):
t = datetime.time(0, int(air_time.group(2)), 0, tzinfo=network_zone)
else:
t = datetime.time(int(air_time.group(1)), int(air_time.group(2)), 0, tzinfo=network_zone)
except:
# Dummy time for exceptions
t = datetime.time(22, 0, 0, tzinfo=network_zone)
# Combine air time and air date into one datetime object
air_date_time = datetime.datetime.combine(air_date, t).astimezone(local_zone)
# Create event for episode
ical = ical + 'BEGIN:VEVENT\n'
ical = ical + 'DTSTART:' + str(air_date_time.date()).replace("-", "") + '\n'
ical = ical + 'SUMMARY:' + show['show_name'] + ': ' + episode['name'] + '\n'
ical = ical + 'UID:' + str(datetime.date.today().isoformat()) + '-' + str(random.randint(10000,99999)) + '@Sick-Beard\n'
if (episode['description'] != ''):
ical = ical + 'DESCRIPTION:' + show['airs'] + ' on ' + show['network'] + '\\n\\n' + episode['description'] + '\n'
else:
ical = ical + 'DESCRIPTION:' + show['airs'] + ' on ' + show['network'] + '\n'
ical = ical + 'LOCATION:' + 'Episode ' + str(episode['episode']) + ' - Season ' + str(episode['season']) + '\n'
ical = ical + 'END:VEVENT\n'
# Ending the iCal
ical += 'END:VCALENDAR\n'
return ical
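    # Illustrative sketch: calendar() above assembles VEVENT blocks by string
    # concatenation. A minimal standalone builder for a single event could look like
    # this; the helper name and its arguments are assumptions for clarity and nothing
    # else in this class uses it.
    def _build_vevent(self, show_name, episode_name, air_date_time, uid):
        lines = [
            'BEGIN:VEVENT',
            'DTSTART:' + str(air_date_time.date()).replace("-", ""),
            'SUMMARY:' + show_name + ': ' + episode_name,
            'UID:' + uid,
            'END:VEVENT',
        ]
        return '\n'.join(lines) + '\n'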
manage = Manage()
history = History()
config = Config()
home = Home()
api = Api()
browser = browser.WebFileBrowser()
errorlogs = ErrorLogs()
ui = UI()
| gpl-3.0 | -7,634,210,705,754,107,000 | 37.451903 | 565 | 0.567039 | false |
hrishioa/Aviato | flask/Lib/site-packages/shapely/geometry/base.py | 1 | 27715 | """Base geometry class and utilities
"""
import sys
from warnings import warn
from binascii import a2b_hex
from ctypes import pointer, c_size_t, c_char_p, c_void_p
from shapely.coords import CoordinateSequence
from shapely.ftools import wraps
from shapely.geos import lgeos, ReadingError
from shapely.geos import WKBWriter, WKTWriter
from shapely.impl import DefaultImplementation, delegated
if sys.version_info[0] < 3:
range = xrange
GEOMETRY_TYPES = [
'Point',
'LineString',
'LinearRing',
'Polygon',
'MultiPoint',
'MultiLineString',
'MultiPolygon',
'GeometryCollection',
]
def dump_coords(geom):
"""Dump coordinates of a geometry in the same order as data packing"""
if not isinstance(geom, BaseGeometry):
raise ValueError('Must be instance of a geometry class; found ' +
geom.__class__.__name__)
elif geom.type in ('Point', 'LineString', 'LinearRing'):
return geom.coords[:]
elif geom.type == 'Polygon':
return geom.exterior.coords[:] + [i.coords[:] for i in geom.interiors]
elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':
# Recursive call
return [dump_coords(part) for part in geom]
else:
raise ValueError('Unhandled geometry type: ' + repr(geom.type))
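# Illustrative sketch of dump_coords() in use. The geometries are arbitrary examples
# and the helper below is not called anywhere in this module; it only shows that a
# simple geometry yields one coordinate list while a multipart geometry yields one
# list per member, mirroring the data packing order described above.
def _dump_coords_example():
    from shapely.geometry import LineString, MultiPoint
    line = LineString([(0, 0), (1, 1)])
    multi = MultiPoint([(0, 0), (2, 2)])
    return dump_coords(line), dump_coords(multi)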
def geometry_type_name(g):
if g is None:
raise ValueError("Null geometry has no type")
return GEOMETRY_TYPES[lgeos.GEOSGeomTypeId(g)]
def geom_factory(g, parent=None):
# Abstract geometry factory for use with topological methods below
if not g:
raise ValueError("No Shapely geometry can be created from null value")
ob = BaseGeometry()
geom_type = geometry_type_name(g)
# TODO: check cost of dynamic import by profiling
mod = __import__(
'shapely.geometry',
globals(),
locals(),
[geom_type],
)
ob.__class__ = getattr(mod, geom_type)
ob.__geom__ = g
ob.__p__ = parent
if lgeos.methods['has_z'](g):
ob._ndim = 3
else:
ob._ndim = 2
return ob
def geom_from_wkt(data):
warn("`geom_from_wkt` is deprecated. Use `geos.wkt_reader.read(data)`.",
DeprecationWarning)
if sys.version_info[0] >= 3:
data = data.encode('ascii')
geom = lgeos.GEOSGeomFromWKT(c_char_p(data))
if not geom:
raise ReadingError(
"Could not create geometry because of errors while reading input.")
return geom_factory(geom)
def geom_to_wkt(ob):
warn("`geom_to_wkt` is deprecated. Use `geos.wkt_writer.write(ob)`.",
DeprecationWarning)
if ob is None or ob._geom is None:
raise ValueError("Null geometry supports no operations")
return lgeos.GEOSGeomToWKT(ob._geom)
def deserialize_wkb(data):
geom = lgeos.GEOSGeomFromWKB_buf(c_char_p(data), c_size_t(len(data)))
if not geom:
raise ReadingError(
"Could not create geometry because of errors while reading input.")
return geom
def geom_from_wkb(data):
warn("`geom_from_wkb` is deprecated. Use `geos.wkb_reader.read(data)`.",
DeprecationWarning)
return geom_factory(deserialize_wkb(data))
def geom_to_wkb(ob):
warn("`geom_to_wkb` is deprecated. Use `geos.wkb_writer.write(ob)`.",
DeprecationWarning)
if ob is None or ob._geom is None:
raise ValueError("Null geometry supports no operations")
size = c_size_t()
return lgeos.GEOSGeomToWKB_buf(c_void_p(ob._geom), pointer(size))
def geos_geom_from_py(ob, create_func=None):
"""Helper function for geos_*_from_py functions in each geom type.
If a create_func is specified the coodinate sequence is cloned and a new
geometry is created with it, otherwise the geometry is cloned directly.
This behaviour is useful for converting between LineString and LinearRing
objects.
"""
if create_func is None:
geom = lgeos.GEOSGeom_clone(ob._geom)
else:
cs = lgeos.GEOSGeom_getCoordSeq(ob._geom)
cs = lgeos.GEOSCoordSeq_clone(cs)
geom = create_func(cs)
N = ob._ndim
return geom, N
def exceptNull(func):
"""Decorator which helps avoid GEOS operations on null pointers."""
@wraps(func)
def wrapper(*args, **kwargs):
if not args[0]._geom or args[0].is_empty:
raise ValueError("Null/empty geometry supports no operations")
return func(*args, **kwargs)
return wrapper
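# Illustrative sketch of how exceptNull is meant to be applied: decorating a method so
# that calling it on a null/empty geometry raises immediately instead of handing a null
# pointer to GEOS. The _Demo class is a stand-in for a geometry-like object and is not
# part of the real API.
def _except_null_example():
    class _Demo(object):
        _geom = None
        is_empty = True
        @exceptNull
        def describe(self):
            return 'non-empty geometry'
    try:
        return _Demo().describe()
    except ValueError as err:
        # the decorator refuses to run the method on a null geometry
        return str(err)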
class CAP_STYLE(object):
round = 1
flat = 2
square = 3
class JOIN_STYLE(object):
round = 1
mitre = 2
bevel = 3
EMPTY = deserialize_wkb(a2b_hex(b'010700000000000000'))
class BaseGeometry(object):
"""
Provides GEOS spatial predicates and topological operations.
"""
# Attributes
# ----------
# __geom__ : c_void_p
# Cached ctypes pointer to GEOS geometry. Not to be accessed.
# _geom : c_void_p
# Property by which the GEOS geometry is accessed.
# __p__ : object
# Parent (Shapely) geometry
# _ctypes_data : object
# Cached ctypes data buffer
# _ndim : int
# Number of dimensions (2 or 3, generally)
# _crs : object
# Coordinate reference system. Available for Shapely extensions, but
# not implemented here.
# _other_owned : bool
# True if this object's GEOS geometry is owned by another as in the
# case of a multipart geometry member.
__geom__ = EMPTY
__p__ = None
_ctypes_data = None
_ndim = None
_crs = None
_other_owned = False
# Backend config
impl = DefaultImplementation
@property
def _is_empty(self):
return self.__geom__ in [EMPTY, None]
# a reference to the so/dll proxy to preserve access during clean up
_lgeos = lgeos
def empty(self, val=EMPTY):
# TODO: defer cleanup to the implementation. We shouldn't be
# explicitly calling a lgeos method here.
if not self._is_empty and not self._other_owned and self.__geom__:
try:
self._lgeos.GEOSGeom_destroy(self.__geom__)
except AttributeError:
pass # _lgeos might be empty on shutdown
self.__geom__ = val
def __del__(self):
self.empty(val=None)
self.__p__ = None
def __str__(self):
return self.wkt
# To support pickling
def __reduce__(self):
return (self.__class__, (), self.wkb)
def __setstate__(self, state):
self.empty()
self.__geom__ = deserialize_wkb(state)
if lgeos.methods['has_z'](self.__geom__):
self._ndim = 3
else:
self._ndim = 2
@property
def _geom(self):
return self.__geom__
@_geom.setter
def _geom(self, val):
self.empty()
self.__geom__ = val
# Operators
# ---------
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __sub__(self, other):
return self.difference(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
tuple(self.coords) == tuple(other.coords)
)
def __ne__(self, other):
return not self.__eq__(other)
__hash__ = object.__hash__
# Array and ctypes interfaces
# ---------------------------
@property
def ctypes(self):
"""Return ctypes buffer"""
raise NotImplementedError
@property
def array_interface_base(self):
if sys.byteorder == 'little':
typestr = '<f8'
elif sys.byteorder == 'big':
typestr = '>f8'
else:
raise ValueError(
"Unsupported byteorder: neither little nor big-endian")
return {
'version': 3,
'typestr': typestr,
'data': self.ctypes,
}
@property
def __array_interface__(self):
"""Provide the Numpy array protocol."""
raise NotImplementedError
# Coordinate access
# -----------------
def _get_coords(self):
"""Access to geometry's coordinates (CoordinateSequence)"""
if self.is_empty:
return []
return CoordinateSequence(self)
def _set_coords(self, ob):
raise NotImplementedError(
"set_coords must be provided by derived classes")
coords = property(_get_coords, _set_coords)
@property
def xy(self):
"""Separate arrays of X and Y coordinate values"""
raise NotImplementedError
# Python feature protocol
@property
def __geo_interface__(self):
"""Dictionary representation of the geometry"""
raise NotImplementedError
# Type of geometry and its representations
# ----------------------------------------
def geometryType(self):
return geometry_type_name(self._geom)
@property
def type(self):
return self.geometryType()
def to_wkb(self):
warn("`to_wkb` is deprecated. Use the `wkb` property.",
DeprecationWarning)
return geom_to_wkb(self)
def to_wkt(self):
warn("`to_wkt` is deprecated. Use the `wkt` property.",
DeprecationWarning)
return geom_to_wkt(self)
@property
def wkt(self, **kw):
"""WKT representation of the geometry"""
return WKTWriter(lgeos, **kw).write(self)
@property
def wkb(self):
"""WKB representation of the geometry"""
return WKBWriter(lgeos).write(self)
@property
def wkb_hex(self):
"""WKB hex representation of the geometry"""
return WKBWriter(lgeos).write_hex(self)
def svg(self, scale_factor=1., **kwargs):
"""Raises NotImplementedError"""
raise NotImplementedError
def _repr_svg_(self):
"""SVG representation for iPython notebook"""
svg_top = '<svg xmlns="http://www.w3.org/2000/svg" ' \
'xmlns:xlink="http://www.w3.org/1999/xlink" '
if self.is_empty:
return svg_top + '/>'
else:
# Establish SVG canvas that will fit all the data + small space
xmin, ymin, xmax, ymax = self.bounds
if xmin == xmax and ymin == ymax:
# This is a point; buffer using an arbitrary size
xmin, ymin, xmax, ymax = self.buffer(1).bounds
else:
# Expand bounds by a fraction of the data ranges
expand = 0.04 # or 4%, same as R plots
widest_part = max([xmax - xmin, ymax - ymin])
expand_amount = widest_part * expand
xmin -= expand_amount
ymin -= expand_amount
xmax += expand_amount
ymax += expand_amount
dx = xmax - xmin
dy = ymax - ymin
width = min([max([100., dx]), 300])
height = min([max([100., dy]), 300])
try:
scale_factor = max([dx, dy]) / max([width, height])
except ZeroDivisionError:
scale_factor = 1.
view_box = "{0} {1} {2} {3}".format(xmin, ymin, dx, dy)
transform = "matrix(1,0,0,-1,0,{0})".format(ymax + ymin)
return svg_top + (
'width="{1}" height="{2}" viewBox="{0}" '
'preserveAspectRatio="xMinYMin meet">'
'<g transform="{3}">{4}</g></svg>'
).format(view_box, width, height, transform,
self.svg(scale_factor))
@property
def geom_type(self):
"""Name of the geometry's type, such as 'Point'"""
return self.geometryType()
# Real-valued properties and methods
# ----------------------------------
@property
def area(self):
"""Unitless area of the geometry (float)"""
return self.impl['area'](self)
def distance(self, other):
"""Unitless distance to other geometry (float)"""
return self.impl['distance'](self, other)
@property
def length(self):
"""Unitless length of the geometry (float)"""
return self.impl['length'](self)
# Topological properties
# ----------------------
@property
def boundary(self):
"""Returns a lower dimension geometry that bounds the object
The boundary of a polygon is a line, the boundary of a line is a
collection of points. The boundary of a point is an empty (null)
collection.
"""
return geom_factory(self.impl['boundary'](self))
@property
def bounds(self):
"""Returns minimum bounding region (minx, miny, maxx, maxy)"""
if self.is_empty:
return ()
else:
return self.impl['bounds'](self)
@property
def centroid(self):
"""Returns the geometric center of the object"""
return geom_factory(self.impl['centroid'](self))
@delegated
def representative_point(self):
"""Returns a point guaranteed to be within the object, cheaply."""
return geom_factory(self.impl['representative_point'](self))
@property
def convex_hull(self):
"""Imagine an elastic band stretched around the geometry: that's a
convex hull, more or less
The convex hull of a three member multipoint, for example, is a
triangular polygon.
"""
return geom_factory(self.impl['convex_hull'](self))
@property
def envelope(self):
"""A figure that envelopes the geometry"""
return geom_factory(self.impl['envelope'](self))
def buffer(self, distance, resolution=16, quadsegs=None,
cap_style=CAP_STYLE.round, join_style=JOIN_STYLE.round,
mitre_limit=5.0):
"""Returns a geometry with an envelope at a distance from the object's
envelope
A negative distance has a "shrink" effect. A zero distance may be used
to "tidy" a polygon. The resolution of the buffer around each vertex of
the object increases by increasing the resolution keyword parameter
or second positional parameter. Note: the use of a `quadsegs` parameter
is deprecated and will be gone from the next major release.
The styles of caps are: CAP_STYLE.round (1), CAP_STYLE.flat (2), and
CAP_STYLE.square (3).
The styles of joins between offset segments are: JOIN_STYLE.round (1),
JOIN_STYLE.mitre (2), and JOIN_STYLE.bevel (3).
The mitre limit ratio is used for very sharp corners. The mitre ratio
is the ratio of the distance from the corner to the end of the mitred
offset corner. When two line segments meet at a sharp angle, a miter
join will extend the original geometry. To prevent unreasonable
geometry, the mitre limit allows controlling the maximum length of the
join corner. Corners with a ratio which exceed the limit will be
beveled.
Example:
>>> from shapely.wkt import loads
>>> g = loads('POINT (0.0 0.0)')
>>> g.buffer(1.0).area # 16-gon approx of a unit radius circle
3.1365484905459389
>>> g.buffer(1.0, 128).area # 128-gon approximation
3.1415138011443009
>>> g.buffer(1.0, 3).area # triangle approximation
3.0
        >>> list(g.buffer(1.0, cap_style=CAP_STYLE.square).exterior.coords)
[(1.0, 1.0), (1.0, -1.0), (-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0)]
        >>> g.buffer(1.0, cap_style=CAP_STYLE.square).area
4.0
"""
if quadsegs is not None:
warn(
"The `quadsegs` argument is deprecated. Use `resolution`.",
DeprecationWarning)
res = quadsegs
else:
res = resolution
if mitre_limit == 0.0:
raise ValueError(
'Cannot compute offset from zero-length line segment')
if cap_style == CAP_STYLE.round and join_style == JOIN_STYLE.round:
return geom_factory(self.impl['buffer'](self, distance, res))
if 'buffer_with_style' not in self.impl:
raise NotImplementedError("Styled buffering not available for "
"GEOS versions < 3.2.")
return geom_factory(self.impl['buffer_with_style'](self, distance, res,
cap_style,
join_style,
mitre_limit))
@delegated
def simplify(self, tolerance, preserve_topology=True):
"""Returns a simplified geometry produced by the Douglas-Puecker
algorithm
Coordinates of the simplified geometry will be no more than the
tolerance distance from the original. Unless the topology preserving
option is used, the algorithm may produce self-intersecting or
otherwise invalid geometries.
"""
if preserve_topology:
op = self.impl['topology_preserve_simplify']
else:
op = self.impl['simplify']
return geom_factory(op(self, tolerance))
# Binary operations
# -----------------
def difference(self, other):
"""Returns the difference of the geometries"""
return geom_factory(self.impl['difference'](self, other))
def intersection(self, other):
"""Returns the intersection of the geometries"""
return geom_factory(self.impl['intersection'](self, other))
def symmetric_difference(self, other):
"""Returns the symmetric difference of the geometries
(Shapely geometry)"""
return geom_factory(self.impl['symmetric_difference'](self, other))
def union(self, other):
"""Returns the union of the geometries (Shapely geometry)"""
return geom_factory(self.impl['union'](self, other))
# Unary predicates
# ----------------
@property
def has_z(self):
"""True if the geometry's coordinate sequence(s) have z values (are
3-dimensional)"""
return bool(self.impl['has_z'](self))
@property
def is_empty(self):
"""True if the set of points in this geometry is empty, else False"""
return (self._geom is None) or bool(self.impl['is_empty'](self))
@property
def is_ring(self):
"""True if the geometry is a closed ring, else False"""
return bool(self.impl['is_ring'](self))
@property
def is_closed(self):
"""True if the geometry is closed, else False
Applicable only to 1-D geometries."""
if self.geom_type == 'LinearRing':
return True
elif self.geom_type == 'LineString':
if 'is_closed' in self.impl:
return bool(self.impl['is_closed'](self))
else:
return self.coords[0] == self.coords[-1]
else:
return False
@property
def is_simple(self):
"""True if the geometry is simple, meaning that any self-intersections
are only at boundary points, else False"""
return bool(self.impl['is_simple'](self))
@property
def is_valid(self):
"""True if the geometry is valid (definition depends on sub-class),
else False"""
return bool(self.impl['is_valid'](self))
# Binary predicates
# -----------------
def relate(self, other):
"""Returns the DE-9IM intersection matrix for the two geometries
(string)"""
return self.impl['relate'](self, other)
def contains(self, other):
"""Returns True if the geometry contains the other, else False"""
return bool(self.impl['contains'](self, other))
def crosses(self, other):
"""Returns True if the geometries cross, else False"""
return bool(self.impl['crosses'](self, other))
def disjoint(self, other):
"""Returns True if geometries are disjoint, else False"""
return bool(self.impl['disjoint'](self, other))
def equals(self, other):
"""Returns True if geometries are equal, else False"""
return bool(self.impl['equals'](self, other))
def intersects(self, other):
"""Returns True if geometries intersect, else False"""
return bool(self.impl['intersects'](self, other))
def overlaps(self, other):
"""Returns True if geometries overlap, else False"""
return bool(self.impl['overlaps'](self, other))
def touches(self, other):
"""Returns True if geometries touch, else False"""
return bool(self.impl['touches'](self, other))
def within(self, other):
"""Returns True if geometry is within the other, else False"""
return bool(self.impl['within'](self, other))
def equals_exact(self, other, tolerance):
"""Returns True if geometries are equal to within a specified
tolerance"""
# return BinaryPredicateOp('equals_exact', self)(other, tolerance)
return bool(self.impl['equals_exact'](self, other, tolerance))
def almost_equals(self, other, decimal=6):
"""Returns True if geometries are equal at all coordinates to a
specified decimal place"""
return self.equals_exact(other, 0.5 * 10**(-decimal))
# Linear referencing
# ------------------
@delegated
def project(self, other, normalized=False):
"""Returns the distance along this geometry to a point nearest the
specified point
If the normalized arg is True, return the distance normalized to the
length of the linear geometry.
"""
if normalized:
op = self.impl['project_normalized']
else:
op = self.impl['project']
return op(self, other)
@delegated
def interpolate(self, distance, normalized=False):
"""Return a point at the specified distance along a linear geometry
If the normalized arg is True, the distance will be interpreted as a
fraction of the geometry's length.
"""
if normalized:
op = self.impl['interpolate_normalized']
else:
op = self.impl['interpolate']
return geom_factory(op(self, distance))
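# Illustrative sketch exercising a few of the methods documented above: Douglas-Peucker
# simplification and the linear-referencing pair project()/interpolate(). The
# coordinates and tolerance are arbitrary and the function is not called anywhere in
# this module.
def _base_geometry_example():
    from shapely.geometry import LineString, Point
    line = LineString([(0, 0), (1, 0.0001), (2, 0)])
    # the nearly collinear middle vertex is within the tolerance and gets dropped
    simplified = line.simplify(0.01)
    # distance along the line to the point nearest (1, 1), then the point found there
    d = line.project(Point(1, 1))
    nearest = line.interpolate(d)
    return len(simplified.coords), d, nearest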
class BaseMultipartGeometry(BaseGeometry):
def shape_factory(self, *args):
# Factory for part instances, usually a geometry class
raise NotImplementedError("To be implemented by derived classes")
@property
def ctypes(self):
raise NotImplementedError(
"Multi-part geometries have no ctypes representations")
@property
def __array_interface__(self):
"""Provide the Numpy array protocol."""
raise NotImplementedError("Multi-part geometries do not themselves "
"provide the array interface")
def _get_coords(self):
raise NotImplementedError("Sub-geometries may have coordinate "
"sequences, but collections do not")
def _set_coords(self, ob):
raise NotImplementedError("Sub-geometries may have coordinate "
"sequences, but collections do not")
@property
def coords(self):
raise NotImplementedError(
"Multi-part geometries do not provide a coordinate sequence")
@property
def geoms(self):
if self.is_empty:
return []
return GeometrySequence(self, self.shape_factory)
def __iter__(self):
if not self.is_empty:
return iter(self.geoms)
else:
return iter([])
def __len__(self):
if not self.is_empty:
return len(self.geoms)
else:
return 0
def __getitem__(self, index):
if not self.is_empty:
return self.geoms[index]
else:
return ()[index]
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
len(self) == len(other) and
all(x == y for x, y in zip(self, other))
)
def __ne__(self, other):
return not self.__eq__(other)
__hash__ = object.__hash__
def svg(self, scale_factor=1., color=None):
"""Returns a group of SVG elements for the multipart geometry.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
color : str, optional
Hex string for stroke or fill color. Default is to use "#66cc99"
if geometry is valid, and "#ff3333" if invalid.
"""
if self.is_empty:
return '<g />'
if color is None:
color = "#66cc99" if self.is_valid else "#ff3333"
return '<g>' + \
''.join(p.svg(scale_factor, color) for p in self) + \
'</g>'
class GeometrySequence(object):
"""
Iterative access to members of a homogeneous multipart geometry.
"""
# Attributes
# ----------
# _factory : callable
# Returns instances of Shapely geometries
# _geom : c_void_p
# Ctypes pointer to the parent's GEOS geometry
# _ndim : int
# Number of dimensions (2 or 3, generally)
# __p__ : object
# Parent (Shapely) geometry
shape_factory = None
_geom = None
__p__ = None
_ndim = None
def __init__(self, parent, type):
self.shape_factory = type
self.__p__ = parent
def _update(self):
self._geom = self.__p__._geom
self._ndim = self.__p__._ndim
def _get_geom_item(self, i):
g = self.shape_factory()
g._other_owned = True
g._geom = lgeos.GEOSGetGeometryN(self._geom, i)
g._ndim = self._ndim
g.__p__ = self
return g
def __iter__(self):
self._update()
for i in range(self.__len__()):
yield self._get_geom_item(i)
def __len__(self):
self._update()
return lgeos.GEOSGetNumGeometries(self._geom)
def __getitem__(self, key):
self._update()
m = self.__len__()
if isinstance(key, int):
if key + m < 0 or key >= m:
raise IndexError("index out of range")
if key < 0:
i = m + key
else:
i = key
return self._get_geom_item(i)
elif isinstance(key, slice):
if type(self) == HeterogeneousGeometrySequence:
raise TypeError(
"Heterogenous geometry collections are not sliceable")
res = []
start, stop, stride = key.indices(m)
for i in range(start, stop, stride):
res.append(self._get_geom_item(i))
return type(self.__p__)(res or None)
else:
raise TypeError("key must be an index or slice")
@property
def _longest(self):
max = 0
for g in iter(self):
l = len(g.coords)
if l > max:
                max = l
        return max
class HeterogeneousGeometrySequence(GeometrySequence):
"""
Iterative access to a heterogeneous sequence of geometries.
"""
def __init__(self, parent):
super(HeterogeneousGeometrySequence, self).__init__(parent, None)
def _get_geom_item(self, i):
sub = lgeos.GEOSGetGeometryN(self._geom, i)
g = geom_factory(sub, parent=self)
g._other_owned = True
return g
def _test():
"""Test runner"""
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
| gpl-2.0 | -4,349,629,784,772,605,400 | 30.602052 | 79 | 0.57799 | false |
BrechtBa/python-git-package | python_git_package/python_git_package.py | 1 | 16982 | #!/usr/bin/env python
################################################################################
# Copyright 2016 Brecht Baeten
# This file is part of python-git-package.
#
# python-git-package is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-git-package is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-git-package. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import sys
import os
import datetime
import re
import subprocess
import zipfile
import sphinx
import distutils.core
from . import utils
setup_file = utils.load_template('setup.py')
readme_file = utils.load_template('README.rst')
gitignore_file = utils.load_template('gitignore')
test_file = utils.load_template('tests.py')
function_scaffold = utils.load_template('function_scaffold.py')
license_headers = utils.load_templates_folder('license/header')
license_texts = utils.load_templates_folder('license/text')
docs_conf_file = utils.load_template('sphinx/conf.py')
docs_index_file = utils.load_template('sphinx/index.rst')
docs_packagename_file = utils.load_template('sphinx/packagename.rst')
thisfilepath = os.path.dirname(os.path.realpath(__file__))
def init():
"""
Scaffolding
Examples
--------
.. code-block:: bash
pgp init
"""
# default package data
package_data = {}
package_data['packagename'] = os.path.split(os.getcwd())[-1]
package_data['description'] = ''
package_data['url'] = ''
package_data['author'] = 'me'
package_data['author_email'] = ''
package_data['license'] = 'MIT'
package_data['python_version'] = '2.7'
# current year
now = datetime.datetime.now()
package_data['year'] = now.year
# check for existing files
createsetup = True
if os.path.isfile('setup.py'):
response = utils.raw_input_validated(
'A setup.py file was found, keep this file? (y)', 'y',
['y', 'n', 'yes', 'no'],
'Error: {} is not a valid response',
'Valid responses are:')
if response in ['y', 'yes']:
createsetup = False
createmanifest = True
if os.path.isfile('manifest.in'):
response = utils.raw_input_validated(
'A manifest.in file was found, keep this file? (y)', 'y',
['y', 'n', 'yes', 'no'],
'Error: {} is not a valid response',
'Valid responses are:')
if response in ['y', 'yes']:
createmanifest = False
createlicense = True
if os.path.isfile('LICENSE') or os.path.isfile('license') or os.path.isfile('LICENSE.txt') or os.path.isfile(
'license.txt') or os.path.isfile('LICENSE.md') or os.path.isfile('license.md'):
response = utils.raw_input_validated(
'A license file was found, keep this file? (y)', 'y',
['y', 'n', 'yes', 'no'],
'Error: {} is not a valid response',
'Valid responses are:')
if response in ['y', 'yes']:
createlicense = False
createreadme = True
if os.path.isfile('README') or os.path.isfile('readme') or os.path.isfile('README.rst') or os.path.isfile(
'readme.rst') or os.path.isfile('README.md') or os.path.isfile('readme.md'):
response = utils.raw_input_validated(
'A readme file was found, keep this file? (y)', 'y',
['y', 'n', 'yes', 'no'],
'Error: {} is not a valid response',
'Valid responses are:')
if response in ['y', 'yes']:
createreadme = False
creategitignore = True
if os.path.isfile('.gitignore'):
response = utils.raw_input_validated(
'A .gitignore file was found, keep this file? (y)', 'y',
['y', 'n', 'yes', 'no'],
'Error: {} is not a valid response',
'Valid responses are:')
if response in ['y', 'yes']:
creategitignore = False
createdocs = True
if os.path.isdir('doc'):
response = utils.raw_input_validated(
'A doc directory was found, keep this directory? (y)', 'y',
['y', 'n', 'yes', 'no'],
'Error: {} is not a valid response',
'Valid responses are:')
if response in ['y', 'yes']:
createdocs = False
else:
response = utils.raw_input_validated(
'Create sphinx doc directory? (y)', 'y',
['y', 'n', 'yes', 'no'],
'Error: {} is not a valid response',
'Valid responses are:')
if response in ['n', 'no']:
createdocs = False
# check existing files for package data
if not createsetup:
package_data.update(get_data_from_setup())
# ask for the package data
print('')
package_data['packagename'] = utils.raw_input('Package name ({}): '.format(package_data['packagename']))\
or package_data['packagename']
package_data['packagename_file'] = package_data['packagename'].replace('-', '_')
package_data['packagename_caps'] = package_data['packagename_file'].title()
package_data['packagename_underline'] = package_data['packagename'] + '\n' + '=' * len(package_data['packagename'])
package_data['description'] = utils.raw_input('Package description ({}): '.format(package_data['description'])) \
or package_data['description']
package_data['url'] = utils.raw_input('Package url ({}): '.format(package_data['url'])) or package_data['url']
package_data['author'] = utils.raw_input('Author ({}): '.format(package_data['author'])) or package_data['author']
package_data['author_email'] = utils.raw_input('Author email ({}): '.format(package_data['author_email'])) \
or package_data['author_email']
package_data['license'] = utils.raw_input_validated(
'License ({}): '.format(package_data['license']),
package_data['license'],
license_texts.keys(),
'Error: {} is not a valid license name',
'Valid licence names are:')
package_data['python_version'] = utils.raw_input('Python version ({}): '.format(package_data['python_version'])) \
or package_data['python_version']
# create folders
if not os.path.exists(package_data['packagename_file']):
os.makedirs(package_data['packagename_file'])
if not os.path.exists('tests'):
os.makedirs('tests')
if not os.path.exists('examples'):
os.makedirs('examples')
# create files if they are not present
if createsetup:
file = open('setup.py', 'w+')
file.write(setup_file.format(**package_data))
file.close()
if createmanifest:
file = open('manifest.in', 'w+')
        file.write('include README.rst\ninclude LICENSE\ninclude examples/example.py')
file.close()
if createreadme:
file = open('README.rst', 'w+')
file.write(readme_file.format(**package_data))
file.close()
if createlicense:
file = open('LICENSE', 'w+')
file.write(license_texts[package_data['license']])
file.close()
if creategitignore:
file = open('.gitignore', 'w+')
file.write(gitignore_file)
file.close()
if createdocs:
if not os.path.isdir('doc'):
os.mkdir('doc')
if not os.path.isdir('doc/source'):
os.mkdir('doc/source')
if not os.path.isdir('doc/build'):
os.mkdir('doc/build')
if not os.path.isdir('doc/source/_static'):
os.mkdir('doc/source/_static')
if not os.path.isdir('doc/source/_templates'):
os.mkdir('doc/source/_templates')
file = open('doc/source/conf.py', 'w+')
file.write(docs_conf_file.format(**package_data))
file.close()
file = open('doc/source/index.rst', 'w+')
file.write(docs_index_file.format(**package_data))
file.close()
file = open('doc/source/{}.rst'.format(package_data['packagename_file']), 'w+')
file.write(docs_packagename_file.format(**package_data))
file.close()
file = open('doc/.gitignore', 'w+')
file.write('build')
file.close()
filename = os.path.join(package_data['packagename_file'], '__init__.py')
if not os.path.isfile(filename):
file = open(filename, 'w+')
file.write('from .__version__ import version as __version__\n')
file.write('from {} import *\n'.format(package_data['packagename_file']))
file.close()
filename = os.path.join(package_data['packagename_file'], '__version__.py')
if not os.path.isfile(filename):
file = open(filename, 'w+')
file.write('version = \'0.0.0\'')
file.close()
filename = os.path.join(package_data['packagename_file'], '{}.py'.format(package_data['packagename_file']))
if not os.path.isfile(filename):
file = open(filename, 'w+')
file.write(license_headers[package_data['license']].format(**package_data))
file.write('\n')
file.write(function_scaffold)
file.close()
filename = os.path.join('examples', 'example.py')
if not os.path.isfile(filename):
file = open(filename, 'w+')
file.write(license_headers[package_data['license']].format(**package_data))
file.close()
filename = os.path.join('tests', 'test_{}.py'.format(package_data['packagename_file']))
if not os.path.isfile(filename):
file = open(filename, 'w+')
file.write(license_headers[package_data['license']].format(**package_data))
file.write(test_file.format(**package_data))
file.close()
filename = os.path.join('tests', 'all.py')
if not os.path.isfile(filename):
file = open(filename, 'w+')
file.write(license_headers[package_data['license']].format(**package_data))
file.write('import unittest\n\n')
file.write('from test_{packagename_file} import *\n\n'.format(**package_data))
file.write('if __name__ == \'__main__\':\n unittest.main()')
file.close()
# initialise a git repository
subprocess.check_output(['git', 'init'])[:-1]
subprocess.check_output(['git', 'add', '.'])[:-1]
subprocess.check_output(['git', 'commit', '-m', 'initial commit'])[:-1]
subprocess.check_output(['git', 'checkout', '-b', 'dev'])
def release():
"""
Creates a new release
Examples
--------
.. code-block:: bash
pgp release
"""
# search for a version file
versionfilename = ''
for d in os.walk('.'):
if not 'build' in d[0]:
filename = os.path.join(d[0], '__version__.py')
if os.path.isfile(filename):
versionfilename = filename
break
    if versionfilename == '':
print('Could not find __version__.py')
# get the previous version number from git
output = subprocess.check_output(['git', 'tag'])[:-1]
if isinstance(output, bytes):
output = output.decode('utf-8')
if not output == '':
splitoutput = output.split('\n')
oldversion = splitoutput[-1]
else:
# try to get the old version number from __version__.py
try:
with open(versionfilename, 'r') as f:
content = f.readline()
splitcontent = content.split('\'')
oldversion = splitcontent[1]
except:
print('Error while checking the version number. Check __version__.py')
return
splitoldversion = oldversion.split('.')
print('previous version: {}'.format(oldversion))
# ask for a new version number
version_ok = False
while not version_ok:
version = utils.raw_input('new version number: ')
try:
# check if the new version is higher than the old version
splitversion = version.split('.')
splitversion += [0]*(len(splitoldversion)-len(splitversion))
splitoldversion += [0]*(len(splitversion)-len(splitoldversion))
if sum([int(v) * 1000 ** i for i, v in enumerate(splitversion[::-1])]) <= sum(
[int(v) * 1000 ** i for i, v in enumerate(splitoldversion[::-1])]):
print('The new version ({}) is not higher than the old version ({})'.format(version, oldversion))
else:
version_ok = True
except:
print('Invalid version')
branch = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])[:-1]
# changelog = ''
# response = utils.raw_input_validated(
# 'Did you update the changelog? ', '',
# ['y', 'n', 'yes', 'no'],
# 'Error: {} is not a valid response',
# 'Valid responses are:')
# if response in ['n', 'no']:
# print('Update the changelog before issuing a release')
# return
print('')
print('GIT branch: {}'.format(branch))
print('Version: {}'.format(version))
response = utils.raw_input_validated(
'Is this ok? ', '',
['y', 'n', 'yes', 'no'],
'Error: {} is not a valid response',
'Valid responses are:')
if response in ['n', 'no']:
print('Exit')
return
# write the new version number to version.py
with open(versionfilename, 'w') as f:
f.write('version = \'{}\''.format(version))
# build the documentation
if os.path.exists('doc/source'):
doc()
# create a commit message
message = 'Created new version\nVersion: {}'.format(version)
message += '\nThis is an automated commit.'
# create the commit
output = subprocess.check_output(['git', 'commit', '-a', '-m', message])[:-1]
# merge the branch with master
output = subprocess.check_output(['git', 'checkout', 'master'])[:-1]
output = subprocess.check_output(['git', 'merge', branch])[:-1]
# add a git tag
output = subprocess.check_output(['git', 'tag', '{}'.format(version)])[:-1]
# checkout the old branch
output = subprocess.check_output(['git', 'checkout', branch])[:-1]
# build an sdist
distutils.core.run_setup('setup.py', script_args=['sdist'])
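# Illustrative sketch of the version comparison used inline in release() above: both
# version strings are padded to the same number of components and folded into a single
# integer with a base of 1000 per component, so e.g. "1.2" compares lower than "1.10".
# The helper name is an assumption; release() does not call it.
def _version_key(version, length=3):
    parts = [int(v) for v in version.split('.')]
    parts += [0] * (length - len(parts))
    return sum(v * 1000 ** i for i, v in enumerate(parts[::-1]))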
def doc():
"""
Builds the documentation to html using Sphinx
Examples
--------
.. code-block:: bash
pgp doc
"""
# check if the doc folder exists
sourcedir = os.path.join('doc', 'source')
builddir = os.path.join('doc', 'build', 'html')
if os.path.exists(sourcedir):
output = sphinx.main(['html', sourcedir, builddir])
print(output)
# create a zip file
zipf = zipfile.ZipFile(os.path.join(builddir, 'html.zip'), 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(os.path.join(builddir, 'html')):
for file in files:
fname = os.path.join(root, file)
aname = os.path.relpath(os.path.join(root, file), os.path.join(builddir, 'html'))
zipf.write(fname, aname)
# zipf.write(os.path.join(root, file))
zipf.close()
else:
print('Error: no sphinx documentation source found.')
print('Check doc/source')
def get_data_from_setup():
package_data = {}
with open('setup.py', 'r') as f:
for line in f:
match_obj = re.match('.*name=\'(.*)\'', line)
if match_obj:
package_data['name'] = match_obj.group(1)
match_obj = re.match('.*description=\'(.*)\'', line)
if match_obj:
package_data['description'] = match_obj.group(1)
match_obj = re.match('.*author=\'(.*)\'', line)
if match_obj:
package_data['author'] = match_obj.group(1)
match_obj = re.match('.*author_email=\'(.*)\'', line)
if match_obj:
package_data['author_email'] = match_obj.group(1)
match_obj = re.match('.*url=\'(.*)\'', line)
if match_obj:
package_data['url'] = match_obj.group(1)
match_obj = re.match('.*license=\'(.*)\'', line)
if match_obj:
package_data['license'] = match_obj.group(1)
return package_data
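# Illustrative sketch of the regex matching done in get_data_from_setup() above,
# applied to a single line. The sample line is made up for the example and the helper
# is not used elsewhere in this module.
def _parse_setup_line_example():
    line = "    name='my-package',"
    match_obj = re.match(".*name='(.*)'", line)
    return match_obj.group(1) if match_obj else None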
def execute_from_command_line():
command = sys.argv[1]
if command == 'init':
init()
elif command == 'release':
release()
elif command == 'doc':
doc()
else:
print('not a valid command')
print('usage:')
| gpl-3.0 | 7,694,822,997,281,497,000 | 34.082645 | 119 | 0.570024 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/application_gateway_backend_http_settings_py3.py | 1 | 3708 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayBackendHttpSettings(SubResource):
"""Backend address pool settings of an application gateway.
:param id: Resource ID.
:type id: str
:param port: Port
:type port: int
:param protocol: Protocol. Possible values are: 'Http' and 'Https'.
Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2016_09_01.models.ApplicationGatewayProtocol
:param cookie_based_affinity: Cookie based affinity. Possible values are:
'Enabled' and 'Disabled'. Possible values include: 'Enabled', 'Disabled'
:type cookie_based_affinity: str or
~azure.mgmt.network.v2016_09_01.models.ApplicationGatewayCookieBasedAffinity
:param request_timeout: Request timeout in seconds. Application Gateway
will fail the request if response is not received within RequestTimeout.
Acceptable values are from 1 second to 86400 seconds.
:type request_timeout: int
:param probe: Probe resource of an application gateway.
:type probe: ~azure.mgmt.network.v2016_09_01.models.SubResource
:param authentication_certificates: Array of references to application
gateway authentication certificates.
:type authentication_certificates:
list[~azure.mgmt.network.v2016_09_01.models.SubResource]
:param provisioning_state: Provisioning state of the backend http settings
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'port': {'key': 'properties.port', 'type': 'int'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'cookie_based_affinity': {'key': 'properties.cookieBasedAffinity', 'type': 'str'},
'request_timeout': {'key': 'properties.requestTimeout', 'type': 'int'},
'probe': {'key': 'properties.probe', 'type': 'SubResource'},
'authentication_certificates': {'key': 'properties.authenticationCertificates', 'type': '[SubResource]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, port: int=None, protocol=None, cookie_based_affinity=None, request_timeout: int=None, probe=None, authentication_certificates=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(ApplicationGatewayBackendHttpSettings, self).__init__(id=id, **kwargs)
self.port = port
self.protocol = protocol
self.cookie_based_affinity = cookie_based_affinity
self.request_timeout = request_timeout
self.probe = probe
self.authentication_certificates = authentication_certificates
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
| mit | 7,246,247,870,950,952,000 | 49.108108 | 252 | 0.661003 | false |
i-rabot/tractogithub | tracformatter/trac/upgrades/db29.py | 1 | 2023 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import shutil
from trac.util import create_unique_file
from trac.util.text import exception_to_unicode
_svn_components = [
'svn_fs.SubversionConnector',
'svn_prop.SubversionMergePropertyDiffRenderer',
'svn_prop.SubversionMergePropertyRenderer',
'svn_prop.SubversionPropertyRenderer',
]
_old_path = 'trac.versioncontrol.'
_new_path = 'tracopt.versioncontrol.svn.'
def do_upgrade(env, version, cursor):
"""Automatically enable tracopt.versioncontrol.svn.* components,
unless they were explicitly disabled or the new svn components are
already enabled.
"""
enable = [c for c in _svn_components
if env.is_component_enabled(_old_path + c) and
not env.is_component_enabled(_new_path + c)]
if not enable:
return
try:
backup, f = create_unique_file(env.config.filename
+ '.tracopt-svn.bak')
f.close()
shutil.copyfile(env.config.filename, backup)
env.log.info("Saved backup of configuration file in %s", backup)
except IOError, e:
env.log.warn("Couldn't save backup of configuration file (%s)",
exception_to_unicode(e))
for c in enable:
env.config.set('components', _new_path + c, 'enabled')
env.config.save()
env.log.info("Enabled components %r to cope with the move from %s to %s.",
enable,
_old_path.replace('.', '/'), _new_path.replace('.', '/'))
| bsd-3-clause | 8,694,380,195,205,799,000 | 35.462963 | 78 | 0.639644 | false |
MediffRobotics/DeepRobotics | DeepLearnMaterials/tutorials/real world examples/bank_marketing_learning/bank_marketing_sk.py | 1 | 4185 | # View more python learning tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
This code is written for Python 3. If you are using Python 2, please modify it accordingly.
This data set is from: http://archive.ics.uci.edu/ml/datasets/Bank+Marketing
It is a real bank marketing data set. The required data are included in this example folder. You can
download it and practice on your own.
The 'bank-full.csv' data set has:
1) 16 input features (age, job, marital, education, default, balance, housing, loan,
contact, day, month, duration, campaign, pdays, previous, poutcome);
2) 1 output (the answer yes or no to a deposit at the bank); and
3) 45211 samples
We will use this data set for training and testing.
"""
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split, ShuffleSplit, cross_val_score
from sklearn import preprocessing
import matplotlib.pyplot as plt
def feature_utility(data, selected_feature_name, target_name):
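    """Plot, for each class of `selected_feature_name`, the percentage of rows in
    each `target_name` class as a stacked bar chart."""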
target_classes = data[target_name].unique()
feature_classes = data[selected_feature_name].unique()
indices = np.arange(len(feature_classes))
percentages = np.zeros((len(target_classes), len(feature_classes)))
for j, feature_class in enumerate(feature_classes):
particular_feature = data[selected_feature_name][data[selected_feature_name] == feature_class]
feature_total = len(particular_feature)
for i, target_class in enumerate(target_classes):
class_count = len(particular_feature[data[target_name] == target_class])
percentage = class_count/feature_total
percentages[i, j] = percentage
colors = ['r', 'b', 'g']
width = 1
bars = []
for i in range(len(target_classes)):
c_number = int(i % len(colors))
color = colors[c_number]
if i == 0:
bar = plt.bar(indices, percentages[i, :], width, color=color)
else:
bar = plt.bar(indices, percentages[i, :], width, color=color, bottom=percentages[:i, :].sum(axis=0))
bars.append(bar)
plt.xticks(indices + width/2, feature_classes)
plt.ylabel('Percentage')
plt.xlabel(selected_feature_name)
plt.legend([bar[0] for bar in bars], target_classes, loc='best')
plt.show()
def encode_label(data):
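    """Label-encode the categorical columns of the data frame and return it."""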
la_en = preprocessing.LabelEncoder()
for col in ['job', 'marital', 'education', 'default', 'housing', 'loan',
'contact', 'month', 'poutcome', 'y']:
        data[col] = data[col].astype('category')
        data[col] = la_en.fit_transform(data[col])
return data
dataset_path = ['bank.csv', 'bank-full.csv']
bank_data = pd.read_csv(dataset_path[1], sep=';')
print(bank_data.head())
# good categorical features: job, marital, education, housing, loan, contact, month, poutcome
# bad categorical features: default
# feature_utility(bank_data, 'housing', 'y')
bank_data = encode_label(bank_data)
# print(bank_data.dtypes)
# print(bank_data.head())
X_data, y_data = bank_data.iloc[:, :-1], bank_data.iloc[:, -1]
# show the percentage of answer yes and no.
answer_no, answer_yes = y_data.value_counts()
print('Percentage of answering no: ', answer_no/(answer_no+answer_yes))
X_train, X_test, y_train, y_test = train_test_split(
X_data, y_data,
test_size=0.2)
dt_clf = DecisionTreeClassifier(class_weight='balanced',)
rf_clf = RandomForestClassifier(class_weight='balanced')
# randomize the data, and run the cross validation for 5 times
cv = ShuffleSplit(X_data.shape[0], n_iter=5,
test_size=0.3, random_state=0)
print(cross_val_score(dt_clf, X_data, y_data, cv=cv, scoring='f1').mean())
print(cross_val_score(rf_clf, X_data, y_data, cv=cv, scoring='f1').mean())
# dt_clf.fit(X_train, y_train)
# print(dt_clf.score(X_test, y_test))
# rf_clf.fit(X_train, y_train)
# print(rf_clf.score(X_test, y_test))
# print(rf_clf.predict(X_test.iloc[10, :][np.newaxis, :]))
# print(y_test.iloc[10])
| gpl-3.0 | 4,880,204,816,180,642,000 | 38.857143 | 116 | 0.68865 | false |
adambiser/snes-wolf3d-extractor | extractor/ui/optionsframe.py | 1 | 1572 | import tkinter as tk
class OptionsFrame(tk.LabelFrame):
COLUMNS = 3
def __init__(self, parent, settings, **options):
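        """Build the export-options pane; `settings` supplies the Tk variables
        bound to each checkbutton."""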
super().__init__(parent, **options)
self.config(text='Export Options',
padx=5,
pady=5)
self.parent = parent
# Add widgets.
tk.Checkbutton(self,
text='Export to subfolder named after ROM',
variable=settings.export_to_subfolder,
).pack(anchor=tk.NW,
)
tk.Checkbutton(self,
text='Combine maps into single file',
variable=settings.combine_maps,
).pack(anchor=tk.NW,
)
subframe = tk.LabelFrame(self, text='ROM Entry Types')
subframe.pack(fill=tk.X,
)
keys = sorted(settings.export.keys())
print(settings.export)
row = 0
col = 0
for key in keys:
tk.Checkbutton(subframe,
text=key,
variable=settings.export[key],
).grid(row=row,
column=col,
sticky=tk.W,
)
col += 1
if col == OptionsFrame.COLUMNS:
row += 1
col = 0
for col in range(OptionsFrame.COLUMNS):
tk.Grid.columnconfigure(subframe, col, weight=1, uniform='a')
| mit | -1,231,272,076,526,034,000 | 34.727273 | 73 | 0.431934 | false |
vitorio/pygrow | grow/pods/preprocessors/file_watchers.py | 1 | 4044 | from grow.pods.preprocessors import translation
from grow.pods.preprocessors import routes_cache
from watchdog import events
from watchdog import observers
class PodspecFileEventHandler(events.PatternMatchingEventHandler):
patterns = ['*/podspec.yaml']
ignore_directories = True
def __init__(self, pod, managed_observer, *args, **kwargs):
self.pod = pod
self.managed_observer = managed_observer
super(PodspecFileEventHandler, self).__init__(*args, **kwargs)
def handle(self, event=None):
self.pod.reset_yaml()
self.pod.routes.reset_cache(rebuild=True)
self.managed_observer.reschedule_children()
def on_created(self, event):
self.handle(event)
def on_modified(self, event):
self.handle(event)
class PreprocessorEventHandler(events.PatternMatchingEventHandler):
num_runs = 0
def __init__(self, preprocessor, *args, **kwargs):
self.preprocessor = preprocessor
super(PreprocessorEventHandler, self).__init__(*args, **kwargs)
def handle(self, event=None):
if event is not None and event.is_directory:
return
if self.num_runs == 0:
self.preprocessor.first_run()
else:
self.preprocessor.run()
self.num_runs += 1
def on_created(self, event):
self.handle(event)
def on_modified(self, event):
self.handle(event)
class ManagedObserver(observers.Observer):
def __init__(self, pod):
self.pod = pod
self._preprocessor_watches = []
self._child_observers = []
super(ManagedObserver, self).__init__()
def schedule_podspec(self):
podspec_handler = PodspecFileEventHandler(self.pod, managed_observer=self)
self.schedule(podspec_handler, path=self.pod.root, recursive=False)
def schedule_builtins(self):
try:
preprocessor = routes_cache.RoutesCachePreprocessor(pod=self.pod)
self._schedule_preprocessor('/content/', preprocessor, patterns=['*'])
self._schedule_preprocessor('/static/', preprocessor, patterns=['*'])
preprocessor = translation.TranslationPreprocessor(pod=self.pod)
self._schedule_preprocessor('/translations/', preprocessor, patterns=['*.po'])
except OSError:
# No translations directory found.
pass
def schedule_preprocessors(self):
self._preprocessor_watches = []
for preprocessor in self.pod.list_preprocessors():
for path in preprocessor.list_watched_dirs():
watch = self._schedule_preprocessor(path, preprocessor)
self._preprocessor_watches.append(watch)
def _schedule_preprocessor(self, path, preprocessor, **kwargs):
if 'ignore_directories' in kwargs:
kwargs['ignore_directories'] = [self.pod.abs_path(p)
for p in kwargs['ignore_directories']]
path = self.pod.abs_path(path)
handler = PreprocessorEventHandler(preprocessor, **kwargs)
return self.schedule(handler, path=path, recursive=True)
def reschedule_children(self):
for observer in self._child_observers:
for watch in observer._preprocessor_watches:
observer.unschedule(watch)
observer.schedule_preprocessors()
def add_child(self, observer):
self._child_observers.append(observer)
return observer
def start(self):
for observer in self._child_observers:
observer.start()
super(ManagedObserver, self).start()
def stop(self):
for observer in self._child_observers:
observer.stop()
super(ManagedObserver, self).stop()
def join(self):
for observer in self._child_observers:
observer.join()
super(ManagedObserver, self).join()
def run_handlers(self):
for handlers in self._handlers.values():
for handler in handlers:
handler.handle()
def create_dev_server_observers(pod):
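    """Build the dev-server observers: a main observer watching built-ins and
    preprocessors, plus a podspec observer that watches podspec.yaml and
    reschedules the main observer's preprocessor watches when it changes."""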
main_observer = ManagedObserver(pod)
main_observer.schedule_builtins()
main_observer.schedule_preprocessors()
podspec_observer = ManagedObserver(pod)
podspec_observer.schedule_podspec()
podspec_observer.add_child(main_observer)
podspec_observer.start()
return main_observer, podspec_observer
| mit | 3,954,190,714,449,856,500 | 30.348837 | 84 | 0.699802 | false |
avlach/univbris-ocf | optin_manager/src/python/openflow/tests/full/fulltests.py | 2 | 24951 | '''
Created on May 17, 2010
@author: jnaous
'''
import sys
from os.path import join, dirname
import subprocess
import shlex
PYTHON_DIR = join(dirname(__file__), "../../../")
sys.path.append(PYTHON_DIR)
from unittest import TestCase
from expedient.common.utils.certtransport import SafeTransportWithCert
from expedient.common.tests.client import Browser
from openflow.tests import test_settings
import xmlrpclib, re
from openflow.tests.helpers import kill_old_procs, parse_rspec
from openflow.tests.helpers import create_random_resv
from expedient.common.tests.commands import call_env_command, Env
from expedient.common.tests.utils import drop_to_shell, wrap_xmlrpc_call
import logging
logger = logging.getLogger(__name__)
from helpers import SSHClientPlus
RUN_FV_SUBPROCESS = True
SCHEME = "https" if test_settings.USE_HTTPS else "http"
class FullIntegration(TestCase):
MININET_TOPO = "tree,2"
EXPECTED_NUM_SWITCHES = 3
EXPECTED_NUM_LINKS = 4
NOX_APPS = "pyswitch packetdump"
def run_nox(self, mininet_vm, num, port_start):
"""
Connect to the mininet_vm and run 'num' instances of nox as
a pyswitch with packet dumping.
"""
kill_client = SSHClientPlus.exec_command_plus(
mininet_vm[0], "mininet", "mininet",
"sudo kill `ps -ae | grep lt-nox_core | awk '{ print $1 }'`",
port=mininet_vm[1],
)
kill_client.wait()
self.nox_clients = []
for i in xrange(num):
port = port_start + i
cmd = "cd noxcore/build/src; ./nox_core -i ptcp:%s %s" % (
port, self.NOX_APPS,
)
client = SSHClientPlus.exec_command_plus(
mininet_vm[0], "mininet", "mininet", cmd, port=mininet_vm[1],
)
self.nox_clients.append(client)
def connect_networks(self, flowvisors, mininet_vms, switch="ovsk"):
"""
        Create the Mininet topology given by MININET_TOPO on each mininet VM.
        Connect the switches to the FV.
"""
num = min([len(flowvisors), len(mininet_vms)])
self.mininet_vm_clients = []
for i in xrange(num):
logger.debug("Connecting to the mininet VM at %s:%s" % mininet_vms[i])
cmd = "sudo mn --topo=%s " % self.MININET_TOPO +\
"--controller=remote --ip=%s --port=%s --mac --switch=%s" % (
flowvisors[i]["host"], flowvisors[i]["of_port"], switch,
)
client = SSHClientPlus.exec_command_plus(
mininet_vms[i][0], "mininet", "mininet", cmd,
port=mininet_vms[i][1],
)
self.mininet_vm_clients.append(client)
logger.debug(
client.wait_for_prompt(
prompt="Starting CLI:\n", timeout=test_settings.TIMEOUT))
def run_flowvisor(self, flowvisor):
"""
Run flowvisor.
Delete all the rules and slices.
"""
if RUN_FV_SUBPROCESS:
kill_old_procs(flowvisor["of_port"],
flowvisor["xmlrpc_port"])
self.fv_procs.append(
self.run_proc_cmd(
"%s/scripts/flowvisor.sh %s/%s 2>&1 | tee /tmp/flowvisor.out " % (
flowvisor["path"][0], flowvisor["path"][0],
flowvisor["path"][1],
)
)
)
id_re = re.compile(r"id=\[(?P<id>\d+)\]")
fv_url = "https://%s:%s@%s:%s" % (
flowvisor["username"], flowvisor["password"],
flowvisor["host"], flowvisor["xmlrpc_port"],
)
s = xmlrpclib.ServerProxy(fv_url)
logger.debug("Waiting for flowvisor to be up.")
ret = wrap_xmlrpc_call(s.api.ping, ["PONG"], {}, test_settings.TIMEOUT)
logger.debug("Ping returned: %s" % ret)
logger.debug("Getting flowspace from flowvisor")
flowspaces = s.api.listFlowSpace()
ops = []
logger.debug("Deleting all flowspace")
for fs in flowspaces:
id = id_re.search(fs).group("id")
ops.append(dict(operation="REMOVE", id=id))
if ops: s.api.changeFlowSpace(ops)
slices = s.api.listSlices()
[s.api.deleteSlice(slice) for slice in slices if slice != "root"]
self.fv_clients.append(s)
def prepare_om(self, proj_dir, flowvisor, ch_username, ch_passwd):
"""
Flush the OM DB and add a flowvisor and user for the CH
"""
call_env_command(proj_dir, "flush",
interactive=False)
self.om_env = Env(proj_dir)
self.om_env.switch_to()
from django.contrib.auth.models import User
from openflow.optin_manager.users.models import UserProfile
from openflow.optin_manager.xmlrpc_server.models import FVServerProxy
from openflow.optin_manager.xmlrpc_server.ch_api import om_ch_translate
from openflow.optin_manager.opts.models import AdminFlowSpace, UserFlowSpace
import random
# Create the clearinghouse user
u = User.objects.create(username=ch_username)
u.set_password(ch_passwd)
u.save()
profile = UserProfile.get_or_create_profile(u)
profile.is_clearinghouse_user = True
profile.save()
# make a normal user on system
username = "user"
password = "password"
u = User.objects.create(username=username, is_active=True)
u.set_password(password)
u.save()
# assign flowspace to the user
random.seed(0)
self.user_ip_src_s = random.randint(0,0x80000000) & 0xFFFF0000
self.user_ip_src_e = random.randint(0x80000000,0xFFFFFFFF) & 0xFFFF0000
fields=["dl_src","dl_dst","vlan_id","tp_src","tp_dst"]
random.shuffle(fields)
(to_str,from_str,width,om_name,of_name) = om_ch_translate.attr_funcs[fields[0]]
self.user_field_name = om_name
self.user_field_s = random.randint(0,2**(width)-3)
self.user_field_e = self.user_field_s + 1
# assign full flowspace to admin:
username = "admin"
password = "password"
adm = User.objects.create(username=username, is_superuser=True,
is_staff=True, is_active=True)
adm.set_password(password)
adm.save()
profile = UserProfile.get_or_create_profile(adm)
profile.is_net_admin = True
profile.supervisor = adm
profile.max_priority_level = 7000
profile.save()
AdminFlowSpace.objects.create(user=adm)
# assign flowspace to user
ufs = UserFlowSpace(user=u, ip_src_s=self.user_ip_src_s,
ip_src_e=self.user_ip_src_e,approver=adm)
setattr(ufs,"%s_s"%self.user_field_name,self.user_field_s)
setattr(ufs,"%s_e"%self.user_field_name,self.user_field_e)
ufs.save()
# Create the FV proxy connection
fv = FVServerProxy(
name="Flowvisor",
username=flowvisor["username"],
password=flowvisor["password"],
url="https://%s:%s/xmlrpc" % (
flowvisor["host"], flowvisor["xmlrpc_port"],
),
verify_certs=False,
)
fv.save()
self.om_client = xmlrpclib.ServerProxy(
SCHEME+"://%s:%s@%s:%s/xmlrpc/xmlrpc/" % (
ch_username, ch_passwd,
test_settings.HOST, test_settings.OM_PORT,
)
)
self.om_env.switch_from()
def prepare_ch(self, proj_dir, ch_host, ch_port, ch_username, ch_passwd,
om_host, om_port):
"""
Flush and prepare the CH DB.
Add the OMs to the CH.
"""
from os.path import dirname
subprocess.call(shlex.split(
"python %s/manage.py flush --noinput" % proj_dir))
subprocess.call(shlex.split(
"python %s/manage.py syncdb --noinput" % proj_dir))
subprocess.call(shlex.split(
"python %s/manage.py createsuperuser --noinput "
"--username=expedient [email protected]" % proj_dir))
subprocess.call(shlex.split(
"python %s/manage.py set_fake_passwords"
" --password=expedient" % proj_dir))
browser = Browser()
browser.login(
SCHEME+"://%s:%s%s" % (
ch_host, ch_port, "/accounts/login/"),
"expedient", "expedient")
response = browser.get_and_post_form(
url=SCHEME+"://%s:%s%s" % (
ch_host, ch_port, "/openflow/aggregate/create/"),
params=dict(
name="Test OM",
description="TestOM Description",
location="Stanford, CA",
usage_agreement="Do you agree?",
username=ch_username,
password=ch_passwd,
url=SCHEME+"://%s:%s%s" % (
om_host, om_port, "/xmlrpc/xmlrpc/"),
),
del_params=["verify_certs"],
)
self.assertTrue(
response.geturl() == \
SCHEME+"://%s:%s%s" % (
ch_host, ch_port, "/openflow/aggregate/1/links/"),
"Did not redirect after create correctly. Response was: %s"
% response.read(),
)
def run_proc_cmd(self, cmd, wait=False):
"""
Run a command in a subprocess, return the new process.
"""
if test_settings.SHOW_PROCESSES_IN_XTERM:
from expedient.common.tests.utils import run_cmd_in_xterm as run_cmd
else:
from expedient.common.tests.utils import run_cmd
if wait:
return run_cmd(cmd).wait()
else:
return run_cmd(cmd)
def run_am_proxy(self, gcf_dir, ssl_dir, am_port, ch_host, ch_port):
"""
Create the ssl certs for the tests.
Run the AM proxy in a separate process.
"""
# create the certs if not already there.
self.run_proc_cmd("make -C %s" % ssl_dir).wait()
# run the am
self.am_proc = self.run_proc_cmd(
"python %s -r %s -c %s -k %s -p %s -u %s --debug -H 0.0.0.0" % (
join(gcf_dir, "gam.py"), join(ssl_dir, "certs"),
join(ssl_dir, "server.crt"), join(ssl_dir, "server.key"),
am_port,
SCHEME + "://%s:%s/openflow/gapi/"
% (ch_host, ch_port),
)
)
cert_transport = SafeTransportWithCert(
keyfile=join(ssl_dir, "experimenter.key"),
certfile=join(ssl_dir, "experimenter.crt"))
self.am_client = xmlrpclib.ServerProxy(
"https://%s:%s/" % (test_settings.HOST, am_port),
transport=cert_transport)
def run_geni_ch(self, gcf_dir, ssl_dir, ch_port):
"""
Run the GENI Sample CH in a subprocess and connect to it.
"""
self.ch_proc = self.run_proc_cmd(
"python %s -u %s -r %s -c %s -k %s -p %s --debug -H 0.0.0.0" % (
join(gcf_dir, "gch.py"),
join(ssl_dir, "experimenter.crt"),
join(ssl_dir, "certs"),
join(ssl_dir, "ch.crt"), join(ssl_dir, "ch.key"),
ch_port,
)
)
cert_transport = SafeTransportWithCert(
keyfile=join(ssl_dir, "experimenter.key"),
certfile=join(ssl_dir, "experimenter.crt"))
self.ch_client = xmlrpclib.ServerProxy(
"https://%s:%s/" % (test_settings.HOST, ch_port),
transport=cert_transport)
def create_ch_slice(self):
"""
Code mostly copied from GENI test harness from BBN.
"""
import gcf.sfa.trust.credential as cred
slice_cred_string = wrap_xmlrpc_call(
self.ch_client.CreateSlice, [], {}, test_settings.TIMEOUT)
slice_credential = cred.Credential(string=slice_cred_string)
slice_gid = slice_credential.get_gid_object()
slice_urn = slice_gid.get_urn()
# Set up the array of credentials as just the slice credential
credentials = [slice_cred_string]
return (slice_urn, credentials)
def setUp(self):
"""
Run dummy networks and connect them to the FVs
Run dummy Controllers
Load the configuration for the OM
Load the configuration for the AM
"""
# Kill stale processes
kill_old_procs(test_settings.GAM_PORT, test_settings.GCH_PORT)
self.ch_username = "clearinghouse"
self.ch_passwd = "ch_password"
# clear all slices/flowspaces from fvs
self.fv_procs = []
self.fv_clients = []
for flowvisor in test_settings.FLOWVISORS:
self.run_flowvisor(flowvisor)
# # run experiment controllers
# self.run_nox(
# test_settings.MININET_VMS[0][0],
# test_settings.NUM_EXPERIMENTS,
# 6633,
# )
# connect the networks to FVs
self.connect_networks(
test_settings.FLOWVISORS,
test_settings.MININET_VMS,
test_settings.MININET_SWITCH_TYPE,
)
# setup the OM
self.prepare_om(
test_settings.OM_PROJECT_DIR,
test_settings.FLOWVISORS[0],
self.ch_username,
self.ch_passwd,
)
# setup Expedient (aka AM)
self.prepare_ch(
test_settings.CH_PROJECT_DIR,
test_settings.HOST,
test_settings.CH_PORT,
self.ch_username,
self.ch_passwd,
test_settings.HOST,
test_settings.OM_PORT,
)
# store the trusted CA dir
import os
from django.conf import settings as djangosettings
self.before = os.listdir(djangosettings.XMLRPC_TRUSTED_CA_PATH)
# Run the AM proxy for GENI and the GENI clearinghouse
self.run_geni_ch(
test_settings.GCF_DIR, test_settings.SSL_DIR, test_settings.GAM_PORT)
self.run_am_proxy(
test_settings.GCF_DIR, test_settings.SSL_DIR, test_settings.GCH_PORT,
test_settings.HOST, test_settings.CH_PORT)
def tearDown(self):
"""
Clean up the Flowvisor rules/slices
Clear running stuff and so on...
"""
# restore the trusted CA dir
from django.conf import settings as djangosettings
import os
after = os.listdir(djangosettings.XMLRPC_TRUSTED_CA_PATH)
for path in after:
if path not in self.before:
os.unlink(os.path.join(djangosettings.XMLRPC_TRUSTED_CA_PATH, path))
if test_settings.PAUSE_AFTER_TESTS:
raw_input("Press ENTER to continue:")
# clear all slices/flowspaces from fvs
if RUN_FV_SUBPROCESS:
for fv_proc in self.fv_procs:
try:
fv_proc.terminate()
except:
pass
self.am_proc.terminate()
self.ch_proc.terminate()
# kill ssh sessions
# for c in self.nox_clients:
# out = c.communicate("\03", check_closed=True)
# logger.debug("nox stdout %s" % out)
for c in self.mininet_vm_clients:
out = c.communicate("exit()\n", check_closed=True)
logger.debug("mn stdout %s" % out)
c.wait()
if RUN_FV_SUBPROCESS:
for flowvisor in test_settings.FLOWVISORS:
kill_old_procs(flowvisor["of_port"], flowvisor["xmlrpc_port"])
# Kill stale processes
kill_old_procs(test_settings.GAM_PORT, test_settings.GCH_PORT)
# for c in self.nox_clients:
# try:
# c.close()
# except:
# pass
for c in self.fv_procs:
try:
c.close()
except:
pass
for c in self.mininet_vm_clients:
try:
c.close()
except:
pass
def create_sliver_core(self,fs_randomness):
"""
        Check that we can create a slice on the FV
"""
# check no other slices
slices = self.fv_clients[0].api.listSlices()
self.assertEqual(len(slices), 1) # root
# get the resources
slice_urn, cred = self.test_ListResources()
# create a random reservation
slice_name = "SliceNameBla"
email = "[email protected]"
url = "tcp:%s:%s" % (test_settings.MININET_VMS[0][0], 6633)
resv_rspec, flowspaces = create_random_resv(
2, self.switches,
slice_name=slice_name,
email=email,
ctrl_url=url,
fs_randomness = fs_randomness,
)
users = [{'key':''}]
self.am_client.CreateSliver(slice_urn, cred, resv_rspec, users)
# TODO: check that the full reservation rspec is returned
slices = self.fv_clients[0].api.listSlices()
logger.debug(slices)
self.assertEqual(len(slices), 2) # root + new slice
fv_slice_name = slices[1] if slices[0] == "root" else slices[0]
# Check the name
self.assertTrue(
slice_name in fv_slice_name,
"Expected to find '%s' in slice name '%s', but didn't" % (
slice_name, fv_slice_name,
)
)
# Check the slice information
slice_info = self.fv_clients[0].api.getSliceInfo(fv_slice_name)
logger.debug("Slice info is %s" % slice_info)
self.assertEqual(slice_info["contact_email"], email)
self.assertEqual(slice_info["controller_port"], "6633")
self.assertEqual(slice_info["controller_hostname"],
test_settings.MININET_VMS[0][0])
return (slice_urn, cred)
def test_GetVersion(self):
ret = wrap_xmlrpc_call(
self.am_client.GetVersion, [], {}, test_settings.TIMEOUT)
self.assertEqual(ret['geni_api'], 1)
def test_ListResources(self):
"""
Check the list of resources.
"""
# check the switches on the FV
devices = self.fv_clients[0].api.listDevices()
logger.debug("FV devices: %s" % devices)
self.assertEqual(len(set(devices)), self.EXPECTED_NUM_SWITCHES)
slice_urn, cred = self.create_ch_slice()
options = dict(geni_compressed=False, geni_available=True)
rspec = wrap_xmlrpc_call(
self.am_client.ListResources, [cred, options], {},
test_settings.TIMEOUT)
logger.debug(rspec)
# Create switches and links
self.switches, self.links = parse_rspec(rspec)
# check the number of switches and links
self.assertEqual(len(self.switches), self.EXPECTED_NUM_SWITCHES)
self.assertEqual(len(self.links), self.EXPECTED_NUM_LINKS)
return slice_urn, cred
def test_CreateSliver(self):
"""
        Check that we can create a slice on the FV
"""
return self.create_sliver_core(fs_randomness=True)
def test_CreateDeleteSliver(self):
"""
Check that we can create then delete a sliver.
"""
slice_urn, cred = self.test_CreateSliver()
self.assertTrue(
wrap_xmlrpc_call(
self.am_client.DeleteSliver,
[slice_urn, cred], {},
test_settings.TIMEOUT),
"Failed to delete sliver.")
self.assertEqual(
len(self.fv_clients[0].api.listSlices()),
1,
"Slice not deleted at FlowVisor",
)
def test_UserOptIn1(self):
"""
Test a user opting in.
"""
from expedient.common.tests.client import Browser
#create an experiment through CH
self.create_sliver_core(fs_randomness=False)
logger.debug("Done creating sliver")
# Get user to opt in
logger.debug("Logging into browser")
b = Browser()
logged_in = b.login(SCHEME+"://%s:%s/accounts/login/"%
(test_settings.HOST, test_settings.OM_PORT),
"user","password")
self.assertTrue(logged_in,"Could not log in")
logger.debug("Login success")
# drop_to_shell(local=locals())
f = b.get_and_post_form(SCHEME+"://%s:%s/opts/opt_in"%
(test_settings.HOST, test_settings.OM_PORT),
dict(experiment=1))
logger.debug("Posted opt-in request, reading response.")
res = f.read()
self.assertEqual(f.code, 200)
self.assertTrue("successfully" in res, "Did not get successful opt in message: %s" % res)
# test if FV has the expected flowspace match entries
from expedient.common.tests.utils import run_cmd
fvctl = run_cmd(
"sh %s/scripts/fvctl.sh listFlowSpace" % (
test_settings.FLOWVISOR_DIR,
),
)
data = fvctl.communicate(input="rootpassword")
fv_rules = data[0].split("\n")
print(fv_rules)
#for fv_rule in fv_rules
logger.debug("Response fine, opting out.")
# now test opt out:
f = b.get_and_post_form(SCHEME+"://%s:%s/opts/opt_out"%
(test_settings.HOST, test_settings.OM_PORT),
{"1":"checked"})
res = f.read()
self.assertEqual(f.code, 200)
self.assertTrue("Successful" in res, "Returned %s"%res)
def test_UserOptIn2(self):
"""
Test a user opting in.
"""
from expedient.common.tests.client import Browser
from openflow.optin_manager.opts.models import Experiment,ExperimentFLowSpace
import random
slices = self.fv_clients[0].api.listSlices()
self.assertEqual(len(slices), 1) # root
# get the resources
slice_urn, cred = self.create_ch_slice()
options = dict(geni_compressed=False, geni_available=True)
rspec = self.am_client.ListResources(cred, options)
# Create switches and links
switches, links = parse_rspec(rspec)
exp = Experiment.objects.create(slice_id="slice_id", project_name="project name",
project_desc="project description", slice_name="slice name",
slice_desc="slice description", controller_url="controller url",
owner_email="owner email", owner_password="owner password")
for switch in switches:
exp_ip_src_s = random.randint(0,0x80000000) & 0xFFFF0000
exp_ip_src_e = random.randint(0x80000000,0xFFFFFFFF) & 0xFFFF0000
ExperimentFLowSpace.objects.create(exp=exp, dpid=switch.dpid,
ip_src_s=exp_ip_src_s,
ip_src_e=exp_ip_src_e,
)
logger.debug("Done creating sliver")
# Get user to opt in
logger.debug("Logging into browser")
b = Browser()
logged_in = b.login(SCHEME+"://%s:%s/accounts/login/"%
(test_settings.HOST, test_settings.OM_PORT),
"user","password")
self.assertTrue(logged_in,"Could not log in")
logger.debug("Login success")
# drop_to_shell(local=locals())
f = b.get_and_post_form(SCHEME+"://%s:%s/opts/opt_in"%
(test_settings.HOST, test_settings.OM_PORT),
dict(experiment=1))
logger.debug("Posted opt-in request, reading response.")
res = f.read()
self.assertEqual(f.code, 200)
self.assertTrue("successfully" in res, "Did not get successful opt in message: %s" % res)
logger.debug("Response fine, opting out.")
# now test opt out:
f = b.get_and_post_form(SCHEME+"://%s:%s/opts/opt_out"%
(test_settings.HOST, test_settings.OM_PORT),
{"1":"checked"})
res = f.read()
self.assertEqual(f.code, 200)
self.assertTrue("Successful" in res, "Returned %s"%res)
if __name__ == '__main__':
import unittest
unittest.main()
| bsd-3-clause | -1,563,624,186,552,810,200 | 35.213353 | 97 | 0.539497 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/xml/dom/pulldom.py | 1 | 10143 | import xml.sax
import xml.sax.handler
import types
try:
_StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:
_StringTypes = [types.StringType]
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
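# Event type tokens: PullDOM/DOMEventStream produce (event type, node) pairs
# using one of the constants above.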
class PullDOM(xml.sax.ContentHandler):
_locator = None
document = None
def __init__(self, documentFactory=None):
self.documentFactory = documentFactory
self.firstEvent = [None, None]
self.lastEvent = self.firstEvent
self.elementStack = []
self.push = self.elementStack.append
try:
self.pop = self.elementStack.pop
except AttributeError:
# use class' pop instead
pass
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self.pending_events = []
def pop(self):
result = self.elementStack[-1]
del self.elementStack[-1]
return result
def setDocumentLocator(self, locator):
self._locator = locator
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix or ''
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts.pop()
def startElementNS(self, name, tagName , attrs):
uri, localname = name
if uri:
# When using namespaces, the reader may or may not
# provide us with the original name. If not, create
# *a* valid tagName from the current context.
if tagName is None:
prefix = self._current_context[uri]
if prefix:
tagName = prefix + ":" + localname
else:
tagName = localname
if self.document:
node = self.document.createElementNS(uri, tagName)
else:
node = self.buildDocument(uri, tagName)
else:
# When the tagname is not prefixed, it just appears as
# localname
if self.document:
node = self.document.createElement(localname)
else:
node = self.buildDocument(None, localname)
for aname,value in attrs.items():
a_uri, a_localname = aname
if a_uri:
prefix = self._current_context[a_uri]
if prefix:
qname = prefix + ":" + a_localname
else:
qname = a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
else:
attr = self.document.createAttribute(a_localname)
node.setAttributeNode(attr)
attr.value = value
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElementNS(self, name, tagName):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def startElement(self, name, attrs):
if self.document:
node = self.document.createElement(name)
else:
node = self.buildDocument(None, name)
for aname,value in attrs.items():
attr = self.document.createAttribute(aname)
attr.value = value
node.setAttributeNode(attr)
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElement(self, name):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def comment(self, s):
if self.document:
node = self.document.createComment(s)
self.lastEvent[1] = [(COMMENT, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(COMMENT, s), None]
self.pending_events.append(event)
def processingInstruction(self, target, data):
if self.document:
node = self.document.createProcessingInstruction(target, data)
self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(PROCESSING_INSTRUCTION, target, data), None]
self.pending_events.append(event)
def ignorableWhitespace(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
self.lastEvent = self.lastEvent[1]
def characters(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(CHARACTERS, node), None]
self.lastEvent = self.lastEvent[1]
def startDocument(self):
if self.documentFactory is None:
import xml.dom.minidom
self.documentFactory = xml.dom.minidom.Document.implementation
def buildDocument(self, uri, tagname):
# Can't do that in startDocument, since we need the tagname
# XXX: obtain DocumentType
node = self.documentFactory.createDocument(uri, tagname, None)
self.document = node
self.lastEvent[1] = [(START_DOCUMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
# Put everything we have seen so far into the document
for e in self.pending_events:
if e[0][0] == PROCESSING_INSTRUCTION:
_,target,data = e[0]
n = self.document.createProcessingInstruction(target, data)
e[0] = (PROCESSING_INSTRUCTION, n)
elif e[0][0] == COMMENT:
n = self.document.createComment(e[0][1])
e[0] = (COMMENT, n)
else:
raise AssertionError("Unknown pending event ",e[0][0])
self.lastEvent[1] = e
self.lastEvent = e
self.pending_events = None
return node.firstChild
def endDocument(self):
self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
self.pop()
def clear(self):
"clear(): Explicitly release parsing structures"
self.document = None
class ErrorHandler:
def warning(self, exception):
print exception
def error(self, exception):
raise exception
def fatalError(self, exception):
raise exception
class DOMEventStream:
def __init__(self, stream, parser, bufsize):
self.stream = stream
self.parser = parser
self.bufsize = bufsize
self.reset()
def reset(self):
self.pulldom = PullDOM()
# This content handler relies on namespace support
self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
self.parser.setContentHandler(self.pulldom)
def __getitem__(self, pos):
rc = self.getEvent()
if rc:
return rc
raise IndexError
def expandNode(self, node):
event = self.getEvent()
parents = [node]
while event:
token, cur_node = event
if cur_node is node:
return
if token != END_ELEMENT:
parents[-1].appendChild(cur_node)
if token == START_ELEMENT:
parents.append(cur_node)
elif token == END_ELEMENT:
del parents[-1]
event = self.getEvent()
def getEvent(self):
if not self.pulldom.firstEvent[1]:
self.pulldom.lastEvent = self.pulldom.firstEvent
while not self.pulldom.firstEvent[1]:
buf = self.stream.read(self.bufsize)
if not buf:
self.parser.close()
return None
self.parser.feed(buf)
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def clear(self):
"clear(): Explicitly release parsing objects"
self.pulldom.clear()
del self.pulldom
self.parser = None
self.stream = None
class SAX2DOM(PullDOM):
def startElementNS(self, name, tagName , attrs):
PullDOM.startElementNS(self, name, tagName, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def startElement(self, name, attrs):
PullDOM.startElement(self, name, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def processingInstruction(self, target, data):
PullDOM.processingInstruction(self, target, data)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def ignorableWhitespace(self, chars):
PullDOM.ignorableWhitespace(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def characters(self, chars):
PullDOM.characters(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
if bufsize is None:
bufsize = default_bufsize
if type(stream_or_string) in _StringTypes:
stream = open(stream_or_string)
else:
stream = stream_or_string
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(stream, parser, bufsize)
def parseString(string, parser=None):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
bufsize = len(string)
buf = StringIO(string)
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(buf, parser, bufsize)
| mit | -1,154,279,993,284,917,200 | 32.586093 | 75 | 0.5944 | false |
amschaal/bioshare | bioshareX/api/views.py | 1 | 18186 | # Create your views here.
from django.core.urlresolvers import reverse
from django.http.response import JsonResponse, HttpResponse
from settings.settings import AUTHORIZED_KEYS_FILE, SITE_URL
from bioshareX.models import Share, SSHKey, MetaData, Tag
from bioshareX.forms import MetaDataForm, json_form_validate
from guardian.shortcuts import get_perms, get_users_with_perms, remove_perm, assign_perm
from bioshareX.utils import JSONDecorator, json_response, json_error, share_access_decorator, safe_path_decorator, validate_email, fetchall,\
test_path, du
from django.contrib.auth.models import User, Group
from django.db.models import Q
import os
from rest_framework.decorators import api_view, detail_route, throttle_classes,\
action
from bioshareX.forms import ShareForm
from guardian.decorators import permission_required
from bioshareX.utils import ajax_login_required, email_users
from rest_framework import generics, viewsets, status
from bioshareX.models import ShareLog, Message
from bioshareX.api.serializers import ShareLogSerializer, ShareSerializer,\
GroupSerializer, UserSerializer, MessageSerializer
from rest_framework.permissions import DjangoModelPermissions, IsAuthenticated
from bioshareX.permissions import ManageGroupPermission
from rest_framework.response import Response
from guardian.models import UserObjectPermission
from django.contrib.contenttypes.models import ContentType
import datetime
from bioshareX.api.filters import UserShareFilter, ShareTagFilter,\
GroupShareFilter, ActiveMessageFilter
from rest_framework.throttling import UserRateThrottle
from django.utils import timezone
import csv
@ajax_login_required
def get_user(request):
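    """Look up a user by username or email (?query=...) and return it serialized."""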
query = request.GET.get('query')
try:
user = User.objects.get(Q(username=query)|Q(email=query))
return JsonResponse({'user':UserSerializer(user).data})
except Exception, e:
return JsonResponse({'status':'error','query':query,'errors':[e.message]},status=status.HTTP_404_NOT_FOUND)
@ajax_login_required
def get_address_book(request):
try:
emails = User.objects.filter(shareuserobjectpermission__content_object__in=Share.objects.filter(owner=request.user).values_list('id')).values_list('email').distinct().order_by('email')
groups = Group.objects.all().order_by('name')
return json_response({'emails':[email[0] for email in emails], 'groups':[g.name for g in groups]})
except Exception, e:
return json_error([e.message])
@ajax_login_required
def get_tags(request):
try:
tags = Tag.objects.filter(name__icontains=request.GET.get('tag'))
return json_response({'tags':[tag.name for tag in tags]})
except Exception, e:
return json_error([e.message])
@share_access_decorator(['admin'])
def share_with(request,share):
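    """Classify a comma-separated query of email addresses and "group:<name>"
    entries into existing users, new users, groups, and invalid entries."""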
query = request.POST.get('query',request.GET.get('query'))
exists = []
new_users = []
groups = []
invalid = []
try:
emails = [email.strip().lower() for email in query.split(',')]
for email in emails:
if email == '':
continue
if email.startswith('group:'):
name = email.split('group:')[1].lower()
try:
group = Group.objects.get(name__iexact=name)
groups.append({'group':{'id':group.id,'name':group.name}})
except:
invalid.append(name)
elif validate_email(email):
try:
user = User.objects.get(email=email)
exists.append({'user':{'username':email}})
except:
new_users.append({'user':{'username':email}})
else:
invalid.append(email)
return json_response({'exists':exists, 'groups':groups,'new_users':new_users,'invalid':invalid})
except Exception, e:
return json_error([e.message])
@ajax_login_required
def share_autocomplete(request):
terms = [term.strip() for term in request.GET.get('query').split()]
query = reduce(lambda q,value: q&Q(name__icontains=value), terms , Q())
try:
share_objs = Share.user_queryset(request.user).filter(query).order_by('-created')[:10]
shares = [{'id':s.id,'url':reverse('list_directory',kwargs={'share':s.id}),'name':s.name,'notes':s.notes} for s in share_objs]
return json_response({'status':'success','shares':shares})
except Exception, e:
return json_error([e.message])
def get_group(request):
query = request.GET.get('query')
try:
group = Group.objects.get(name=query)
return json_response({'group':{'name':group.name}})
except Exception, e:
return json_error([e.message])
@api_view(['GET'])
@share_access_decorator(['admin'])
def get_permissions(request,share):
data = share.get_permissions(user_specific=True)
return json_response(data)
@share_access_decorator(['admin'])
@JSONDecorator
def update_share(request,share,json=None):
share.secure = json['secure']
share.save()
return json_response({'status':'okay'})
@api_view(['POST'])
@share_access_decorator(['admin'])
@JSONDecorator
def set_permissions(request,share,json=None):
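    """Set per-user and per-group permissions on a share. Expects JSON of the
    form {"users": {<username>: [perms]}, "groups": {<group id>: [perms]},
    "email": <bool>}; unknown users are created and notified as needed."""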
from smtplib import SMTPException
emailed=[]
created=[]
failed=[]
# if not request.user.has_perm('admin',share):
# return json_response({'status':'error','error':'You do not have permission to write to this share.'})
if json.has_key('groups'):
for group, permissions in json['groups'].iteritems():
g = Group.objects.get(id__iexact=group)
current_perms = get_perms(g,share)
removed_perms = list(set(current_perms) - set(permissions))
added_perms = list(set(permissions) - set(current_perms))
for u in g.user_set.all():
if len(share.get_user_permissions(u,user_specific=True)) == 0 and len(added_perms) > 0 and json['email']:
email_users([u],'share/share_subject.txt','share/share_email_body.txt',{'user':u,'share':share,'sharer':request.user,'site_url':SITE_URL})
emailed.append(u.username)
for perm in removed_perms:
remove_perm(perm,g,share)
for perm in added_perms:
assign_perm(perm,g,share)
if json.has_key('users'):
for username, permissions in json['users'].iteritems():
username = username.lower()
try:
u = User.objects.get(username__iexact=username)
if len(share.get_user_permissions(u,user_specific=True)) == 0 and json['email']:
try:
email_users([u],'share/share_subject.txt','share/share_email_body.txt',{'user':u,'share':share,'sharer':request.user,'site_url':SITE_URL})
emailed.append(username)
except:
failed.append(username)
except:
if len(permissions) > 0:
password = User.objects.make_random_password()
u = User(username=username,email=username)
u.set_password(password)
u.save()
try:
email_users([u],'share/share_subject.txt','share/share_new_email_body.txt',{'user':u,'password':password,'share':share,'sharer':request.user,'site_url':SITE_URL})
created.append(username)
except:
failed.append(username)
u.delete()
current_perms = share.get_user_permissions(u,user_specific=True)
print 'CURRENT'
print current_perms
print 'PERMISSIONS'
print permissions
removed_perms = list(set(current_perms) - set(permissions))
added_perms = list(set(permissions) - set(current_perms))
print 'ADDING: '
print added_perms
print 'REMOVING: '
print removed_perms
for perm in removed_perms:
if u.username not in failed:
remove_perm(perm,u,share)
for perm in added_perms:
if u.username not in failed:
assign_perm(perm,u,share)
data = share.get_permissions(user_specific=True)
data['messages']=[]
if len(emailed) > 0:
data['messages'].append({'type':'info','content':'%s has/have been emailed'%', '.join(emailed)})
if len(created) > 0:
data['messages'].append({'type':'info','content':'Accounts has/have been created and emails have been sent to the following email addresses: %s'%', '.join(created)})
if len(failed) > 0:
data['messages'].append({'type':'info','content':'Delivery has failed to the following addresses: %s'%', '.join(failed)})
data['json']=json
return json_response(data)
@share_access_decorator(['view_share_files'])
def search_share(request,share,subdir=None):
from bioshareX.utils import find
query = request.GET.get('query',False)
response={}
if query:
response['results'] = find(share,"*%s*"%query,subdir)
else:
response = {'status':'error'}
return json_response(response)
@safe_path_decorator()
@share_access_decorator(['write_to_share'])
def edit_metadata(request, share, subpath):
try:
if share.get_path_type(subpath) is None:
raise Exception('The specified file or folder does not exist in this share.')
metadata = MetaData.objects.get_or_create(share=share, subpath=subpath)[0]
form = MetaDataForm(request.POST if request.method == 'POST' else request.GET)
data = json_form_validate(form)
if not form.is_valid():
return json_response(data)#return json_error(form.errors)
tags = []
for tag in form.cleaned_data['tags'].split(','):
tag = tag.strip()
if len(tag) >2 :
tags.append(Tag.objects.get_or_create(name=tag)[0])
metadata.tags = tags
metadata.notes = form.cleaned_data['notes']
metadata.save()
name = os.path.basename(os.path.normpath(subpath))
return json_response({'name':name,'notes':metadata.notes,'tags':[tag.name for tag in tags]})
except Exception, e:
return json_error([str(e)])
@ajax_login_required
def delete_ssh_key(request):
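    """Delete one of the requesting user's SSH keys (by POSTed id) and remove the
    matching entry from AUTHORIZED_KEYS_FILE."""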
try:
id = request.POST.get('id')
key = SSHKey.objects.get(user=request.user,id=id)
# subprocess.call(['/bin/chmod','600',AUTHORIZED_KEYS_FILE])
keystring = key.get_key()
# remove_me = keystring.replace('/','\\/')#re.escape(key.extract_key())
# command = ['/bin/sed','-i','/%s/d'%remove_me,AUTHORIZED_KEYS_FILE]
# subprocess.check_call(command)
f = open(AUTHORIZED_KEYS_FILE,"r")
lines = f.readlines()
f.close()
f = open(AUTHORIZED_KEYS_FILE,"w")
for line in lines:
if line.find(keystring) ==-1:
f.write(line)
f.close()
# subprocess.call(['/bin/chmod','400',AUTHORIZED_KEYS_FILE])
key.delete()
SSHKey.objects.filter(key__contains=keystring).delete()
response = {'status':'success','deleted':id}
except Exception, e:
response = {'status':'error','message':'Unable to delete ssh key'+str(e)}
return json_response(response)
"""
Requires: "name", "notes", "filesystem" arguments.
Optional: "link_to_path", "read_only"
"""
@api_view(['POST'])
@permission_required('bioshareX.add_share', return_403=True)
def create_share(request):
form = ShareForm(request.user,request.data)
if form.is_valid():
share = form.save(commit=False)
share.owner=request.user
link_to_path = request.data.get('link_to_path',None)
if link_to_path:
if not request.user.has_perm('bioshareX.link_to_path'):
return JsonResponse({'error':"You do not have permission to link to a specific path."},status=400)
try:
share.save()
except Exception, e:
share.delete()
return JsonResponse({'error':e.message},status=400)
return JsonResponse({'url':"%s%s"%(SITE_URL,reverse('list_directory',kwargs={'share':share.id})),'id':share.id})
else:
return JsonResponse({'errors':form.errors},status=400)
@ajax_login_required
@share_access_decorator(['view_share_files'])
def email_participants(request,share,subdir=None):
try:
subject = request.POST.get('subject')
emails = request.POST.getlist('emails',[])
users = [u for u in get_users_with_perms(share, attach_perms=False, with_superusers=False, with_group_users=True)]
if len(emails) > 0:
users = [u for u in User.objects.filter(id__in=[u.id for u in users]).filter(email__in=emails)]
body = request.POST.get('body')
users.append(share.owner)
email_users(users, ctx_dict={}, subject=subject, body=body,from_email=request.user.email,content_subtype='plain')
response = {'status':'success','sent_to':[u.email for u in users]}
return json_response(response)
except Exception, e:
return JsonResponse({'errors':[str(e)]},status=400)
class ShareLogList(generics.ListAPIView):
serializer_class = ShareLogSerializer
permission_classes = (IsAuthenticated,)
filter_fields = {'action':['icontains'],'user__username':['icontains'],'text':['icontains'],'paths':['icontains'],'share':['exact']}
def get_queryset(self):
shares = Share.user_queryset(self.request.user,include_stats=False)
return ShareLog.objects.filter(share__in=shares)
class ShareViewset(viewsets.ReadOnlyModelViewSet):
serializer_class = ShareSerializer
permission_classes = (IsAuthenticated,)
filter_backends = generics.ListAPIView.filter_backends + [UserShareFilter,ShareTagFilter,GroupShareFilter]
filter_fields = {'name':['icontains'],'notes':['icontains'],'owner__username':['icontains'],'path_exists':['exact']}
ordering_fields = ('name','owner__username','created','updated','stats__num_files','stats__bytes')
def get_queryset(self):
return Share.user_queryset(self.request.user,include_stats=False).select_related('owner','stats').prefetch_related('tags','user_permissions__user','group_permissions__group')
@detail_route(['GET'])
@throttle_classes([UserRateThrottle])
def directory_size(self, request, *args, **kwargs):
share = self.get_object()
subdir = request.query_params.get('subdir','')
test_path(subdir,share=share)
size = du(os.path.join(share.get_path(),subdir))
return Response({'share':share.id,'subdir':subdir,'size':size})
@action(detail=False, methods=['GET'], permission_classes=[IsAuthenticated])
def export(self, request):
queryset = self.get_queryset()
serializer = self.get_serializer(queryset, many=True)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="shares_{}.csv"'.format(str(timezone.now())[:19].replace(' ','_'))
writer = csv.writer(response, delimiter='\t')
writer.writerow(['id','name','url','users','groups','bytes','tags','owner','slug','created','updated','secure','read_only','notes','path_exists'])
for r in serializer.data:
row = [r['id'],r['name'],r['url'],', '.join(r['users']),', '.join(r['groups']),r['stats'].get('bytes') if r['stats'] else '',', '.join([t['name'] for t in r['tags']]),r['owner'].get('username'),r['slug'],r['created'],r['updated'],r['secure'],r['read_only'],r['notes'],r['path_exists'] ]
writer.writerow([c.encode('ascii', 'replace') if hasattr(c,'decode') else c for c in row])
return response
class GroupViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = GroupSerializer
permission_classes = (IsAuthenticated,DjangoModelPermissions,)
filter_fields = {'name':['icontains']}
model = Group
def get_queryset(self):
if self.request.user.is_superuser or self.request.user.is_staff:
return Group.objects.all()
else:
return self.request.user.groups.all()
@detail_route(['POST'],permission_classes=[ManageGroupPermission])
def update_users(self, request, *args, **kwargs):
users = request.data.get('users')
group = self.get_object()
# old_users = GroupSerializer(group).data['users']
# old_user_ids = [u['id'] for u in old_users]
# remove_users = set(old_user_ids) - set(user_ids)
# add_users = set(user_ids) - set(old_user_ids)
group.user_set = [u['id'] for u in users]
#clear permissions
ct = ContentType.objects.get_for_model(Group)
UserObjectPermission.objects.filter(content_type=ct,object_pk=group.id).delete()
#assign permissions
for user in users:
if 'manage_group' in user['permissions']:
user = User.objects.get(id=user['id'])
assign_perm('manage_group', user, group)
return self.retrieve(request,*args,**kwargs)#Response({'status':'success'})
# @detail_route(['POST'])
# def remove_user(self,request,*args,**kwargs):
# # user = request.query_params.get('user')
# # self.get_object().user_set.remove(user)
# return Response({'status':'success'})
class MessageViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = MessageSerializer
permission_classes = (IsAuthenticated,)
filter_backends = (ActiveMessageFilter,)
model = Message
def get_queryset(self):
return Message.objects.all().order_by('-created')
# return Message.objects.filter(active=True).filter(Q(expires__gte=datetime.datetime.today())|Q(expires=None)).exclude(viewed_by__id=self.request.user.id)
@detail_route(['POST','GET'],permission_classes=[IsAuthenticated])
def dismiss(self, request, pk=None):
message = self.get_object()
message.viewed_by.add(request.user)
message.save()
return Response({'status':'Message dismissed'})
| mit | -2,161,718,598,179,195,100 | 46.236364 | 298 | 0.63186 | false |
stephenfin/patchwork | patchwork/tests/api/test_series.py | 1 | 6798 | # Patchwork - automated patch tracking system
# Copyright (C) 2018 Stephen Finucane <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later
import unittest
from django.conf import settings
from django.urls import reverse
from patchwork.tests.api import utils
from patchwork.tests.utils import create_cover
from patchwork.tests.utils import create_maintainer
from patchwork.tests.utils import create_patch
from patchwork.tests.utils import create_person
from patchwork.tests.utils import create_project
from patchwork.tests.utils import create_series
from patchwork.tests.utils import create_user
if settings.ENABLE_REST_API:
from rest_framework import status
@unittest.skipUnless(settings.ENABLE_REST_API, 'requires ENABLE_REST_API')
class TestSeriesAPI(utils.APITestCase):
fixtures = ['default_tags']
@staticmethod
def api_url(item=None, version=None):
kwargs = {}
if version:
kwargs['version'] = version
if item is None:
return reverse('api-series-list', kwargs=kwargs)
kwargs['pk'] = item
return reverse('api-series-detail', kwargs=kwargs)
def assertSerialized(self, series_obj, series_json):
self.assertEqual(series_obj.id, series_json['id'])
self.assertEqual(series_obj.name, series_json['name'])
self.assertEqual(series_obj.version, series_json['version'])
self.assertEqual(series_obj.total, series_json['total'])
self.assertEqual(series_obj.received_total,
series_json['received_total'])
self.assertIn(series_obj.get_mbox_url(), series_json['mbox'])
self.assertIn(series_obj.get_absolute_url(), series_json['web_url'])
# nested fields
self.assertEqual(series_obj.project.id,
series_json['project']['id'])
self.assertEqual(series_obj.submitter.id,
series_json['submitter']['id'])
self.assertEqual(series_obj.cover_letter.id,
series_json['cover_letter']['id'])
self.assertEqual(series_obj.patches.count(),
len(series_json['patches']))
def test_list_empty(self):
"""List series when none are present."""
resp = self.client.get(self.api_url())
self.assertEqual(status.HTTP_200_OK, resp.status_code)
self.assertEqual(0, len(resp.data))
def _create_series(self):
project_obj = create_project(linkname='myproject')
person_obj = create_person(email='[email protected]')
series_obj = create_series(project=project_obj, submitter=person_obj)
create_cover(series=series_obj)
create_patch(series=series_obj)
return series_obj
def test_list_anonymous(self):
"""List patches as anonymous user."""
series = self._create_series()
resp = self.client.get(self.api_url())
self.assertEqual(status.HTTP_200_OK, resp.status_code)
self.assertEqual(1, len(resp.data))
series_rsp = resp.data[0]
self.assertSerialized(series, series_rsp)
@utils.store_samples('series-list')
def test_list_authenticated(self):
"""List series as an authenticated user."""
series = self._create_series()
user = create_user()
self.client.force_authenticate(user=user)
resp = self.client.get(self.api_url())
self.assertEqual(status.HTTP_200_OK, resp.status_code)
self.assertEqual(1, len(resp.data))
series_rsp = resp.data[0]
self.assertSerialized(series, series_rsp)
def test_list_filter_project(self):
"""Filter series by project."""
series = self._create_series()
resp = self.client.get(self.api_url(), {'project': 'myproject'})
self.assertEqual([series.id], [x['id'] for x in resp.data])
resp = self.client.get(self.api_url(), {'project': 'invalidproject'})
self.assertEqual(0, len(resp.data))
def test_list_filter_owner(self):
"""Filter series by owner."""
series = self._create_series()
submitter = series.submitter
resp = self.client.get(self.api_url(), {'submitter': submitter.id})
self.assertEqual([series.id], [x['id'] for x in resp.data])
resp = self.client.get(self.api_url(), {
'submitter': '[email protected]'})
self.assertEqual([series.id], [x['id'] for x in resp.data])
resp = self.client.get(self.api_url(), {
'submitter': '[email protected]'})
self.assertEqual(0, len(resp.data))
@utils.store_samples('series-list-1-0')
def test_list_version_1_0(self):
"""List patches using API v1.0.
Validate that newer fields are dropped for older API versions.
"""
self._create_series()
resp = self.client.get(self.api_url(version='1.0'))
self.assertEqual(status.HTTP_200_OK, resp.status_code)
self.assertEqual(1, len(resp.data))
self.assertIn('url', resp.data[0])
self.assertNotIn('web_url', resp.data[0])
self.assertNotIn('web_url', resp.data[0]['cover_letter'])
self.assertNotIn('mbox', resp.data[0]['cover_letter'])
self.assertNotIn('web_url', resp.data[0]['patches'][0])
@utils.store_samples('series-detail')
def test_detail(self):
"""Show series."""
series = self._create_series()
resp = self.client.get(self.api_url(series.id))
self.assertEqual(status.HTTP_200_OK, resp.status_code)
self.assertSerialized(series, resp.data)
@utils.store_samples('series-detail-1-0')
def test_detail_version_1_0(self):
"""Show series using API v1.0."""
series = self._create_series()
resp = self.client.get(self.api_url(series.id, version='1.0'))
self.assertIn('url', resp.data)
self.assertNotIn('web_url', resp.data)
self.assertNotIn('web_url', resp.data['cover_letter'])
self.assertNotIn('mbox', resp.data['cover_letter'])
self.assertNotIn('web_url', resp.data['patches'][0])
def test_create_update_delete(self):
"""Ensure creates, updates and deletes aren't allowed"""
user = create_maintainer()
user.is_superuser = True
user.save()
self.client.force_authenticate(user=user)
resp = self.client.post(self.api_url(), {'name': 'Test'})
self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
series = create_series()
resp = self.client.patch(self.api_url(series.id), {'name': 'Test'})
self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
resp = self.client.delete(self.api_url(series.id))
self.assertEqual(status.HTTP_405_METHOD_NOT_ALLOWED, resp.status_code)
| gpl-2.0 | 8,406,626,449,727,095,000 | 37.191011 | 78 | 0.634304 | false |
tommy-u/enable | enable/wx/base_window.py | 1 | 19538 | """
Defines the concrete top-level Enable 'Window' class for the wxPython GUI
toolkit, based on the kiva agg driver.
"""
from __future__ import absolute_import
import sys
import time
import wx
from traits.api import Any, Instance, Trait
from traitsui.wx.menu import MakeMenu
# Relative imports
from enable.events import MouseEvent, KeyEvent, DragEvent
from enable.abstract_window import AbstractWindow
from .constants import DRAG_RESULTS_MAP, POINTER_MAP, KEY_MAP
try:
from pyface.wx.drag_and_drop import clipboard, PythonDropTarget
except ImportError:
clipboard = None
PythonDropTarget = None
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
# Number of pixels to scroll at a time:
scroll_incr = 16
# Reusable instance to avoid constructor/destructor overhead:
wx_rect = wx.Rect( 0, 0, 0, 0 )
# Default 'fake' start event for wxPython based drag operations:
default_start_event = MouseEvent()
# To conserve system resources, there is only one 'timer' per program:
system_timer = None
class EnableTimer ( wx.Timer ):
"""
This class maintains a 'work list' of scheduled components, where
each item in the list has the form: [ component, interval, timer_pop_time ]
"""
def __init__ ( self ):
wx.Timer.__init__( self )
self._work_list = []
return
def schedule ( self, component, interval ):
"Schedule a timer event for a specified component"
work_list = self._work_list
if len( work_list ) == 0:
self.Start( 5, oneShot=False )
for i, item in enumerate( work_list ):
if component is item[0]:
del work_list[i]
break
self.reschedule( component, interval )
return
def reschedule ( self, component, interval ):
"Reshedule a recurring timer event for a component"
pop_time = time.time() + interval
new_item = [ component, interval, pop_time ]
work_list = self._work_list
for i, item in enumerate( work_list ):
if pop_time < item[2]:
work_list.insert( i, new_item )
break
else:
work_list.append( new_item )
return
def cancel ( self, component ):
"Cancel any pending timer events for a component"
work_list = self._work_list
for i, item in enumerate( work_list ):
if component is item[0]:
del work_list[i]
if len( work_list ) == 0:
self.Stop()
break
return (len( work_list ) != 0)
def Notify ( self ):
"Handle a timer 'pop' event; used for performance testing purposes"
now = time.time()
work_list = self._work_list
n = len( work_list )
i = 0
while (i < n) and (now >= work_list[i][2]):
i += 1
if i > 0:
reschedule = work_list[:i]
del work_list[:i]
for component, interval, ignore in reschedule:
self.reschedule( component, interval )
component.timer = True
return
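# A minimal usage sketch (illustrative only): a component that needs periodic
# callbacks is registered against the shared timer, e.g.
#
#     timer = EnableTimer()
#     timer.schedule(component, 0.5)   # pop roughly every 0.5 seconds
#     ...
#     timer.cancel(component)          # stop further pops
#
# BaseWindow.set_timer_interval() below wraps this same pattern around the
# module-level `system_timer` instance.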
class LessSuckyDropTarget(PythonDropTarget):
""" The sole purpose of this class is to override the implementation
of OnDragOver() in the parent class to NOT short-circuit return the
'default_drag_result' if the drop_source is None. (The parent class
implementation basically means that everything looks like it's OK to
drop, and the DnD handler doesn't ever get a chance to intercept or
veto.)
"""
def OnDragOver(self, x, y, default_drag_result):
# This is a cut-and-paste job of the parent class implementation.
# Please refer to its comments.
if clipboard.drop_source is not None and \
not clipboard.drop_source.allow_move:
default_drag_result = wx.DragCopy
if hasattr(self.handler, 'wx_drag_over'):
drag_result = self.handler.wx_drag_over(
x, y, clipboard.data, default_drag_result
)
else:
drag_result = default_drag_result
return drag_result
class BaseWindow(AbstractWindow):
# Screen scroll increment amount:
scroll_incr = ( wx.SystemSettings_GetMetric( wx.SYS_SCREEN_Y )
or 768 ) / 20
# Width/Height of standard scrollbars:
scrollbar_dx = wx.SystemSettings_GetMetric( wx.SYS_VSCROLL_X )
scrollbar_dy = wx.SystemSettings_GetMetric( wx.SYS_HSCROLL_Y )
_cursor_color = Any # PZW: figure out the correct type for this...
# Reference to the actual wxPython window:
control = Instance(wx.Window)
# This is set by downstream components to notify us of whether or not
# the current drag operation should return DragCopy, DragMove, or DragNone.
_drag_result = Any
def __init__(self, parent, wid=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, **traits):
AbstractWindow.__init__(self, **traits)
self._timer = None
self._mouse_captured = False
# Due to wx wonkiness, we don't reliably get cursor position from
# a wx KeyEvent. Thus, we manually keep track of when we last saw
# the mouse and use that information instead. These coordinates are
# in the wx coordinate space, i.e. pre-self._flip_y().
self._last_mouse_pos = (0, 0)
# Create the delegate:
self.control = control = self._create_control(parent, wid, pos, size)
# Set up the 'erase background' event handler:
wx.EVT_ERASE_BACKGROUND( control, self._on_erase_background )
# Set up the 'paint' event handler:
wx.EVT_PAINT( control, self._paint )
wx.EVT_SIZE( control, self._on_size )
# Set up mouse event handlers:
wx.EVT_LEFT_DOWN( control, self._on_left_down )
wx.EVT_LEFT_UP( control, self._on_left_up )
wx.EVT_LEFT_DCLICK( control, self._on_left_dclick )
wx.EVT_MIDDLE_DOWN( control, self._on_middle_down )
wx.EVT_MIDDLE_UP( control, self._on_middle_up )
wx.EVT_MIDDLE_DCLICK( control, self._on_middle_dclick )
wx.EVT_RIGHT_DOWN( control, self._on_right_down )
wx.EVT_RIGHT_UP( control, self._on_right_up )
wx.EVT_RIGHT_DCLICK( control, self._on_right_dclick )
wx.EVT_MOTION( control, self._on_mouse_move )
wx.EVT_ENTER_WINDOW( control, self._on_window_enter )
wx.EVT_LEAVE_WINDOW( control, self._on_window_leave )
wx.EVT_MOUSEWHEEL( control, self._on_mouse_wheel )
# Handle key up/down events:
wx.EVT_KEY_DOWN( control, self._on_key_pressed )
wx.EVT_KEY_UP( control, self._on_key_released )
wx.EVT_CHAR( control, self._on_character )
# Attempt to allow wxPython drag and drop events to be mapped to
# Enable drag events:
# Handle window close and cleanup
wx.EVT_WINDOW_DESTROY(control, self._on_close)
if PythonDropTarget is not None:
control.SetDropTarget( LessSuckyDropTarget( self ) )
self._drag_over = []
# In some cases, on the Mac at least, we never get an initial EVT_SIZE
# since the initial size is correct. Because of this we call _on_size
# here to initialize our bounds.
self._on_size(None)
return
def _create_control(self, parent, wid, pos = wx.DefaultPosition,
size = wx.DefaultSize):
return wx.Window(parent, wid, pos, size, style = wx.CLIP_CHILDREN |
wx.WANTS_CHARS)
def _on_close(self, event):
# Might be scrollbars or other native components under
# us that are generating this event
if event.GetWindow() == self.control:
self._gc = None
wx.EVT_ERASE_BACKGROUND(self.control, None)
wx.EVT_PAINT(self.control, None)
wx.EVT_SIZE(self.control, None)
wx.EVT_LEFT_DOWN(self.control, None)
wx.EVT_LEFT_UP(self.control, None)
wx.EVT_LEFT_DCLICK(self.control, None)
wx.EVT_MIDDLE_DOWN(self.control, None)
wx.EVT_MIDDLE_UP(self.control, None)
wx.EVT_MIDDLE_DCLICK(self.control, None)
wx.EVT_RIGHT_DOWN(self.control, None)
wx.EVT_RIGHT_UP(self.control, None)
wx.EVT_RIGHT_DCLICK(self.control, None)
wx.EVT_MOTION(self.control, None)
wx.EVT_ENTER_WINDOW(self.control, None)
wx.EVT_LEAVE_WINDOW(self.control, None)
wx.EVT_MOUSEWHEEL(self.control, None)
wx.EVT_KEY_DOWN(self.control, None)
wx.EVT_KEY_UP(self.control, None)
wx.EVT_CHAR(self.control, None)
wx.EVT_WINDOW_DESTROY(self.control, None)
self.control.SetDropTarget(None)
self.control = None
self.component.cleanup(self)
self.component.parent = None
self.component.window = None
self.component = None
return
def _flip_y ( self, y ):
"Convert from a Kiva to a wxPython y coordinate"
return int( self._size[1] - 1 - y )
def _on_erase_background ( self, event ):
pass
def _on_size ( self, event ):
dx, dy = self.control.GetSizeTuple()
# do nothing if the new and old sizes are the same
if (self.component.outer_width, self.component.outer_height) == (dx, dy):
return
self.resized = (dx, dy)
if getattr(self.component, "fit_window", False):
self.component.outer_position = [0,0]
self.component.outer_bounds = [dx, dy]
elif hasattr(self.component, "resizable"):
if "h" in self.component.resizable:
self.component.outer_x = 0
self.component.outer_width = dx
if "v" in self.component.resizable:
self.component.outer_y = 0
self.component.outer_height = dy
self.control.Refresh()
return
def _capture_mouse ( self ):
"Capture all future mouse events"
if not self._mouse_captured:
self._mouse_captured = True
self.control.CaptureMouse()
return
def _release_mouse ( self ):
"Release the mouse capture"
if self._mouse_captured:
self._mouse_captured = False
self.control.ReleaseMouse()
return
def _on_key_pressed(self, event):
handled = self._handle_key_event('key_pressed', event)
if not handled:
event.Skip()
def _on_key_released(self, event):
handled = self._handle_key_event('key_released', event)
if not handled:
event.Skip()
def _create_key_event(self, event_type, event):
""" Convert a GUI toolkit keyboard event into a KeyEvent.
"""
if self.focus_owner is None:
focus_owner = self.component
else:
focus_owner = self.focus_owner
if focus_owner is not None:
if event_type == 'character':
key = unichr(event.GetUniChar())
if not key:
return None
else:
key_code = event.GetKeyCode()
if key_code in KEY_MAP:
key = KEY_MAP.get(key_code)
else:
key = unichr(event.GetUniChar()).lower()
# Use the last-seen mouse coordinates instead of GetX/GetY due
# to wx bug.
x, y = self._last_mouse_pos
# Someday when wx does this properly, we can use these instead:
# x = event.GetX()
# y = event.GetY()
return KeyEvent(
event_type = event_type,
character = key,
alt_down = event.AltDown(),
control_down = event.ControlDown(),
shift_down = event.ShiftDown(),
x = x,
y = self._flip_y(y),
event = event,
window = self)
else:
event.Skip()
return None
def _create_mouse_event ( self, event ):
"Convert a GUI toolkit mouse event into a MouseEvent"
if event is not None:
x = event.GetX()
y = event.GetY()
self._last_mouse_pos = (x, y)
mouse_wheel = ((event.GetLinesPerAction() *
event.GetWheelRotation()) /
(event.GetWheelDelta() or 1))
# Note: The following code fixes a bug in wxPython that returns
# 'mouse_wheel' events in screen coordinates, rather than window
# coordinates:
if float(wx.VERSION_STRING[:3]) < 2.8:
if mouse_wheel != 0 and sys.platform == "win32":
x, y = self.control.ScreenToClientXY( x, y )
return MouseEvent( x = x,
y = self._flip_y( y ),
alt_down = event.AltDown(),
control_down = event.ControlDown(),
shift_down = event.ShiftDown(),
left_down = event.LeftIsDown(),
middle_down = event.MiddleIsDown(),
right_down = event.RightIsDown(),
mouse_wheel = mouse_wheel,
window = self )
# If no event specified, make one up:
x, y = wx.GetMousePosition()
x, y = self.control.ScreenToClientXY( x, y )
self._last_mouse_pos = (x, y)
return MouseEvent( x = x,
y = self._flip_y( y ),
alt_down = self.alt_pressed,
control_down = self.ctrl_pressed,
shift_down = self.shift_pressed,
left_down = False,
middle_down = False,
right_down = False,
mouse_wheel = 0,
window = self)
def _create_gc(self, size, pix_format=None):
"Create a Kiva graphics context of a specified size"
raise NotImplementedError
def _redraw(self, coordinates=None):
"Request a redraw of the window"
if coordinates is None:
if self.control:
self.control.Refresh(False)
else:
xl, yb, xr, yt = coordinates
rect = wx_rect
rect.SetX( int( xl ) )
rect.SetY( int( self._flip_y( yt - 1 ) ) )
rect.SetWidth( int( xr - xl ) )
rect.SetHeight( int( yt - yb ) )
if self.control:
self.control.Refresh(False, rect)
return
def _get_control_size ( self ):
"Get the size of the underlying toolkit control"
result = None
if self.control:
result = self.control.GetSizeTuple()
return result
def _window_paint ( self, event):
"Do a GUI toolkit specific screen update"
raise NotImplementedError
def set_pointer ( self, pointer ):
"Set the current pointer (i.e. cursor) shape"
ptr = POINTER_MAP[ pointer ]
if type( ptr ) is int:
POINTER_MAP[ pointer ] = ptr = wx.StockCursor( ptr )
self.control.SetCursor( ptr )
return
def set_tooltip ( self, tooltip ):
"Set the current tooltip for the window"
wx.ToolTip_Enable( False )
self.control.SetToolTip( wx.ToolTip( tooltip ) )
wx.ToolTip_Enable( True )
return
def set_timer_interval ( self, component, interval ):
""" Set up or cancel a timer for a specified component. To cancel the
timer, set interval=None """
global system_timer
if interval is None:
if ((system_timer is not None) and
(not system_timer.cancel( component ))):
system_timer = None
else:
if system_timer is None:
system_timer = EnableTimer()
system_timer.schedule( component, interval )
return
def _set_focus ( self ):
"Sets the keyboard focus to this window"
self.control.SetFocus()
return
def screen_to_window(self, x, y):
pt = wx.Point(x,y)
x,y = self.control.ScreenToClient(pt)
y = self._flip_y(y)
return x,y
def get_pointer_position(self):
"Returns the current pointer position in local window coordinates"
pos = wx.GetMousePosition()
return self.screen_to_window(pos.x, pos.y)
def set_drag_result(self, result):
if result not in DRAG_RESULTS_MAP:
            raise RuntimeError("Unknown drag result '%s'" % result)
self._drag_result = DRAG_RESULTS_MAP[result]
return
def wx_dropped_on ( self, x, y, drag_object, drop_result ):
"Handle wxPython drag and drop events"
# Process the 'dropped_on' event for the object(s) it was dropped on:
y = self._flip_y(y)
drag_event = DragEvent(x=x, y=y, obj=drag_object, window=self)
self._drag_result = wx.DragNone
if self.component.is_in(x, y):
self.component.dispatch(drag_event, "dropped_on")
        # If a downstream component wants to express that it handled the
        # drop, it will have set self._drag_result accordingly.
return self._drag_result
def wx_drag_over ( self, x, y, drag_object, drag_result ):
y = self._flip_y( y )
drag_over_event = DragEvent( x = x,
y = y,
x0 = 0.0,
y0 = 0.0,
copy = drag_result != wx.DragMove,
obj = drag_object,
start_event = default_start_event,
window = self )
# By default, don't indicate that we can be dropped on. It is up
# to the component to set this correctly.
self._drag_result = wx.DragNone
if self.component.is_in(x, y):
self.component.dispatch(drag_over_event, "drag_over")
return self._drag_result
def wx_drag_leave ( self, drag_object ):
drag_leave_event = DragEvent( x = 0.0,
y = 0.0,
x0 = 0.0,
y0 = 0.0,
copy = False,
obj = drag_object,
start_event = default_start_event,
window = self )
self.component.dispatch(drag_leave_event, "drag_leave")
return
def create_menu ( self, menu_definition, owner ):
"Create a wxMenu from a string description"
return MakeMenu( menu_definition, owner, True, self.control )
def popup_menu ( self, menu, x, y ):
"Pop-up a wxMenu at a specified location"
self.control.PopupMenuXY( menu.menu, int(x), int( self._flip_y(y) ) )
return
# EOF
| bsd-3-clause | -7,273,876,677,575,929,000 | 36.144487 | 81 | 0.542788 | false |
rwl/PyCIM | CIM14/CDPSM/Balanced/IEC61970/Wires/Switch.py | 1 | 2127 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CDPSM.Balanced.IEC61970.Core.ConductingEquipment import ConductingEquipment
class Switch(ConductingEquipment):
"""A generic device designed to close, or open, or both, one or more electric circuits.
"""
def __init__(self, normalOpen=False, *args, **kw_args):
"""Initialises a new 'Switch' instance.
        @param normalOpen: The attribute is used in cases when no Measurement for the status value is present. If the Switch has a status measurement, the Discrete.normalValue is expected to match the Switch.normalOpen.
"""
        #: The attribute is used in cases when no Measurement for the status value is present. If the Switch has a status measurement, the Discrete.normalValue is expected to match the Switch.normalOpen.
self.normalOpen = normalOpen
super(Switch, self).__init__(*args, **kw_args)
_attrs = ["normalOpen"]
_attr_types = {"normalOpen": bool}
_defaults = {"normalOpen": False}
_enums = {}
_refs = []
_many_refs = []
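# An assumed usage sketch (not prescribed by the CIM sources): a normally-open
# switch could be built as
#
#     sw = Switch(normalOpen=True)
#
# with any further keyword arguments forwarded to ConductingEquipment through
# *args/**kw_args in __init__ above.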
| mit | -5,704,789,517,243,640,000 | 48.465116 | 223 | 0.735778 | false |
mvidalgarcia/indico | indico/modules/rb/controllers/backend/admin.py | 1 | 22187 | # This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from io import BytesIO
from flask import jsonify, request, session
from marshmallow import missing, validate
from marshmallow_enum import EnumField
from sqlalchemy.orm import joinedload
from webargs import fields
from webargs.flaskparser import abort
from werkzeug.exceptions import Forbidden, NotFound
from indico.core.db import db
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.core.marshmallow import mm
from indico.modules.categories.models.categories import Category
from indico.modules.rb import logger, rb_settings
from indico.modules.rb.controllers import RHRoomBookingBase
from indico.modules.rb.controllers.backend.rooms import RHRoomsPermissions
from indico.modules.rb.models.equipment import EquipmentType, RoomEquipmentAssociation
from indico.modules.rb.models.locations import Location
from indico.modules.rb.models.map_areas import MapArea
from indico.modules.rb.models.photos import Photo
from indico.modules.rb.models.principals import RoomPrincipal
from indico.modules.rb.models.room_attributes import RoomAttribute, RoomAttributeAssociation
from indico.modules.rb.models.room_features import RoomFeature
from indico.modules.rb.models.rooms import Room
from indico.modules.rb.operations.admin import (create_area, delete_areas, update_area, update_room,
update_room_attributes, update_room_availability, update_room_equipment)
from indico.modules.rb.schemas import (AdminRoomSchema, RoomAttributeValuesSchema, admin_equipment_type_schema,
admin_locations_schema, bookable_hours_schema, map_areas_schema,
nonbookable_periods_schema, room_attribute_schema, room_equipment_schema,
room_feature_schema, room_update_schema)
from indico.modules.rb.util import (build_rooms_spritesheet, get_resized_room_photo, rb_is_admin,
remove_room_spritesheet_photo)
from indico.util.i18n import _
from indico.util.marshmallow import ModelList, Principal, PrincipalList, PrincipalPermissionList
from indico.web.args import use_args, use_kwargs
from indico.web.flask.util import send_file
from indico.web.util import ExpectedError
class RHRoomBookingAdminBase(RHRoomBookingBase):
def _check_access(self):
RHRoomBookingBase._check_access(self)
if not rb_is_admin(session.user):
raise Forbidden
class SettingsSchema(mm.Schema):
admin_principals = PrincipalList(allow_groups=True)
authorized_principals = PrincipalList(allow_groups=True)
tileserver_url = fields.String(validate=[
validate.URL(schemes={'http', 'https'}),
lambda value: all(x in value for x in ('{x}', '{y}', '{z}'))
], allow_none=True)
booking_limit = fields.Int(validate=[validate.Range(min=1)])
notifications_enabled = fields.Bool()
notification_before_days = fields.Int(validate=[validate.Range(min=1, max=30)])
notification_before_days_weekly = fields.Int(validate=[validate.Range(min=1, max=30)])
notification_before_days_monthly = fields.Int(validate=[validate.Range(min=1, max=30)])
end_notifications_enabled = fields.Bool()
end_notification_daily = fields.Int(validate=[validate.Range(min=1, max=30)])
end_notification_weekly = fields.Int(validate=[validate.Range(min=1, max=30)])
end_notification_monthly = fields.Int(validate=[validate.Range(min=1, max=30)])
excluded_categories = ModelList(Category)
grace_period = fields.Int(validate=[validate.Range(min=0, max=24)], allow_none=True)
class RHSettings(RHRoomBookingAdminBase):
def _jsonify_settings(self):
return SettingsSchema().jsonify(rb_settings.get_all())
def _process_GET(self):
return self._jsonify_settings()
@use_args(SettingsSchema)
def _process_PATCH(self, args):
rb_settings.set_multi(args)
return self._jsonify_settings()
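# An illustrative PATCH payload for RHSettings above (field names follow
# SettingsSchema; the values are made up):
#
#     {
#         "booking_limit": 30,
#         "notifications_enabled": true,
#         "notification_before_days": 2,
#         "tileserver_url": "https://tiles.example.org/{z}/{x}/{y}.png"
#     }
#
# Note that tileserver_url must keep the {x}, {y} and {z} placeholders to
# satisfy the custom validator defined in SettingsSchema.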
class RHLocations(RHRoomBookingAdminBase):
def _process_args(self):
id_ = request.view_args.get('location_id')
self.location = Location.get_one(id_, is_deleted=False) if id_ is not None else None
def _jsonify_one(self, location):
return jsonify(admin_locations_schema.dump(location, many=False))
def _jsonify_many(self):
query = Location.query.filter_by(is_deleted=False)
return jsonify(admin_locations_schema.dump(query.all()))
def _process_GET(self):
if self.location:
return self._jsonify_one(self.location)
else:
return self._jsonify_many()
def _process_DELETE(self):
# XXX: we could safely allow deleting any locations regardless of whether there
# are rooms now that we soft-delete them. but it's probably safer to disallow
# deletion of locations with rooms, simply to prevent accidental deletions.
if self.location.rooms:
raise ExpectedError(_('Cannot delete location with active rooms'))
self.location.is_deleted = True
logger.info('Location %r deleted by %r', self.location, session.user)
# this code currently doesn't do anything since we don't allow deleting locations
# that have non-deleted rooms, but if we change this in the future it's needed
for room in self.location.rooms:
logger.info('Deleting room %r', room)
room.is_deleted = True
db.session.flush()
return '', 204
@use_kwargs({
'name': fields.String(required=True),
'room_name_format': fields.String(validate=[
lambda value: all(x in value for x in ('{building}', '{floor}', '{number}'))
], required=True),
'map_url_template': fields.URL(schemes={'http', 'https'}, allow_none=True, missing=''),
})
def _process_POST(self, name, room_name_format, map_url_template):
self._check_conflict(name)
loc = Location(name=name, room_name_format=room_name_format, map_url_template=(map_url_template or ''))
db.session.add(loc)
db.session.flush()
return self._jsonify_one(loc), 201
@use_kwargs({
'name': fields.String(),
'room_name_format': fields.String(validate=[
lambda value: all(x in value for x in ('{building}', '{floor}', '{number}'))
]),
'map_url_template': fields.URL(schemes={'http', 'https'}, allow_none=True),
})
def _process_PATCH(self, name=None, room_name_format=None, map_url_template=missing):
if name is not None:
self._check_conflict(name)
self.location.name = name
if room_name_format is not None:
self.location.room_name_format = room_name_format
if map_url_template is not missing:
self.location.map_url_template = map_url_template or ''
db.session.flush()
return self._jsonify_one(self.location)
def _check_conflict(self, name):
query = Location.query.filter(~Location.is_deleted, db.func.lower(Location.name) == name.lower())
if self.location:
query = query.filter(Location.id != self.location.id)
if query.has_rows():
abort(422, messages={'name': [_('Name must be unique')]})
class RHFeatures(RHRoomBookingAdminBase):
def _process_args(self):
id_ = request.view_args.get('feature_id')
self.feature = RoomFeature.get_one(id_) if id_ is not None else None
def _dump_features(self):
query = RoomFeature.query.order_by(RoomFeature.title)
return room_feature_schema.dump(query, many=True)
def _jsonify_one(self, equipment_type):
return jsonify(room_feature_schema.dump(equipment_type))
def _jsonify_many(self):
return jsonify(self._dump_features())
def _process_GET(self):
if self.feature:
return self._jsonify_one(self.feature)
else:
return self._jsonify_many()
def _process_DELETE(self):
db.session.delete(self.feature)
db.session.flush()
return '', 204
@use_kwargs({
'name': fields.String(validate=validate.Length(min=2), required=True),
'title': fields.String(validate=validate.Length(min=2), required=True),
'icon': fields.String(missing=''),
})
def _process_POST(self, name, title, icon):
self._check_conflict(name)
feature = RoomFeature(name=name, title=title, icon=icon)
db.session.add(feature)
db.session.flush()
return self._jsonify_one(feature), 201
@use_kwargs({
'name': fields.String(validate=validate.Length(min=2)),
'title': fields.String(validate=validate.Length(min=2)),
'icon': fields.String(),
})
def _process_PATCH(self, name=None, title=None, icon=None):
if name is not None:
self._check_conflict(name)
self.feature.name = name
if title is not None:
self.feature.title = title
if icon is not None:
self.feature.icon = icon
db.session.flush()
return self._jsonify_one(self.feature)
def _check_conflict(self, name):
query = RoomFeature.query.filter(db.func.lower(RoomFeature.name) == name.lower())
if self.feature:
query = query.filter(RoomFeature.id != self.feature.id)
if query.has_rows():
abort(422, messages={'name': [_('Name must be unique')]})
class RHEquipmentTypes(RHRoomBookingAdminBase):
def _process_args(self):
id_ = request.view_args.get('equipment_type_id')
self.equipment_type = EquipmentType.get_one(id_) if id_ is not None else None
def _dump_equipment_types(self):
query = EquipmentType.query.options(joinedload('features')).order_by(EquipmentType.name)
return admin_equipment_type_schema.dump(query, many=True)
def _get_room_counts(self):
query = (db.session.query(RoomEquipmentAssociation.c.equipment_id, db.func.count())
.group_by(RoomEquipmentAssociation.c.equipment_id))
return dict(query)
def _jsonify_one(self, equipment_type):
counts = self._get_room_counts()
eq = admin_equipment_type_schema.dump(equipment_type)
eq['num_rooms'] = counts.get(eq['id'], 0)
return jsonify(eq)
def _jsonify_many(self):
counts = self._get_room_counts()
equipment_types = self._dump_equipment_types()
for eq in equipment_types:
eq['num_rooms'] = counts.get(eq['id'], 0)
return jsonify(equipment_types)
def _process_GET(self):
if self.equipment_type:
return self._jsonify_one(self.equipment_type)
else:
return self._jsonify_many()
def _process_DELETE(self):
db.session.delete(self.equipment_type)
db.session.flush()
return '', 204
@use_kwargs({
'name': fields.String(validate=validate.Length(min=2), required=True),
'features': ModelList(RoomFeature, missing=[])
})
def _process_POST(self, name, features):
self._check_conflict(name)
equipment_type = EquipmentType(name=name, features=features)
db.session.add(equipment_type)
db.session.flush()
return self._jsonify_one(equipment_type), 201
@use_kwargs({
'name': fields.String(validate=validate.Length(min=2)),
'features': ModelList(RoomFeature)
})
def _process_PATCH(self, name=None, features=None):
if name is not None:
self._check_conflict(name)
self.equipment_type.name = name
if features is not None:
self.equipment_type.features = features
db.session.flush()
return self._jsonify_one(self.equipment_type)
def _check_conflict(self, name):
query = EquipmentType.query.filter(db.func.lower(EquipmentType.name) == name.lower())
if self.equipment_type:
query = query.filter(EquipmentType.id != self.equipment_type.id)
if query.has_rows():
abort(422, messages={'name': [_('Name must be unique')]})
class RHAttributes(RHRoomBookingAdminBase):
def _process_args(self):
id_ = request.view_args.get('attribute_id')
self.attribute = RoomAttribute.get_one(id_) if id_ is not None else None
def _dump_attributes(self):
query = RoomAttribute.query.order_by(RoomAttribute.title)
return room_attribute_schema.dump(query, many=True)
def _get_room_counts(self):
query = (db.session.query(RoomAttributeAssociation.attribute_id, db.func.count())
.group_by(RoomAttributeAssociation.attribute_id))
return dict(query)
def _jsonify_one(self, attribute):
counts = self._get_room_counts()
attr = room_attribute_schema.dump(attribute)
attr['num_rooms'] = counts.get(attr['id'], 0)
return jsonify(attr)
def _jsonify_many(self):
counts = self._get_room_counts()
attributes = self._dump_attributes()
for attr in attributes:
attr['num_rooms'] = counts.get(attr['id'], 0)
return jsonify(attributes)
def _process_GET(self):
if self.attribute:
return self._jsonify_one(self.attribute)
else:
return self._jsonify_many()
def _process_DELETE(self):
db.session.delete(self.attribute)
db.session.flush()
return '', 204
@use_kwargs({
'name': fields.String(validate=validate.Length(min=2), required=True),
'title': fields.String(validate=validate.Length(min=2), required=True),
'hidden': fields.Bool(missing=False),
})
def _process_POST(self, name, title, hidden):
self._check_conflict(name)
attribute = RoomAttribute(name=name, title=title, is_hidden=hidden)
db.session.add(attribute)
db.session.flush()
return self._jsonify_one(attribute), 201
@use_kwargs({
'name': fields.String(validate=validate.Length(min=2)),
'title': fields.String(validate=validate.Length(min=2)),
'hidden': fields.Bool(),
})
def _process_PATCH(self, name=None, title=None, hidden=None):
if name is not None:
self._check_conflict(name)
self.attribute.name = name
if title is not None:
self.attribute.title = title
if hidden is not None:
self.attribute.is_hidden = hidden
db.session.flush()
return self._jsonify_one(self.attribute)
def _check_conflict(self, name):
query = RoomAttribute.query.filter(db.func.lower(RoomAttribute.name) == name.lower())
if self.attribute:
query = query.filter(RoomAttribute.id != self.attribute.id)
if query.has_rows():
abort(422, messages={'name': [_('Name must be unique')]})
class RHRoomAdminBase(RHRoomBookingAdminBase):
def _process_args(self):
self.room = Room.get_one(request.view_args['room_id'], is_deleted=False)
class RHRoomAttributes(RHRoomAdminBase):
def _process(self):
return RoomAttributeValuesSchema(many=True, only=('name', 'value')).jsonify(self.room.attributes)
class RHUpdateRoomAttributes(RHRoomAdminBase):
@use_kwargs({'attributes': fields.Nested({'value': fields.Str(),
'name': fields.Str()}, many=True)})
def _process(self, attributes):
update_room_attributes(self.room, attributes)
return '', 204
class RHRoomAvailability(RHRoomAdminBase):
def _process(self):
return jsonify(
nonbookable_periods=nonbookable_periods_schema.dump(self.room.nonbookable_periods, many=True),
bookable_hours=bookable_hours_schema.dump(self.room.bookable_hours, many=True)
)
class RHUpdateRoomAvailability(RHRoomAdminBase):
@use_args({'bookable_hours': fields.Nested({'start_time': fields.Time(),
'end_time': fields.Time()}, many=True),
'nonbookable_periods': fields.Nested({'start_dt': fields.Date(),
'end_dt': fields.Date()}, many=True)})
def _process(self, args):
if 'bookable_hours' in args:
self._check_invalid_times(args)
update_room_availability(self.room, args)
return jsonify(
nonbookable_periods=nonbookable_periods_schema.dump(self.room.nonbookable_periods, many=True),
bookable_hours=bookable_hours_schema.dump(self.room.bookable_hours, many=True)
)
def _check_invalid_times(self, availability):
if any(bh['start_time'] >= bh['end_time'] for bh in availability['bookable_hours']):
abort(422, messages={'bookable_hours': [_('Start time should not be later than end time')]})
class RHRoomEquipment(RHRoomAdminBase):
def _process(self):
return jsonify(room_equipment_schema.dump(self.room))
class RHUpdateRoomEquipment(RHRoomAdminBase):
@use_args({
'available_equipment': fields.List(fields.Int(), required=True)
})
def _process(self, args):
update_room_equipment(self.room, args['available_equipment'])
return jsonify(room_update_schema.dump(self.room, many=False))
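# An illustrative request body for RHUpdateRoomEquipment above (the ids are
# made up): {"available_equipment": [1, 3, 7]} -- a plain list of EquipmentType
# ids, as required by the use_args schema.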
room_update_args = {
'verbose_name': fields.Str(allow_none=True),
'site': fields.Str(allow_none=True),
'building': fields.String(validate=lambda x: x is not None),
'floor': fields.String(validate=lambda x: x is not None),
'number': fields.String(validate=lambda x: x is not None),
'longitude': fields.Float(allow_none=True),
'latitude': fields.Float(allow_none=True),
'is_reservable': fields.Bool(allow_none=True),
'reservations_need_confirmation': fields.Bool(allow_none=True),
'notification_emails': fields.List(fields.Email()),
'notification_before_days': fields.Int(validate=lambda x: 1 <= x <= 30, allow_none=True),
'notification_before_days_weekly': fields.Int(validate=lambda x: 1 <= x <= 30, allow_none=True),
'notification_before_days_monthly': fields.Int(validate=lambda x: 1 <= x <= 30, allow_none=True),
'notifications_enabled': fields.Bool(),
'end_notification_daily': fields.Int(validate=lambda x: 1 <= x <= 30, allow_none=True),
'end_notification_weekly': fields.Int(validate=lambda x: 1 <= x <= 30, allow_none=True),
'end_notification_monthly': fields.Int(validate=lambda x: 1 <= x <= 30, allow_none=True),
'end_notifications_enabled': fields.Bool(),
'booking_limit_days': fields.Int(validate=lambda x: x >= 1, allow_none=True),
'owner': Principal(validate=lambda x: x is not None, allow_none=True),
'key_location': fields.Str(),
'telephone': fields.Str(),
'capacity': fields.Int(validate=lambda x: x >= 1),
'division': fields.Str(allow_none=True),
'surface_area': fields.Int(validate=lambda x: x >= 0, allow_none=True),
'max_advance_days': fields.Int(validate=lambda x: x >= 1, allow_none=True),
'comments': fields.Str(),
'acl_entries': PrincipalPermissionList(RoomPrincipal),
'protection_mode': EnumField(ProtectionMode)
}
class RHRoom(RHRoomAdminBase):
def _process_GET(self):
return jsonify(room_update_schema.dump(self.room))
@use_args(room_update_args)
def _process_PATCH(self, args):
update_room(self.room, args)
RHRoomsPermissions._jsonify_user_permissions.clear_cached(session.user)
return '', 204
def _process_DELETE(self):
logger.info('Room %r deleted by %r', self.room, session.user)
self.room.is_deleted = True
return '', 204
class RHRoomPhoto(RHRoomAdminBase):
def _process_GET(self):
if not self.room.has_photo:
raise NotFound
return send_file('room.jpg', BytesIO(get_resized_room_photo(self.room)), 'image/jpeg')
def _process_DELETE(self):
self.room.photo = None
remove_room_spritesheet_photo(self.room)
return '', 204
def _process_POST(self):
photo = request.files['photo'].read()
self.room.photo = Photo(data=photo)
token = build_rooms_spritesheet()
return jsonify(rooms_sprite_token=unicode(token))
class RHRooms(RHRoomBookingAdminBase):
def _process_GET(self):
rooms = Room.query.filter_by(is_deleted=False).order_by(db.func.indico.natsort(Room.full_name)).all()
return AdminRoomSchema().jsonify(rooms, many=True)
@use_args(dict(room_update_args, **{
'location_id': fields.Int(required=True),
}))
def _process_POST(self, args):
room = Room()
update_room(room, args)
db.session.add(room)
db.session.flush()
RHRoomsPermissions._jsonify_user_permissions.clear_cached(session.user)
return jsonify(id=room.id)
_base_args = {
'default': fields.Bool(),
'bounds': fields.Nested({
'north_east': fields.Nested({'lat': fields.Float(), 'lng': fields.Float()}, required=True),
'south_west': fields.Nested({'lat': fields.Float(), 'lng': fields.Float()}, required=True)
}, required=True)
}
_create_args = dict(_base_args, **{
'name': fields.String(required=True)
})
_update_args = {
'areas': fields.List(
fields.Nested(dict(_base_args, **{
'id': fields.Int(required=True),
'name': fields.String()
}), required=True)
)
}
class RHMapAreas(RHRoomBookingAdminBase):
@use_args(_create_args)
def _process_POST(self, args):
create_area(**args)
return map_areas_schema.jsonify(MapArea.query)
@use_kwargs(_update_args)
def _process_PATCH(self, areas):
for area in areas:
update_area(area.pop('id'), area)
return map_areas_schema.jsonify(MapArea.query)
@use_kwargs({
'area_ids': fields.List(fields.Int(), required=True)
})
def _process_DELETE(self, area_ids):
delete_areas(area_ids)
return '', 204
| mit | 8,321,943,753,006,878,000 | 39.193841 | 120 | 0.647947 | false |
ramineni/myironic | ironic/tests/conductor/test_rpcapi.py | 1 | 10964 | # coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for :py:class:`ironic.conductor.rpcapi.ConductorAPI`.
"""
import copy
import mock
from oslo_config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import manager as conductor_manager
from ironic.conductor import rpcapi as conductor_rpcapi
from ironic import objects
from ironic.tests import base as tests_base
from ironic.tests.db import base
from ironic.tests.db import utils as dbutils
CONF = cfg.CONF
class ConductorRPCAPITestCase(tests_base.TestCase):
def test_versions_in_sync(self):
self.assertEqual(
conductor_manager.ConductorManager.RPC_API_VERSION,
conductor_rpcapi.ConductorAPI.RPC_API_VERSION)
class RPCAPITestCase(base.DbTestCase):
def setUp(self):
super(RPCAPITestCase, self).setUp()
self.fake_node = dbutils.get_test_node(driver='fake-driver')
self.fake_node_obj = objects.Node._from_db_object(
objects.Node(self.context),
self.fake_node)
def test_serialized_instance_has_uuid(self):
self.assertTrue('uuid' in self.fake_node)
def test_get_topic_for_known_driver(self):
CONF.set_override('host', 'fake-host')
self.dbapi.register_conductor({'hostname': 'fake-host',
'drivers': ['fake-driver']})
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
expected_topic = 'fake-topic.fake-host'
self.assertEqual(expected_topic,
rpcapi.get_topic_for(self.fake_node_obj))
def test_get_topic_for_unknown_driver(self):
CONF.set_override('host', 'fake-host')
self.dbapi.register_conductor({'hostname': 'fake-host',
'drivers': ['other-driver']})
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertRaises(exception.NoValidHost,
rpcapi.get_topic_for,
self.fake_node_obj)
def test_get_topic_doesnt_cache(self):
CONF.set_override('host', 'fake-host')
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertRaises(exception.NoValidHost,
rpcapi.get_topic_for,
self.fake_node_obj)
self.dbapi.register_conductor({'hostname': 'fake-host',
'drivers': ['fake-driver']})
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
expected_topic = 'fake-topic.fake-host'
self.assertEqual(expected_topic,
rpcapi.get_topic_for(self.fake_node_obj))
def test_get_topic_for_driver_known_driver(self):
CONF.set_override('host', 'fake-host')
self.dbapi.register_conductor({
'hostname': 'fake-host',
'drivers': ['fake-driver'],
})
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertEqual('fake-topic.fake-host',
rpcapi.get_topic_for_driver('fake-driver'))
def test_get_topic_for_driver_unknown_driver(self):
CONF.set_override('host', 'fake-host')
self.dbapi.register_conductor({
'hostname': 'fake-host',
'drivers': ['other-driver'],
})
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertRaises(exception.DriverNotFound,
rpcapi.get_topic_for_driver,
'fake-driver')
def test_get_topic_for_driver_doesnt_cache(self):
CONF.set_override('host', 'fake-host')
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertRaises(exception.DriverNotFound,
rpcapi.get_topic_for_driver,
'fake-driver')
self.dbapi.register_conductor({
'hostname': 'fake-host',
'drivers': ['fake-driver'],
})
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertEqual('fake-topic.fake-host',
rpcapi.get_topic_for_driver('fake-driver'))
def _test_rpcapi(self, method, rpc_method, **kwargs):
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
expected_retval = 'hello world' if rpc_method == 'call' else None
expected_topic = 'fake-topic'
if 'host' in kwargs:
expected_topic += ".%s" % kwargs['host']
target = {
"topic": expected_topic,
"version": kwargs.pop('version', rpcapi.RPC_API_VERSION)
}
expected_msg = copy.deepcopy(kwargs)
self.fake_args = None
self.fake_kwargs = None
def _fake_prepare_method(*args, **kwargs):
for kwd in kwargs:
self.assertEqual(kwargs[kwd], target[kwd])
return rpcapi.client
def _fake_rpc_method(*args, **kwargs):
self.fake_args = args
self.fake_kwargs = kwargs
if expected_retval:
return expected_retval
with mock.patch.object(rpcapi.client, "prepare") as mock_prepared:
mock_prepared.side_effect = _fake_prepare_method
with mock.patch.object(rpcapi.client, rpc_method) as mock_method:
mock_method.side_effect = _fake_rpc_method
retval = getattr(rpcapi, method)(self.context, **kwargs)
self.assertEqual(retval, expected_retval)
expected_args = [self.context, method, expected_msg]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(arg, expected_arg)
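    # _test_rpcapi() above drives every per-method test below: it fakes the
    # oslo.messaging client's prepare() and call()/cast, then verifies the
    # topic, version and message payload the RPC API sends. A typical
    # invocation (taken from test_destroy_node below) is:
    #
    #     self._test_rpcapi('destroy_node', 'call', version='1.9',
    #                       node_id=self.fake_node['uuid'])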
def test_update_node(self):
self._test_rpcapi('update_node',
'call',
version='1.1',
node_obj=self.fake_node)
def test_change_node_power_state(self):
self._test_rpcapi('change_node_power_state',
'call',
version='1.6',
node_id=self.fake_node['uuid'],
new_state=states.POWER_ON)
def test_vendor_passthru(self):
self._test_rpcapi('vendor_passthru',
'call',
version='1.20',
node_id=self.fake_node['uuid'],
driver_method='test-driver-method',
http_method='test-http-method',
info={"test_info": "test_value"})
def test_driver_vendor_passthru(self):
self._test_rpcapi('driver_vendor_passthru',
'call',
version='1.20',
driver_name='test-driver-name',
driver_method='test-driver-method',
http_method='test-http-method',
info={'test_key': 'test_value'})
def test_do_node_deploy(self):
self._test_rpcapi('do_node_deploy',
'call',
version='1.22',
node_id=self.fake_node['uuid'],
rebuild=False,
configdrive=None)
def test_do_node_tear_down(self):
self._test_rpcapi('do_node_tear_down',
'call',
version='1.6',
node_id=self.fake_node['uuid'])
def test_validate_driver_interfaces(self):
self._test_rpcapi('validate_driver_interfaces',
'call',
version='1.5',
node_id=self.fake_node['uuid'])
def test_destroy_node(self):
self._test_rpcapi('destroy_node',
'call',
version='1.9',
node_id=self.fake_node['uuid'])
def test_get_console_information(self):
self._test_rpcapi('get_console_information',
'call',
version='1.11',
node_id=self.fake_node['uuid'])
def test_set_console_mode(self):
self._test_rpcapi('set_console_mode',
'call',
version='1.11',
node_id=self.fake_node['uuid'],
enabled=True)
def test_update_port(self):
fake_port = dbutils.get_test_port()
self._test_rpcapi('update_port',
'call',
version='1.13',
port_obj=fake_port)
def test_get_driver_properties(self):
self._test_rpcapi('get_driver_properties',
'call',
version='1.16',
driver_name='fake-driver')
def test_set_boot_device(self):
self._test_rpcapi('set_boot_device',
'call',
version='1.17',
node_id=self.fake_node['uuid'],
device=boot_devices.DISK,
persistent=False)
def test_get_boot_device(self):
self._test_rpcapi('get_boot_device',
'call',
version='1.17',
node_id=self.fake_node['uuid'])
def test_get_supported_boot_devices(self):
self._test_rpcapi('get_supported_boot_devices',
'call',
version='1.17',
node_id=self.fake_node['uuid'])
def test_get_node_vendor_passthru_methods(self):
self._test_rpcapi('get_node_vendor_passthru_methods',
'call',
version='1.21',
node_id=self.fake_node['uuid'])
def test_get_driver_vendor_passthru_methods(self):
self._test_rpcapi('get_driver_vendor_passthru_methods',
'call',
version='1.21',
driver_name='fake-driver')
| apache-2.0 | -4,673,336,276,116,043,000 | 37.605634 | 79 | 0.528913 | false |
mathiasertl/fabric | tests/test_tasks.py | 1 | 18968 | from __future__ import with_statement
from fudge import Fake, patched_context, with_fakes
import unittest
from nose.tools import raises, ok_
import random
import sys
import fabric
from fabric.tasks import WrappedCallableTask, execute, Task, get_task_details
from fabric.main import display_command
from fabric.api import run, env, settings, hosts, roles, hide, parallel, task, runs_once, serial
from fabric.exceptions import NetworkError
from mock_streams import mock_streams
from utils import eq_, FabricTest, aborts, support
from server import server
def test_base_task_provides_undefined_name():
task = Task()
eq_("undefined", task.name)
@raises(NotImplementedError)
def test_base_task_raises_exception_on_call_to_run():
task = Task()
task.run()
class TestWrappedCallableTask(unittest.TestCase):
def test_passes_unused_args_to_parent(self):
args = [i for i in range(random.randint(1, 10))]
def foo(): pass
try:
WrappedCallableTask(foo, *args)
except TypeError:
msg = "__init__ raised a TypeError, meaning args weren't handled"
self.fail(msg)
def test_passes_unused_kwargs_to_parent(self):
random_range = range(random.randint(1, 10))
kwargs = dict([("key_%s" % i, i) for i in random_range])
def foo(): pass
try:
WrappedCallableTask(foo, **kwargs)
except TypeError:
self.fail(
"__init__ raised a TypeError, meaning kwargs weren't handled")
def test_allows_any_number_of_args(self):
args = [i for i in range(random.randint(0, 10))]
def foo(): pass
WrappedCallableTask(foo, *args)
def test_allows_any_number_of_kwargs(self):
kwargs = dict([("key%d" % i, i) for i in range(random.randint(0, 10))])
def foo(): pass
WrappedCallableTask(foo, **kwargs)
def test_run_is_wrapped_callable(self):
def foo(): pass
task = WrappedCallableTask(foo)
eq_(task.wrapped, foo)
def test_name_is_the_name_of_the_wrapped_callable(self):
def foo(): pass
foo.__name__ = "random_name_%d" % random.randint(1000, 2000)
task = WrappedCallableTask(foo)
eq_(task.name, foo.__name__)
def test_name_can_be_overridden(self):
def foo(): pass
eq_(WrappedCallableTask(foo).name, 'foo')
eq_(WrappedCallableTask(foo, name='notfoo').name, 'notfoo')
def test_reads_double_under_doc_from_callable(self):
def foo(): pass
foo.__doc__ = "Some random __doc__: %d" % random.randint(1000, 2000)
task = WrappedCallableTask(foo)
eq_(task.__doc__, foo.__doc__)
def test_dispatches_to_wrapped_callable_on_run(self):
random_value = "some random value %d" % random.randint(1000, 2000)
def foo(): return random_value
task = WrappedCallableTask(foo)
eq_(random_value, task())
def test_passes_all_regular_args_to_run(self):
def foo(*args): return args
random_args = tuple(
[random.randint(1000, 2000) for i in range(random.randint(1, 5))]
)
task = WrappedCallableTask(foo)
eq_(random_args, task(*random_args))
def test_passes_all_keyword_args_to_run(self):
def foo(**kwargs): return kwargs
random_kwargs = {}
for i in range(random.randint(1, 5)):
random_key = ("foo", "bar", "baz", "foobar", "barfoo")[i]
random_kwargs[random_key] = random.randint(1000, 2000)
task = WrappedCallableTask(foo)
eq_(random_kwargs, task(**random_kwargs))
def test_calling_the_object_is_the_same_as_run(self):
random_return = random.randint(1000, 2000)
def foo(): return random_return
task = WrappedCallableTask(foo)
eq_(task(), task.run())
class TestTask(unittest.TestCase):
def test_takes_an_alias_kwarg_and_wraps_it_in_aliases_list(self):
random_alias = "alias_%d" % random.randint(100, 200)
task = Task(alias=random_alias)
self.assertTrue(random_alias in task.aliases)
def test_aliases_are_set_based_on_provided_aliases(self):
aliases = ["a_%d" % i for i in range(random.randint(1, 10))]
task = Task(aliases=aliases)
self.assertTrue(all([a in task.aliases for a in aliases]))
def test_aliases_are_None_by_default(self):
task = Task()
self.assertTrue(task.aliases is None)
# Reminder: decorator syntax, e.g.:
# @foo
# def bar():...
#
# is semantically equivalent to:
# def bar():...
# bar = foo(bar)
#
# this simplifies testing :)
def test_decorator_incompatibility_on_task():
from fabric.decorators import task, hosts, runs_once, roles
def foo(): return "foo"
foo = task(foo)
    # since we aren't setting foo to be the newly decorated thing, it's cool
hosts('me@localhost')(foo)
runs_once(foo)
roles('www')(foo)
def test_decorator_closure_hiding():
"""
@task should not accidentally destroy decorated attributes from @hosts/etc
"""
from fabric.decorators import task, hosts
def foo():
print(env.host_string)
foo = task(hosts("me@localhost")(foo))
eq_(["me@localhost"], foo.hosts)
#
# execute()
#
def dict_contains(superset, subset):
"""
Assert that all key/val pairs in dict 'subset' also exist in 'superset'
"""
for key, value in subset.iteritems():
ok_(key in superset)
eq_(superset[key], value)
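# For example, dict_contains({'a': 1, 'b': 2}, {'a': 1}) passes, while a
# missing key or a differing value fails the ok_/eq_ assertions.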
class TestExecute(FabricTest):
@with_fakes
def test_calls_task_function_objects(self):
"""
should execute the passed-in function object
"""
execute(Fake(callable=True, expect_call=True))
@with_fakes
def test_should_look_up_task_name(self):
"""
should also be able to handle task name strings
"""
name = 'task1'
commands = {name: Fake(callable=True, expect_call=True)}
with patched_context(fabric.state, 'commands', commands):
execute(name)
@with_fakes
def test_should_handle_name_of_Task_object(self):
"""
        handle corner case of Task object referred to by name
"""
name = 'task2'
class MyTask(Task):
run = Fake(callable=True, expect_call=True)
mytask = MyTask()
mytask.name = name
commands = {name: mytask}
with patched_context(fabric.state, 'commands', commands):
execute(name)
@aborts
def test_should_abort_if_task_name_not_found(self):
"""
should abort if given an invalid task name
"""
execute('thisisnotavalidtaskname')
def test_should_not_abort_if_task_name_not_found_with_skip(self):
"""
should not abort if given an invalid task name
and skip_unknown_tasks in env
"""
env.skip_unknown_tasks = True
execute('thisisnotavalidtaskname')
del env['skip_unknown_tasks']
@with_fakes
def test_should_pass_through_args_kwargs(self):
"""
should pass in any additional args, kwargs to the given task.
"""
task = (
Fake(callable=True, expect_call=True)
.with_args('foo', biz='baz')
)
execute(task, 'foo', biz='baz')
@with_fakes
def test_should_honor_hosts_kwarg(self):
"""
should use hosts kwarg to set run list
"""
# Make two full copies of a host list
hostlist = ['a', 'b', 'c']
hosts = hostlist[:]
# Side-effect which asserts the value of env.host_string when it runs
def host_string():
eq_(env.host_string, hostlist.pop(0))
task = Fake(callable=True, expect_call=True).calls(host_string)
with hide('everything'):
execute(task, hosts=hosts)
def test_should_honor_hosts_decorator(self):
"""
should honor @hosts on passed-in task objects
"""
# Make two full copies of a host list
hostlist = ['a', 'b', 'c']
@hosts(*hostlist[:])
def task():
eq_(env.host_string, hostlist.pop(0))
with hide('running'):
execute(task)
def test_should_honor_roles_decorator(self):
"""
should honor @roles on passed-in task objects
"""
# Make two full copies of a host list
roledefs = {'role1': ['a', 'b', 'c']}
role_copy = roledefs['role1'][:]
@roles('role1')
def task():
eq_(env.host_string, role_copy.pop(0))
with settings(hide('running'), roledefs=roledefs):
execute(task)
@with_fakes
def test_should_set_env_command_to_string_arg(self):
"""
should set env.command to any string arg, if given
"""
name = "foo"
def command():
eq_(env.command, name)
task = Fake(callable=True, expect_call=True).calls(command)
with patched_context(fabric.state, 'commands', {name: task}):
execute(name)
@with_fakes
def test_should_set_env_command_to_name_attr(self):
"""
should set env.command to TaskSubclass.name if possible
"""
name = "foo"
def command():
eq_(env.command, name)
task = (
Fake(callable=True, expect_call=True)
.has_attr(name=name)
.calls(command)
)
execute(task)
@with_fakes
def test_should_set_all_hosts(self):
"""
should set env.all_hosts to its derived host list
"""
hosts = ['a', 'b']
roledefs = {'r1': ['c', 'd']}
roles = ['r1']
exclude_hosts = ['a']
def command():
eq_(set(env.all_hosts), set(['b', 'c', 'd']))
task = Fake(callable=True, expect_call=True).calls(command)
with settings(hide('everything'), roledefs=roledefs):
execute(
task, hosts=hosts, roles=roles, exclude_hosts=exclude_hosts
)
@mock_streams('stdout')
def test_should_print_executing_line_per_host(self):
"""
should print "Executing" line once per host
"""
def task():
pass
execute(task, hosts=['host1', 'host2'])
eq_(sys.stdout.getvalue(), """[host1] Executing task 'task'
[host2] Executing task 'task'
""")
@mock_streams('stdout')
def test_should_not_print_executing_line_for_singletons(self):
"""
should not print "Executing" line for non-networked tasks
"""
def task():
pass
with settings(hosts=[]): # protect against really odd test bleed :(
execute(task)
eq_(sys.stdout.getvalue(), "")
def test_should_return_dict_for_base_case(self):
"""
Non-network-related tasks should return a dict w/ special key
"""
def task():
return "foo"
eq_(execute(task), {'<local-only>': 'foo'})
@server(port=2200)
@server(port=2201)
def test_should_return_dict_for_serial_use_case(self):
"""
Networked but serial tasks should return per-host-string dict
"""
ports = [2200, 2201]
hosts = map(lambda x: '127.0.0.1:%s' % x, ports)
def task():
run("ls /simple")
return "foo"
with hide('everything'):
eq_(execute(task, hosts=hosts), {
'127.0.0.1:2200': 'foo',
'127.0.0.1:2201': 'foo'
})
@server()
def test_should_preserve_None_for_non_returning_tasks(self):
"""
Tasks which don't return anything should still show up in the dict
"""
def local_task():
pass
def remote_task():
with hide('everything'):
run("ls /simple")
eq_(execute(local_task), {'<local-only>': None})
with hide('everything'):
eq_(
execute(remote_task, hosts=[env.host_string]),
{env.host_string: None}
)
def test_should_use_sentinel_for_tasks_that_errored(self):
"""
Tasks which errored but didn't abort should contain an eg NetworkError
"""
def task():
run("whoops")
host_string = 'localhost:1234'
with settings(hide('everything'), skip_bad_hosts=True):
retval = execute(task, hosts=[host_string])
assert isinstance(retval[host_string], NetworkError)
@server(port=2200)
@server(port=2201)
def test_parallel_return_values(self):
"""
Parallel mode should still return values as in serial mode
"""
@parallel
@hosts('127.0.0.1:2200', '127.0.0.1:2201')
def task():
run("ls /simple")
return env.host_string.split(':')[1]
with hide('everything'):
retval = execute(task)
eq_(retval, {'127.0.0.1:2200': '2200', '127.0.0.1:2201': '2201'})
@with_fakes
def test_should_work_with_Task_subclasses(self):
"""
should work for Task subclasses, not just WrappedCallableTask
"""
class MyTask(Task):
name = "mytask"
run = Fake(callable=True, expect_call=True)
mytask = MyTask()
execute(mytask)
@server(port=2200)
@server(port=2201)
def test_nested_execution_with_explicit_ports(self):
"""
nested executions should work with defined ports
"""
def expect_host_string_port():
eq_(env.port, '2201')
return "bar"
def expect_env_port():
eq_(env.port, '2202')
def expect_per_host_config_port():
eq_(env.port, '664')
run = execute(expect_default_config_port, hosts=['some_host'])
return run['some_host']
def expect_default_config_port():
# uses `Host *` in ssh_config
eq_(env.port, '666')
return "bar"
def main_task():
eq_(env.port, '2200')
execute(expect_host_string_port, hosts=['localhost:2201'])
with settings(port='2202'):
execute(expect_env_port, hosts=['localhost'])
with settings(
use_ssh_config=True,
ssh_config_path=support("ssh_config")
):
run = execute(expect_per_host_config_port, hosts='myhost')
return run['myhost']
run = execute(main_task, hosts=['localhost:2200'])
eq_(run['localhost:2200'], 'bar')
class TestExecuteEnvInteractions(FabricTest):
def set_network(self):
# Don't update env.host/host_string/etc
pass
@server(port=2200)
@server(port=2201)
def test_should_not_mutate_its_own_env_vars(self):
"""
internal env changes should not bleed out, but task env changes should
"""
# Task that uses a handful of features which involve env vars
@parallel
@hosts('[email protected]:2200', '[email protected]:2201')
def mytask():
run("ls /simple")
# Pre-assertions
assertions = {
'parallel': False,
'all_hosts': [],
'host': None,
'hosts': [],
'host_string': None
}
for key, value in assertions.items():
eq_(env[key], value)
# Run
with hide('everything'):
result = execute(mytask)
eq_(len(result), 2)
# Post-assertions
for key, value in assertions.items():
eq_(env[key], value)
@server()
def test_should_allow_task_to_modify_env_vars(self):
@hosts('[email protected]:2200')
def mytask():
run("ls /simple")
env.foo = "bar"
with hide('everything'):
execute(mytask)
eq_(env.foo, "bar")
eq_(env.host_string, None)
class TestTaskDetails(unittest.TestCase):
def test_old_style_task_with_default_args(self):
"""
__details__() should print docstr for old style task methods with default args
"""
def task_old_style(arg1, arg2, arg3=None, arg4='yes'):
'''Docstring'''
details = get_task_details(task_old_style)
eq_("Docstring\n"
"Arguments: arg1, arg2, arg3=None, arg4='yes'",
details)
def test_old_style_task_without_default_args(self):
"""
__details__() should print docstr for old style task methods without default args
"""
def task_old_style(arg1, arg2):
'''Docstring'''
details = get_task_details(task_old_style)
eq_("Docstring\n"
"Arguments: arg1, arg2",
details)
def test_old_style_task_without_args(self):
"""
__details__() should print docstr for old style task methods without args
"""
def task_old_style():
'''Docstring'''
details = get_task_details(task_old_style)
eq_("Docstring\n"
"Arguments: ",
details)
def test_decorated_task(self):
"""
__details__() should print docstr for method with any number and order of decorations
"""
expected = "\n".join([
"Docstring",
"Arguments: arg1",
])
@task
def decorated_task(arg1):
'''Docstring'''
actual = decorated_task.__details__()
eq_(expected, actual)
@runs_once
@task
def decorated_task1(arg1):
'''Docstring'''
actual = decorated_task1.__details__()
eq_(expected, actual)
@runs_once
@serial
@task
def decorated_task2(arg1):
'''Docstring'''
actual = decorated_task2.__details__()
eq_(expected, actual)
def test_subclassed_task(self):
"""
__details__() should print docstr for subclassed task methods with args
"""
class SpecificTask(Task):
def run(self, arg1, arg2, arg3):
'''Docstring'''
eq_("Docstring\n"
"Arguments: self, arg1, arg2, arg3",
SpecificTask().__details__())
@mock_streams('stdout')
def test_multiline_docstring_indented_correctly(self):
"""
display_command() should properly indent docstr for old style task methods
"""
def mytask(arg1):
"""
This is a multi line docstring.
For reals.
"""
try:
with patched_context(fabric.state, 'commands', {'mytask': mytask}):
display_command('mytask')
except SystemExit: # ugh
pass
eq_(
sys.stdout.getvalue(),
"""Displaying detailed information for task 'mytask':
This is a multi line docstring.
For reals.
Arguments: arg1
"""
)
| bsd-2-clause | -2,933,276,217,915,369,000 | 29.642973 | 96 | 0.561525 | false |
bryanrtboy/videoselector | installation_text.py | 1 | 2688 | #!/usr/bin/python
from pssh import SSHClient, ParallelSSHClient, utils
import datetime
import time
import random
import sys
output = []
hosts = ['client0', 'client1', 'client2','client3', 'client4']
client = ParallelSSHClient(hosts)
values = ["bear","cake","fork","pipe","gun"]
def open_movies(my_values, delay):
choices = list(my_values)
for x in range(len(hosts)):
if x < len(hosts) - 1:
prompt = "Type "
for v in choices:
prompt += v + ", "
prompt = prompt[:-2]
prompt += " :"
choice = get_valid_input(prompt)
choices.remove(choice.lower())
open_movie(choice, x)
else:
choice = choices[0]
open_movie(choice, x)
print("wait {0} seconds".format(delay))
time.sleep(delay)
print("done waiting, back to the command and play idle movies on clients")
cmds = ["~/dbuscontrol.sh stop", "sleep 2", "omxplayer /mnt/usb/media/intro.mp4 --aspect-mode=stretch --loop"]
#run all the commands on all the clients
for cmd in cmds:
client.run_command(cmd, stop_on_errors=False)
#show a prompt to decide what to do next
next = raw_input("Hit return to continue or 'Q' to quit:")
if next == "Q":
print("quitting")
exit()
else:
open_movies()
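# open_movie() targets a single client: it stops any running omxplayer via
# dbuscontrol.sh, then starts a randomly chosen clip (mov_0 to mov_2) from the
# folder named after the chosen word on the mounted USB drive.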
def open_movie(choice, clientID) :
one_client = SSHClient(hosts[clientID])
num = random.randint(0,2)
command = "~/dbuscontrol.sh stop"
one_client.exec_command(command)
command = "omxplayer /mnt/usb/media/" + choice + "/mov_" + str(num) + ".mp4 --aspect-mode=stretch --loop"
one_client.exec_command(command)
print("Opening a " +choice+ " movie, number " + str(num) + " on " + hosts[clientID] + "!")
def get_valid_input(prompt):
while True:
data = raw_input(prompt)
#check if the entered word is in our list of values
if data.lower() not in values:
print("Not an appropriate choice.")
else:
break
return data
#If you need to get a response back from the clients, use this function
#instead of open_movies().
#Note: with the --loop argument in cmds, the process will never quit;
#it requires CTRL-C to end the process.
def open_movies_wait_for_output():
cmds = ["omxplayer /mnt/usb/media/gun/mov_0.mp4 --aspect-mode=stretch --loop"]
start = datetime.datetime.now()
for cmd in cmds:
output.append(client.run_command(cmd, stop_on_errors=False))
end = datetime.datetime.now()
print("Started %s commands on %s host(s) in %s" % (
len(cmds), len(hosts), end-start,))
start = datetime.datetime.now()
for _output in output:
print("waiting for output")
client.join(_output)
print(_output)
end = datetime.datetime.now()
print("All commands finished in %s" % (end-start,))
if __name__ == "__main__":
open_movies(values, 15)
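# Assumed usage (not documented in the original script): run this on the
# controller machine with passwordless SSH access to the hosts listed above,
#   python installation_text.py
# then hit return between rounds, or 'Q' to quit.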
| mit | 5,243,929,373,050,166,000 | 28.538462 | 111 | 0.663318 | false |