repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/rewrite/rewritepolicy_rewriteglobal_binding.py | 1 | 6060 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class rewritepolicy_rewriteglobal_binding(base_resource) :
""" Binding class showing the rewriteglobal that can be bound to rewritepolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._gotopriorityexpression = ""
self._labeltype = ""
self._labelname = ""
self._name = ""
self.___count = 0
@property
def boundto(self) :
"""Location where policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
"""Location where policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def name(self) :
"""Name of the rewrite policy.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the rewrite policy.
"""
try :
self._name = name
except Exception as e:
raise e
@property
def priority(self) :
"""Specifies the priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def labeltype(self) :
"""Type of policy label invocation.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@property
def activepolicy(self) :
"""Indicates whether policy is bound or not.
"""
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(rewritepolicy_rewriteglobal_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.rewritepolicy_rewriteglobal_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch rewritepolicy_rewriteglobal_binding resources.
"""
try :
obj = rewritepolicy_rewriteglobal_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of rewritepolicy_rewriteglobal_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = rewritepolicy_rewriteglobal_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count rewritepolicy_rewriteglobal_binding resources configued on NetScaler.
"""
try :
obj = rewritepolicy_rewriteglobal_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of rewritepolicy_rewriteglobal_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = rewritepolicy_rewriteglobal_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class rewritepolicy_rewriteglobal_binding_response(base_response) :
def __init__(self, length=1) :
self.rewritepolicy_rewriteglobal_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.rewritepolicy_rewriteglobal_binding = [rewritepolicy_rewriteglobal_binding() for _ in range(length)]
| apache-2.0 | -7,462,817,870,828,477,000 | 26.420814 | 137 | 0.701155 | false |
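The binding class above is normally driven through a logged-in nitro_service session. The sketch below is illustrative only and is not part of the record; the NetScaler address, credentials and policy name are placeholders.

from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
from nssrc.com.citrix.netscaler.nitro.resource.config.rewrite.rewritepolicy_rewriteglobal_binding import rewritepolicy_rewriteglobal_binding

# Placeholder address and credentials.
client = nitro_service("192.0.2.10", "http")
client.login("nsroot", "nsroot")

# Fetch every global binding of a rewrite policy and inspect where it is bound.
bindings = rewritepolicy_rewriteglobal_binding.get(client, "my_rewrite_policy") or []
for binding in bindings:
    print("%s %s" % (binding.boundto, binding.priority))

client.logout()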
tysonholub/twilio-python | tests/integration/api/v2010/account/test_outgoing_caller_id.py | 1 | 7342 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class OutgoingCallerIdTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.outgoing_caller_ids(sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/OutgoingCallerIds/PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Fri, 21 Aug 2009 00:11:24 +0000",
"date_updated": "Fri, 21 Aug 2009 00:11:24 +0000",
"friendly_name": "(415) 867-5309",
"phone_number": "+141586753096",
"sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.outgoing_caller_ids(sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.outgoing_caller_ids(sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/OutgoingCallerIds/PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Fri, 21 Aug 2009 00:11:24 +0000",
"date_updated": "Fri, 21 Aug 2009 00:11:24 +0000",
"friendly_name": "(415) 867-5309",
"phone_number": "+141586753096",
"sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.outgoing_caller_ids(sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.outgoing_caller_ids(sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/OutgoingCallerIds/PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.outgoing_caller_ids(sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.outgoing_caller_ids.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/OutgoingCallerIds.json',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json?Page=0&PageSize=50",
"last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json?Page=0&PageSize=50",
"next_page_uri": null,
"num_pages": 1,
"outgoing_caller_ids": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "Fri, 21 Aug 2009 00:11:24 +0000",
"date_updated": "Fri, 21 Aug 2009 00:11:24 +0000",
"friendly_name": "(415) 867-5309",
"phone_number": "+141586753096",
"sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
],
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"start": 0,
"total": 1,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.outgoing_caller_ids.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json?Page=0&PageSize=50",
"last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json?Page=0&PageSize=50",
"next_page_uri": null,
"num_pages": 1,
"outgoing_caller_ids": [],
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"start": 0,
"total": 1,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.outgoing_caller_ids.list()
self.assertIsNotNone(actual)
| mit | -3,558,865,847,001,543,000 | 40.247191 | 150 | 0.573005 | false |
1tush/reviewboard | reviewboard/reviews/models/base_review_request_details.py | 1 | 9919 | from __future__ import unicode_literals
import re
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from djblets.db.fields import JSONField
from reviewboard.diffviewer.models import DiffSet
from reviewboard.reviews.markdown_utils import markdown_escape
from reviewboard.reviews.models.default_reviewer import DefaultReviewer
from reviewboard.scmtools.errors import InvalidChangeNumberError
@python_2_unicode_compatible
class BaseReviewRequestDetails(models.Model):
"""Base information for a review request and draft.
ReviewRequest and ReviewRequestDraft share a lot of fields and
methods. This class provides those fields and methods for those
classes.
"""
MAX_SUMMARY_LENGTH = 300
summary = models.CharField(_("summary"), max_length=MAX_SUMMARY_LENGTH)
description = models.TextField(_("description"), blank=True)
testing_done = models.TextField(_("testing done"), blank=True)
bugs_closed = models.CharField(_("bugs"), max_length=300, blank=True)
branch = models.CharField(_("branch"), max_length=300, blank=True)
rich_text = models.BooleanField(_("rich text"), default=False)
commit_id = models.CharField(_('commit ID'), max_length=64, blank=True,
null=True, db_index=True)
extra_data = JSONField(null=True)
def get_review_request(self):
raise NotImplementedError
def get_bug_list(self):
"""Returns a list of bugs associated with this review request."""
if self.bugs_closed == "":
return []
bugs = list(set(re.split(r"[, ]+", self.bugs_closed)))
# First try a numeric sort, to show the best results for the majority
# case of bug trackers with numeric IDs. If that fails, sort
# alphabetically.
try:
bugs.sort(key=int)
except ValueError:
bugs.sort()
return bugs
def get_screenshots(self):
"""Returns the list of all screenshots on a review request.
This includes all current screenshots, but not previous ones.
By accessing screenshots through this method, future review request
lookups from the screenshots will be avoided.
"""
review_request = self.get_review_request()
for screenshot in self.screenshots.all():
screenshot._review_request = review_request
yield screenshot
def get_inactive_screenshots(self):
"""Returns the list of all inactive screenshots on a review request.
This only includes screenshots that were previously visible but
have since been removed.
By accessing screenshots through this method, future review request
lookups from the screenshots will be avoided.
"""
review_request = self.get_review_request()
for screenshot in self.inactive_screenshots.all():
screenshot._review_request = review_request
yield screenshot
def get_file_attachments(self):
"""Returns the list of all file attachments on a review request.
This includes all current file attachments, but not previous ones.
By accessing file attachments through this method, future review
request lookups from the file attachments will be avoided.
"""
review_request = self.get_review_request()
for file_attachment in self.file_attachments.all():
file_attachment._review_request = review_request
yield file_attachment
def get_inactive_file_attachments(self):
"""Returns all inactive file attachments on a review request.
This only includes file attachments that were previously visible
but have since been removed.
By accessing file attachments through this method, future review
request lookups from the file attachments will be avoided.
"""
review_request = self.get_review_request()
for file_attachment in self.inactive_file_attachments.all():
file_attachment._review_request = review_request
yield file_attachment
def add_default_reviewers(self):
"""Add default reviewers based on the diffset.
This method goes through the DefaultReviewer objects in the database
and adds any missing reviewers based on regular expression comparisons
with the set of files in the diff.
"""
diffset = self.get_latest_diffset()
if not diffset:
return
people = set()
groups = set()
# TODO: This is kind of inefficient, and could maybe be optimized in
# some fancy way. Certainly the most superficial optimization that
# could be made would be to cache the compiled regexes somewhere.
files = diffset.files.all()
reviewers = DefaultReviewer.objects.for_repository(self.repository,
self.local_site)
for default in reviewers:
try:
regex = re.compile(default.file_regex)
except:
continue
for filediff in files:
if regex.match(filediff.source_file or filediff.dest_file):
for person in default.people.all():
people.add(person)
for group in default.groups.all():
groups.add(group)
break
existing_people = self.target_people.all()
for person in people:
if person not in existing_people:
self.target_people.add(person)
existing_groups = self.target_groups.all()
for group in groups:
if group not in existing_groups:
self.target_groups.add(group)
def update_from_commit_id(self, commit_id):
"""Updates the data from a server-side changeset.
If the commit ID refers to a pending changeset on an SCM which stores
such things server-side (like perforce), the details like the summary
and description will be updated with the latest information.
If the change number is the commit ID of a change which exists on the
server, the summary and description will be set from the commit's
message, and the diff will be fetched from the SCM.
"""
scmtool = self.repository.get_scmtool()
changeset = None
if scmtool.supports_pending_changesets:
changeset = scmtool.get_changeset(commit_id, allow_empty=True)
if changeset and changeset.pending:
self.update_from_pending_change(commit_id, changeset)
elif self.repository.supports_post_commit:
self.update_from_committed_change(commit_id)
else:
if changeset:
raise InvalidChangeNumberError()
else:
raise NotImplementedError()
def update_from_pending_change(self, commit_id, changeset):
"""Updates the data from a server-side pending changeset.
This will fetch the metadata from the server and update the fields on
the review request.
"""
if not changeset:
raise InvalidChangeNumberError()
# If the SCM supports changesets, they should always include a number,
# summary and description, parsed from the changeset description. Some
# specialized systems may support the other fields, but we don't want
# to clobber the user-entered values if they don't.
self.commit = commit_id
if self.rich_text:
description = markdown_escape(changeset.description)
testing_done = markdown_escape(changeset.testing_done)
else:
description = changeset.description
testing_done = changeset.testing_done
self.summary = changeset.summary
self.description = description
if testing_done:
self.testing_done = testing_done
if changeset.branch:
self.branch = changeset.branch
if changeset.bugs_closed:
self.bugs_closed = ','.join(changeset.bugs_closed)
def update_from_committed_change(self, commit_id):
"""Updates from a committed change present on the server.
Fetches the commit message and diff from the repository and sets the
relevant fields.
"""
commit = self.repository.get_change(commit_id)
summary, message = commit.split_message()
self.commit = commit_id
self.summary = summary.strip()
if self.rich_text:
self.description = markdown_escape(message.strip())
else:
self.description = message.strip()
DiffSet.objects.create_from_data(
repository=self.repository,
diff_file_name='diff',
diff_file_contents=commit.diff.encode('utf-8'),
parent_diff_file_name=None,
parent_diff_file_contents=None,
diffset_history=self.get_review_request().diffset_history,
basedir='/',
request=None)
def save(self, **kwargs):
self.bugs_closed = self.bugs_closed.strip()
self.summary = self._truncate(self.summary, self.MAX_SUMMARY_LENGTH)
super(BaseReviewRequestDetails, self).save(**kwargs)
def _truncate(self, string, num):
if len(string) > num:
string = string[0:num]
i = string.rfind('.')
if i != -1:
string = string[0:i + 1]
return string
def __str__(self):
if self.summary:
return six.text_type(self.summary)
else:
return six.text_type(_('(no summary)'))
class Meta:
abstract = True
app_label = 'reviews'
| mit | -8,082,577,163,745,702,000 | 34.679856 | 78 | 0.633229 | false |
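A quick worked example of the bug-list normalisation implemented above; details stands for an instance of any concrete subclass and the values are invented.

details.bugs_closed = "203, 7,45 7"
details.get_bug_list()   # -> ['7', '45', '203']  (split on commas/spaces, de-duplicated, numeric sort)

details.bugs_closed = "abc, 12"
details.get_bug_list()   # -> ['12', 'abc']  (non-numeric IDs fall back to alphabetical sort)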
Clarity-89/clarityv2 | src/clarityv2/accounts/managers.py | 1 | 1420 | from django.contrib.auth.models import BaseUserManager
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, username, email, password, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
"""
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
username = self.model.normalize_username(username)
user = self.model(username=username, email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(username, email, password, **extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(username, email, password, **extra_fields)
| mit | -5,557,580,562,345,280,000 | 40.764706 | 79 | 0.661268 | false |
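A minimal sketch of how this manager is typically wired up; the model and import path below are assumptions, not part of the excerpt.

from django.contrib.auth.models import AbstractUser
from clarityv2.accounts.managers import UserManager  # assumed import path

class User(AbstractUser):
    objects = UserManager()

# User.objects.create_user('alice', email='alice@example.com', password='s3cret')
# User.objects.create_superuser('admin', 'admin@example.com', 'topsecret')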
GalPressman/matrigram | docs/conf.py | 1 | 10481 | # -*- coding: utf-8 -*-
#
# matrigram documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 20 11:09:38 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../matrigram'))
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'matrigram'
copyright = u'2016, Gal Pressman & Yuval Fatael'
author = u'Gal Pressman & Yuval Fatael'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'github_user': 'GalPressman',
'github_repo': 'matrigram',
'github_banner': True,
'github_button': True,
'travis_button': True,
'show_powered_by': False,
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'matrigram v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = 'logo.jpg'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
html_sidebars = {
'**': [
'about.html',
'badges.html',
'navigation.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'matrigramdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'matrigram.tex', u'matrigram Documentation',
u'Gal Pressman \\& Yuval Fatael', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'matrigram', u'matrigram Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'matrigram', u'matrigram Documentation',
author, 'matrigram', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| mit | 2,914,533,651,014,203,000 | 27.793956 | 80 | 0.68753 | false |
migasfree/migasfree | migasfree/stats/views/software.py | 1 | 6246 | # -*- coding: utf-8 -*-
import json
from collections import defaultdict
from django.contrib.auth.decorators import login_required
from django.db.models import Count
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import ugettext as _
from ...server.models import Project, Store, Package
from ...catalog.models import Application
def application_by_category():
total = Application.objects.count()
link = '{}?_REPLACE_'.format(
reverse('admin:catalog_application_changelist')
)
data = []
for item in Application.objects.values(
'category',
).annotate(
count=Count('category')
).order_by('-count'):
percent = float(item.get('count')) / total * 100
data.append({
'name': '{}'.format(dict(Application.CATEGORIES)[item.get('category')]),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'category__exact={}'.format(item.get('category'))
),
})
return {
'title': _('Applications / Category'),
'total': total,
'data': json.dumps(data),
'url': link.replace('?_REPLACE_', ''),
}
def application_by_level():
total = Application.objects.count()
link = '{}?_REPLACE_'.format(
reverse('admin:catalog_application_changelist')
)
data = []
for item in Application.objects.values(
'level',
).annotate(
count=Count('level')
).order_by('-count'):
percent = float(item.get('count')) / total * 100
data.append({
'name': '{}'.format(dict(Application.LEVELS)[item.get('level')]),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'level__exact={}'.format(item.get('level'))
),
})
return {
'title': _('Applications / Level'),
'total': total,
'data': json.dumps(data),
'url': link.replace('?_REPLACE_', ''),
}
def package_by_store(user):
total = Package.objects.scope(user).count()
link = '{}?_REPLACE_'.format(
reverse('admin:server_package_changelist')
)
values = defaultdict(list)
for item in Package.objects.scope(user).values(
'project__id', 'store__id', 'store__name'
).annotate(
count=Count('id')
).order_by('project__id', '-count'):
percent = float(item.get('count')) / total * 100
values[item.get('project__id')].append(
{
'name': item.get('store__name'),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}&store__id__exact={}'.format(
item.get('project__id'),
item.get('store__id'),
)
),
}
)
data = []
for project in Project.objects.scope(user).all():
if project.id in values:
count = sum(item.get('value') for item in values[project.id])
percent = float(count) / total * 100
data.append(
{
'name': project.name,
'value': count,
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}'.format(project.id)
),
'data': values[project.id]
}
)
return {
'title': _('Packages / Store'),
'total': total,
'data': json.dumps(data),
'url': link.replace('?_REPLACE_', ''),
}
def store_by_project(user):
total = Store.objects.scope(user).count()
link = '{}?_REPLACE_'.format(
reverse('admin:server_store_changelist')
)
data = []
for item in Store.objects.scope(user).values(
'project__name',
'project__id',
).annotate(
count=Count('id')
).order_by('-count'):
percent = float(item.get('count')) / total * 100
data.append({
'name': item.get('project__name'),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}'.format(item.get('project__id'))
),
})
return {
'title': _('Stores / Project'),
'total': total,
'data': json.dumps(data),
'url': link.replace('?_REPLACE_', ''),
}
@login_required
def stores_summary(request):
user = request.user.userprofile
return render(
request,
'stores_summary.html',
{
'title': _('Stores'),
'chart_options': {
'no_data': _('There are no data to show'),
'reset_zoom': _('Reset Zoom'),
},
'store_by_project': store_by_project(user),
'opts': Store._meta,
}
)
@login_required
def packages_summary(request):
user = request.user.userprofile
return render(
request,
'packages_summary.html',
{
'title': _('Packages/Sets'),
'chart_options': {
'no_data': _('There are no data to show'),
'reset_zoom': _('Reset Zoom'),
},
'package_by_store': package_by_store(user),
'opts': Package._meta,
}
)
@login_required
def applications_summary(request):
return render(
request,
'applications_summary.html',
{
'title': _('Applications'),
'chart_options': {
'no_data': _('There are no data to show'),
'reset_zoom': _('Reset Zoom'),
},
'application_by_category': application_by_category(),
'application_by_level': application_by_level(),
'opts': Application._meta,
}
)
| gpl-3.0 | 6,193,939,635,404,802,000 | 27.651376 | 84 | 0.490394 | false |
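For reference, each chart helper above returns a context dictionary shaped roughly as follows; the values (and the admin URL) are illustrative only.

{
    'title': 'Stores / Project',
    'total': 12,
    'data': '[{"name": "ubuntu-20.04", "value": 7, "y": 58.33, "url": "..."}]',  # JSON string consumed by the chart library
    'url': '/admin/server/store/',  # assumed changelist URL produced by reverse()
}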
kapilgarg1996/mospy | mospy/GUI.py | 1 | 2367 | import os
import pickle
from Tkinter import *
from PIL import ImageTk, Image
import tkMessageBox
import tkFileDialog
from ttk import Frame, Button, Label, Style
from random import randint
from PIL import Image
import mosaic
class MainFrame(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.initUI()
def initUI(self):
self.pack(fill=BOTH, expand=0)
Button(self, text = "Select Image Dataset Directory", command = lambda: openDir(self)).grid(row=0, column=0, pady=5)
self.dirName = StringVar()
Label(self, textvariable=self.dirName).grid(row=0, column=1, columnspan=2, pady=5, sticky=W)
Button(self, text = "Select File", command = lambda: openFile(self)).grid(row=1, column=0, pady=5)
self.fileName = StringVar()
Label(self, textvariable=self.fileName).grid(row=1, column=1, columnspan=2, pady=5, sticky=W)
self.iamgelabel = Label(self)
self.iamgelabel.grid(row=1, column=3)
Label(self, text = "Enter Number of Grids: ").grid(row=2, column=0, pady=5)
self.entry = Entry(self, bd=5)
self.entry.grid(row=2, column=1, pady=5, sticky=W)
Button(self, text = "CREATE", command = lambda: startMosaic(self.dirName.get(), self.fileName.get(), self.entry.get(), self.parent)).grid(row=3, column=0, pady=5)
def openDir(app):
dirName = tkFileDialog.askdirectory(initialdir='./')
app.dirName.set(dirName)
def openFile (app):
dirName = app.dirName.get()
if not os.path.isdir(dirName):
dirName = './'
fileName = tkFileDialog.askopenfilename(initialdir = dirName)
app.fileName.set(fileName)
size = 64, 64
img = Image.open(fileName)
img.thumbnail(size)
imgtk = ImageTk.PhotoImage(img)
app.iamgelabel.configure(image=imgtk)
app.iamgelabel.image = imgtk
def startMosaic(dirName, fileName, num_grids, frame):
wind = Toplevel(frame)
try:
mosaic.build_mosaic(fileName, num_grids=int(num_grids), root=wind, datasetdir=dirName)
except ValueError:
mosaic.build_mosaic(fileName, root=wind, datasetdir=dirName)
def main():
root = Tk()
size = 220, 220
root.title('MOSPY')
app = MainFrame(root)
root.geometry("480x360")
root.mainloop()
| mit | 8,678,145,406,439,883,000 | 28.6 | 170 | 0.646388 | false |
pxg/pxg.github.io | slides/deployment/python/5_two_step_deploy.py | 1 | 2664 | """
By default the HEAD of the current branch will be pushed to the remote server:
fab stage deploy
This can be overridden by providing a hash:
fab stage deploy:2ab1c583e35c99b66079877d49e3ec03812d3e53
If you don't like all the output:
fab stage deploy --hide=stdout
"""
import os
from fabric.api import env, execute, local, parallel
from fabric.operations import run, put
from fabric.context_managers import cd, prefix
from fabric.decorators import roles
def stage():
env.roledefs = {
'web': ['[email protected]', '[email protected]'],
'master': ['[email protected]']
}
env.user = 'ec2-user'
# Note: the site root is now /var/www/goatse.cx/current
# Previous releases can be found in /var/www/goatse.cx/releases/<hash>
env.release_dir = '/var/www/goatse.cx'
env.key_filename = ['~/.ssh/goatse.pem/']
env.git_repo_dir = '/var/www/git_goatase/'
env.venv_activate = '/var/lib/venv/goatse/bin/activate'
def deploy(id='HEAD'):
"""
Main task to update the server from the given commit id; uses the HEAD of
the current branch by default.
"""
release_dir = prepare_deploy(id)
activate_deploy(release_dir)
def prepare_deploy(id='HEAD'):
"""
Execute all steps which can be done in advance of actually switching the site live.
This is done to speed up activating deployments
"""
packaged_code, release_dir = _package_code(id)
execute(deploy_package, packaged_code, release_dir)
execute(install_requirements, release_dir)
execute(backup_database)
execute(collectstatic, release_dir)
_clean_up(packaged_code)
return release_dir
def activate_deploy(release_dir):
"""
Switch the deployment to being live. This is the risk zone where downtime
could potentially happen.
"""
execute(migrate_database, release_dir)
execute(switch_release, release_dir)
execute(reload_server)
def _package_code(id):
"""
Locally compress the git repo into an archive, and generate the release dir
variable
"""
hash = local('git rev-parse %s' % id, capture=True)
file = '%s.tar.gz' % hash
local('git archive --format tar.gz %s -o %s' % (id, file))
release_dir = os.path.join(env.release_dir, 'releases', hash)
return file, release_dir
@parallel
@roles('web')
def deploy_package(file, release_dir):
"""
Move the packaged code to the webservers
"""
run('mkdir -p %s' % release_dir)
put(file, release_dir)
with cd(release_dir):
run('tar -xf %s' % file)
def _clean_up(packaged_code):
"""
Delete the packaged code
"""
local('rm %s' % packaged_code)
| mit | 5,292,689,465,113,000,000 | 26.75 | 79 | 0.67042 | false |
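switch_release, install_requirements and the other tasks referenced by activate_deploy() are not shown in this record. Under that assumption, a common shape for the symlink flip is sketched below.

@parallel
@roles('web')
def switch_release(release_dir):
    # Re-point the live "current" symlink (see env.release_dir above) at the new release.
    run('ln -sfn %s %s' % (release_dir, os.path.join(env.release_dir, 'current')))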
btrent/knave | pychess/Utils/EndgameTable.py | 1 | 2387 | from gobject import GObject, SIGNAL_RUN_FIRST
from Move import Move
from lutils.egtb_k4it import egtb_k4it
from lutils.egtb_gaviota import egtb_gaviota
providers = []
class EndgameTable(GObject):
""" Wrap the low-level providers of exact endgame knowledge. """
__gsignals__ = {
"scored": (SIGNAL_RUN_FIRST, None, (object,)),
}
def __init__ (self):
GObject.__init__(self)
global providers
if not providers:
providers = [ egtb_gaviota(), egtb_k4it() ]
self.providers = providers
def _pieceCounts (self, board):
return sorted([ bin(board.friends[i]).count("1") for i in range(2) ])
def scoreGame (self, lBoard, omitDepth=False, probeSoft=False):
""" Return result and depth to mate. (Intended for engine use.)
lBoard: A low-level board structure
omitDepth: Look up only the game's outcome (may save time)
probeSoft: Fail if the probe would require disk or network access.
Return value:
game_result: Either WHITEWON, DRAW, BLACKWON, or (on failure) None
depth: Depth to mate, or (if omitDepth or the game is drawn) None
"""
pc = self._pieceCounts(lBoard)
for provider in self.providers:
if provider.supports(pc):
result, depth = provider.scoreGame(lBoard, not omitDepth, probeSoft)
if result is not None:
return result, depth
return None, None
def scoreAllMoves (self, lBoard):
""" Return each move's result and depth to mate.
lBoard: A low-level board structure
Return value: a list, with best moves first, of:
move: A high-level move structure
game_result: Either WHITEWON, DRAW, BLACKWON
depth: Depth to mate
"""
pc = self._pieceCounts(lBoard)
for provider in self.providers:
if provider.supports(pc):
results = provider.scoreAllMoves(lBoard)
if results:
ret = []
for lMove, result, depth in results:
ret.append( (Move(lMove), result, depth) )
#self.emit("scored", (lBoard, ret))
return ret
return []
| gpl-3.0 | -4,503,694,866,982,983,700 | 34.626866 | 80 | 0.560117 | false |
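An illustrative probe of the table; lBoard stands for any low-level pychess board object and is assumed to exist.

table = EndgameTable()

result, depth = table.scoreGame(lBoard, omitDepth=False, probeSoft=True)
if result is not None:
    print("tablebase verdict: %s, depth to mate: %s" % (result, depth))

for move, result, depth in table.scoreAllMoves(lBoard):
    print("%s %s %s" % (move, result, depth))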
martey/django-shortcodes | shortcodes/parser.py | 1 | 1822 | import re
import shortcodes.parsers
from django.core.cache import cache
def import_parser(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def parse(value):
ex = re.compile(r'\[(.*?)\]')
groups = ex.findall(value)
pieces = {}
parsed = value
for item in groups:
if ' ' in item:
name, space, args = item.partition(' ')
args = __parse_args__(args)
# If shortcode does not use spaces as a separator, it might use equals
# signs.
elif '=' in item:
name, space, args = item.partition('=')
args = __parse_args__(args)
else:
name = item
args = {}
item = re.escape(item)
try:
if cache.get(item):
parsed = re.sub(r'\[' + item + r'\]', cache.get(item), parsed)
else:
module = import_parser('shortcodes.parsers.' + name)
function = getattr(module, 'parse')
result = function(args)
cache.set(item, result, 3600)
parsed = re.sub(r'\[' + item + r'\]', result, parsed)
except ImportError:
pass
return parsed
def __parse_args__(value):
ex = re.compile(r'[ ]*(\w+)=([^" ]+|"[^"]*")[ ]*(?: |$)')
groups = ex.findall(value)
kwargs = {}
for group in groups:
if group.__len__() == 2:
item_key = group[0]
item_value = group[1]
if item_value.startswith('"'):
if item_value.endswith('"'):
item_value = item_value[1:]
item_value = item_value[:item_value.__len__() - 1]
kwargs[item_key] = item_value
return kwargs
| mit | -7,133,755,439,112,442,000 | 26.606061 | 78 | 0.487925 | false |
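A handler module is resolved as shortcodes.parsers.<name> and must expose parse(kwargs) returning a string. The module below is a made-up example, not part of the package.

# shortcodes/parsers/greet.py (hypothetical handler)
def parse(kwargs):
    return u"Hello, {}!".format(kwargs.get('name', 'world'))

# Elsewhere:
#   from shortcodes.parser import parse
#   parse(u'Say [greet name="Ada"]')  ->  u'Say Hello, Ada!'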
edison7500/dugong | apps/photos/migrations/0004_auto_20200313_1554.py | 1 | 1198 | # Generated by Django 2.2.11 on 2020-03-13 07:54
import apps.images.handlers
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [("photos", "0003_auto_20200306_1424")]
operations = [
migrations.AlterModelOptions(
name="category",
options={"verbose_name": "分类", "verbose_name_plural": "分类"},
),
migrations.AddField(
model_name="exif",
name="shot_time",
field=models.DateTimeField(
db_index=True,
default=django.utils.timezone.now,
editable=False,
),
),
migrations.AlterField(
model_name="category",
name="image",
field=models.ImageField(
blank=True,
null=True,
upload_to=apps.images.handlers.hexdigest_filename,
),
),
migrations.AlterField(
model_name="photo",
name="file",
field=models.ImageField(
upload_to=apps.images.handlers.hexdigest_filename
),
),
]
| gpl-3.0 | -897,384,506,030,921,200 | 27.333333 | 72 | 0.529412 | false |
jn2840/bitcoin | qa/rpc-tests/signrawtransactions.py | 1 | 4633 | #!/usr/bin/env python2
# Copyright (c) 2015 The Beardcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class SignRawTransactionsTest(BitcoinTestFramework):
"""Tests transaction signing via RPC command "signrawtransaction"."""
def setup_chain(self):
print('Initializing test directory ' + self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self, split=False):
self.nodes = start_nodes(1, self.options.tmpdir)
self.is_network_split = False
def successful_signing_test(self):
"""Creates and signs a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
# 1) The transaction has a complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], True)
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def script_verification_error_test(self):
"""Creates and signs a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
# 3) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
| mit | -7,462,409,792,335,607,000 | 41.504587 | 120 | 0.669976 | false |
m3z/HT | openstack_dashboard/dashboards/project/instances/views.py | 1 | 7166 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing instances.
"""
import logging
from django import http
from django import shortcuts
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon import tables
from horizon import workflows
from openstack_dashboard import api
from .forms import UpdateInstance
from .tabs import InstanceDetailTabs
from .tables import InstancesTable
from .workflows import LaunchInstance
LOG = logging.getLogger(__name__)
class IndexView(tables.DataTableView):
table_class = InstancesTable
template_name = 'project/instances/index.html'
def get_data(self):
# Gather our instances
try:
instances = api.server_list(self.request)
except:
instances = []
exceptions.handle(self.request,
_('Unable to retrieve instances.'))
# Gather our flavors and correlate our instances to them
if instances:
try:
flavors = api.flavor_list(self.request)
except:
flavors = []
exceptions.handle(self.request, ignore=True)
full_flavors = SortedDict([(str(flavor.id), flavor)
for flavor in flavors])
# Loop through instances to get flavor info.
for instance in instances:
try:
flavor_id = instance.flavor["id"]
if flavor_id in full_flavors:
instance.full_flavor = full_flavors[flavor_id]
else:
# If the flavor_id is not in full_flavors list,
# get it via nova api.
instance.full_flavor = api.flavor_get(self.request,
flavor_id)
except:
msg = _('Unable to retrieve instance size information.')
exceptions.handle(self.request, msg)
return instances
class LaunchInstanceView(workflows.WorkflowView):
workflow_class = LaunchInstance
template_name = "project/instances/launch.html"
def get_initial(self):
initial = super(LaunchInstanceView, self).get_initial()
initial['project_id'] = self.request.user.tenant_id
initial['user_id'] = self.request.user.id
return initial
def console(request, instance_id):
try:
# TODO(jakedahn): clean this up once the api supports tailing.
tail = request.GET.get('length', None)
data = api.server_console_output(request,
instance_id,
tail_length=tail)
except:
data = _('Unable to get log for instance "%s".') % instance_id
exceptions.handle(request, ignore=True)
response = http.HttpResponse(mimetype='text/plain')
response.write(data)
response.flush()
return response
def vnc(request, instance_id):
try:
console = api.server_vnc_console(request, instance_id)
instance = api.server_get(request, instance_id)
return shortcuts.redirect(console.url +
("&title=%s(%s)" % (instance.name, instance_id)))
except:
redirect = reverse("horizon:project:instances:index")
msg = _('Unable to get VNC console for instance "%s".') % instance_id
exceptions.handle(request, msg, redirect=redirect)
class UpdateView(forms.ModalFormView):
form_class = UpdateInstance
template_name = 'project/instances/update.html'
context_object_name = 'instance'
success_url = reverse_lazy("horizon:project:instances:index")
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context["instance_id"] = self.kwargs['instance_id']
return context
def get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
instance_id = self.kwargs['instance_id']
try:
self._object = api.server_get(self.request, instance_id)
except:
redirect = reverse("horizon:project:instances:index")
msg = _('Unable to retrieve instance details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
return {'instance': self.kwargs['instance_id'],
'tenant_id': self.request.user.tenant_id,
'name': getattr(self.get_object(), 'name', '')}
class DetailView(tabs.TabView):
tab_group_class = InstanceDetailTabs
template_name = 'project/instances/detail.html'
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context["instance"] = self.get_data()
return context
def get_data(self):
if not hasattr(self, "_instance"):
try:
instance_id = self.kwargs['instance_id']
instance = api.server_get(self.request, instance_id)
instance.volumes = api.instance_volumes_list(self.request,
instance_id)
# Sort by device name
instance.volumes.sort(key=lambda vol: vol.device)
instance.full_flavor = api.flavor_get(self.request,
instance.flavor["id"])
instance.security_groups = api.server_security_groups(
self.request, instance_id)
except:
redirect = reverse('horizon:project:instances:index')
exceptions.handle(self.request,
_('Unable to retrieve details for '
'instance "%s".') % instance_id,
redirect=redirect)
self._instance = instance
return self._instance
def get_tabs(self, request, *args, **kwargs):
instance = self.get_data()
return self.tab_group_class(request, instance=instance, **kwargs)
| apache-2.0 | -4,953,366,527,320,025,000 | 37.320856 | 78 | 0.596428 | false |
madsmtm/fbchat | fbchat/utils.py | 1 | 8679 | # -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import re
import json
from time import time
from random import random
import warnings
import logging
from .models import *
try:
from urllib.parse import urlencode
basestring = (str, bytes)
except ImportError:
from urllib import urlencode
basestring = basestring
# Python 2's `input` executes the input, whereas `raw_input` just returns the input
try:
input = raw_input
except NameError:
pass
# Log settings
log = logging.getLogger("client")
log.setLevel(logging.DEBUG)
# Creates the console handler
handler = logging.StreamHandler()
log.addHandler(handler)
#: Default list of user agents
USER_AGENTS = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/601.1.10 (KHTML, like Gecko) Version/8.0.5 Safari/601.1.10",
"Mozilla/5.0 (Windows NT 6.3; WOW64; ; NCT50_AAP285C84A1328) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6"
]
LIKES = {
'large': EmojiSize.LARGE,
'medium': EmojiSize.MEDIUM,
'small': EmojiSize.SMALL,
'l': EmojiSize.LARGE,
'm': EmojiSize.MEDIUM,
's': EmojiSize.SMALL
}
MessageReactionFix = {
'😍': ('0001f60d', '%F0%9F%98%8D'),
'😆': ('0001f606', '%F0%9F%98%86'),
'😮': ('0001f62e', '%F0%9F%98%AE'),
'😢': ('0001f622', '%F0%9F%98%A2'),
'😠': ('0001f620', '%F0%9F%98%A0'),
'👍': ('0001f44d', '%F0%9F%91%8D'),
'👎': ('0001f44e', '%F0%9F%91%8E')
}
GENDERS = {
# For standard requests
0: 'unknown',
1: 'female_singular',
2: 'male_singular',
3: 'female_singular_guess',
4: 'male_singular_guess',
5: 'mixed',
6: 'neuter_singular',
7: 'unknown_singular',
8: 'female_plural',
9: 'male_plural',
10: 'neuter_plural',
11: 'unknown_plural',
# For graphql requests
'UNKNOWN': 'unknown',
'FEMALE': 'female_singular',
'MALE': 'male_singular',
#'': 'female_singular_guess',
#'': 'male_singular_guess',
#'': 'mixed',
'NEUTER': 'neuter_singular',
#'': 'unknown_singular',
#'': 'female_plural',
#'': 'male_plural',
#'': 'neuter_plural',
#'': 'unknown_plural',
}
class ReqUrl(object):
"""A class containing all urls used by `fbchat`"""
SEARCH = "https://www.facebook.com/ajax/typeahead/search.php"
LOGIN = "https://m.facebook.com/login.php?login_attempt=1"
SEND = "https://www.facebook.com/messaging/send/"
UNREAD_THREADS = "https://www.facebook.com/ajax/mercury/unread_threads.php"
UNSEEN_THREADS = "https://www.facebook.com/mercury/unseen_thread_ids/"
THREADS = "https://www.facebook.com/ajax/mercury/threadlist_info.php"
MESSAGES = "https://www.facebook.com/ajax/mercury/thread_info.php"
READ_STATUS = "https://www.facebook.com/ajax/mercury/change_read_status.php"
DELIVERED = "https://www.facebook.com/ajax/mercury/delivery_receipts.php"
MARK_SEEN = "https://www.facebook.com/ajax/mercury/mark_seen.php"
BASE = "https://www.facebook.com"
MOBILE = "https://m.facebook.com/"
STICKY = "https://0-edge-chat.facebook.com/pull"
PING = "https://0-edge-chat.facebook.com/active_ping"
UPLOAD = "https://upload.facebook.com/ajax/mercury/upload.php"
INFO = "https://www.facebook.com/chat/user_info/"
CONNECT = "https://www.facebook.com/ajax/add_friend/action.php?dpr=1"
REMOVE_USER = "https://www.facebook.com/chat/remove_participants/"
LOGOUT = "https://www.facebook.com/logout.php"
ALL_USERS = "https://www.facebook.com/chat/user_info_all"
SAVE_DEVICE = "https://m.facebook.com/login/save-device/cancel/"
CHECKPOINT = "https://m.facebook.com/login/checkpoint/"
THREAD_COLOR = "https://www.facebook.com/messaging/save_thread_color/?source=thread_settings&dpr=1"
THREAD_NICKNAME = "https://www.facebook.com/messaging/save_thread_nickname/?source=thread_settings&dpr=1"
THREAD_EMOJI = "https://www.facebook.com/messaging/save_thread_emoji/?source=thread_settings&dpr=1"
MESSAGE_REACTION = "https://www.facebook.com/webgraphql/mutation"
TYPING = "https://www.facebook.com/ajax/messaging/typ.php"
GRAPHQL = "https://www.facebook.com/api/graphqlbatch/"
ATTACHMENT_PHOTO = "https://www.facebook.com/mercury/attachments/photo/"
EVENT_REMINDER = "https://www.facebook.com/ajax/eventreminder/create"
MODERN_SETTINGS_MENU = "https://www.facebook.com/bluebar/modern_settings_menu/"
pull_channel = 0
def change_pull_channel(self, channel=None):
if channel is None:
self.pull_channel = (self.pull_channel + 1) % 5 # Pull channel will be 0-4
else:
self.pull_channel = channel
self.STICKY = "https://{}-edge-chat.facebook.com/pull".format(self.pull_channel)
self.PING = "https://{}-edge-chat.facebook.com/active_ping".format(self.pull_channel)
facebookEncoding = 'UTF-8'
def now():
return int(time()*1000)
def strip_to_json(text):
try:
return text[text.index('{'):]
except ValueError:
        # Calling text.index('{') again here would just raise ValueError a second time,
        # so only report the offending payload.
        raise FBchatException('No JSON object found: {}'.format(repr(text)))
def get_decoded_r(r):
return get_decoded(r._content)
def get_decoded(content):
return content.decode(facebookEncoding)
def parse_json(content):
return json.loads(content)
def get_json(r):
return json.loads(strip_to_json(get_decoded_r(r)))
def digitToChar(digit):
if digit < 10:
return str(digit)
return chr(ord('a') + digit - 10)
def str_base(number, base):
if number < 0:
return '-' + str_base(-number, base)
(d, m) = divmod(number, base)
if d > 0:
return str_base(d, base) + digitToChar(m)
return digitToChar(m)
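# Worked example (added for clarity, not in the original source): str_base(255, 16) == 'ff'
# and str_base(10, 36) == 'a', since digits above 9 are mapped onto lowercase letters by digitToChar.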
def generateMessageID(client_id=None):
k = now()
l = int(random() * 4294967295)
return "<{}:{}-{}@mail.projektitan.com>".format(k, l, client_id)
def getSignatureID():
return hex(int(random() * 2147483648))
def generateOfflineThreadingID():
ret = now()
value = int(random() * 4294967295)
string = ("0000000000000000000000" + format(value, 'b'))[-22:]
msgs = format(ret, 'b') + string
return str(int(msgs, 2))
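# Illustrative note (not in the original source): the returned id concatenates the binary
# millisecond timestamp with 22 random low bits, i.e. int(format(now(), 'b') + <22 random bits>, 2).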
def check_json(j):
if j.get('error') is None:
return
if 'errorDescription' in j:
        # 'errorDescription' is in the user's own language!
raise FBchatFacebookError('Error #{} when sending request: {}'.format(j['error'], j['errorDescription']), fb_error_code=j['error'], fb_error_message=j['errorDescription'])
elif 'debug_info' in j['error'] and 'code' in j['error']:
raise FBchatFacebookError('Error #{} when sending request: {}'.format(j['error']['code'], repr(j['error']['debug_info'])), fb_error_code=j['error']['code'], fb_error_message=j['error']['debug_info'])
else:
raise FBchatFacebookError('Error {} when sending request'.format(j['error']), fb_error_code=j['error'])
def check_request(r, as_json=True):
if not r.ok:
raise FBchatFacebookError('Error when sending request: Got {} response'.format(r.status_code), request_status_code=r.status_code)
content = get_decoded_r(r)
if content is None or len(content) == 0:
raise FBchatFacebookError('Error when sending request: Got empty response')
if as_json:
content = strip_to_json(content)
try:
j = json.loads(content)
except ValueError:
raise FBchatFacebookError('Error while parsing JSON: {}'.format(repr(content)))
check_json(j)
return j
else:
return content
def get_jsmods_require(j, index):
if j.get('jsmods') and j['jsmods'].get('require'):
try:
return j['jsmods']['require'][0][index][0]
except (KeyError, IndexError) as e:
log.warning('Error when getting jsmods_require: {}. Facebook might have changed protocol'.format(j))
return None
def get_emojisize_from_tags(tags):
if tags is None:
return None
tmp = [tag for tag in tags if tag.startswith('hot_emoji_size:')]
if len(tmp) > 0:
try:
return LIKES[tmp[0].split(':')[1]]
except (KeyError, IndexError):
log.exception('Could not determine emoji size from {} - {}'.format(tags, tmp))
return None
| bsd-3-clause | -4,507,116,148,179,314,000 | 35.686441 | 207 | 0.647263 | false |
Azure/azure-storage-python | samples/queue/encryption_usage.py | 1 | 7709 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import uuid
from os import urandom
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import (
OAEP,
MGF1,
)
from cryptography.hazmat.primitives.asymmetric.rsa import generate_private_key
from cryptography.hazmat.primitives.hashes import SHA1
from cryptography.hazmat.primitives.keywrap import (
aes_key_wrap,
aes_key_unwrap,
)
# Error message raised by the samples below for unsupported key-wrap algorithms; defined
# locally so the sample runs standalone (it is not imported above).
_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.'
# Sample implementations of the encryption-related interfaces.
class KeyWrapper:
def __init__(self, kid):
self.kek = urandom(32)
self.backend = default_backend()
self.kid = 'local:' + kid
def wrap_key(self, key, algorithm='A256KW'):
if algorithm == 'A256KW':
return aes_key_wrap(self.kek, key, self.backend)
raise ValueError(_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM)
def unwrap_key(self, key, algorithm):
if algorithm == 'A256KW':
return aes_key_unwrap(self.kek, key, self.backend)
raise ValueError(_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM)
def get_key_wrap_algorithm(self):
return 'A256KW'
def get_kid(self):
return self.kid
class KeyResolver:
def __init__(self):
self.keys = {}
def put_key(self, key):
self.keys[key.get_kid()] = key
def resolve_key(self, kid):
return self.keys[kid]
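# Usage sketch (illustrative, not part of the original sample): pair a wrapper with a resolver
# so messages encrypted with the wrapper can be decrypted later by key id.
#   kek = KeyWrapper('key1')          # kid becomes 'local:key1'
#   resolver = KeyResolver()
#   resolver.put_key(kek)
#   assert resolver.resolve_key('local:key1') is kek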
class RSAKeyWrapper:
def __init__(self, kid):
self.private_key = generate_private_key(public_exponent=65537,
key_size=2048,
backend=default_backend())
self.public_key = self.private_key.public_key()
self.kid = 'local:' + kid
def wrap_key(self, key, algorithm='RSA'):
if algorithm == 'RSA':
return self.public_key.encrypt(key,
OAEP(
mgf=MGF1(algorithm=SHA1()),
algorithm=SHA1(),
label=None)
)
raise ValueError(_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM)
def unwrap_key(self, key, algorithm):
if algorithm == 'RSA':
return self.private_key.decrypt(key,
OAEP(
mgf=MGF1(algorithm=SHA1()),
algorithm=SHA1(),
label=None)
)
raise ValueError(_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM)
def get_key_wrap_algorithm(self):
return 'RSA'
def get_kid(self):
return self.kid
class QueueEncryptionSamples():
def __init__(self, account):
self.account = account
def run_all_samples(self):
self.service = self.account.create_queue_service()
self.put_encrypted_message()
self.peek_get_update_encrypted()
self.decrypt_with_key_encryption_key()
self.require_encryption()
self.alternate_key_algorithms()
def _get_queue_reference(self, prefix='queue'):
queue_name = '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))
return queue_name
def _create_queue(self, prefix='queue'):
queue_name = self._get_queue_reference(prefix)
self.service.create_queue(queue_name)
return queue_name
def put_encrypted_message(self):
queue_name = self._create_queue()
# KeyWrapper implements the key encryption key interface
# outlined in the get/update message documentation.
# Setting the key_encryption_key property will tell these
# APIs to encrypt messages.
self.service.key_encryption_key = KeyWrapper('key1')
self.service.put_message(queue_name, 'message1')
self.service.delete_queue(queue_name)
def peek_get_update_encrypted(self):
queue_name = self._create_queue()
# The KeyWrapper is still needed for encryption
self.service.key_encryption_key = KeyWrapper('key1')
self.service.put_message(queue_name, 'message1')
# KeyResolver is used to resolve a key from its id.
# Its interface is defined in the get/peek messages documentation.
key_resolver = KeyResolver()
key_resolver.put_key(self.service.key_encryption_key)
self.service.key_resolver_function = key_resolver.resolve_key
self.service.peek_messages(queue_name)
messages = self.service.get_messages(queue_name)
self.service.update_message(queue_name,
messages[0].id,
messages[0].pop_receipt,
0,
content='encrypted_message2')
self.service.delete_queue(queue_name)
def require_encryption(self):
queue_name = self._create_queue()
self.service.put_message(queue_name, 'Not encrypted')
# Set the require_encryption property on the service to
# ensure all messages sent/received are encrypted.
self.service.require_encryption = True
# If the property is set, but no kek is specified upon
# upload, the method will throw.
try:
self.service.put_message(queue_name, 'message1')
except:
pass
self.service.key_encryption_key = KeyWrapper('key1')
self.service.key_resolver_function = KeyResolver()
self.service.key_resolver_function.put_key(self.service.key_encryption_key)
# If encryption is required, but a retrieved message is not
# encrypted, the method will throw.
try:
            # peek_messages(queue_name) is the QueueService API for reading without dequeueing;
            # it raises here because the message above was stored unencrypted.
            self.service.peek_messages(queue_name)
except:
pass
self.service.delete_queue(queue_name)
def alternate_key_algorithms(self):
queue_name = self._create_queue()
# To use an alternate method of key wrapping, simply set the
# key_encryption_key property to a wrapper that uses a different algorithm.
self.service.key_encryption_key = RSAKeyWrapper('key2')
self.service.key_resolver_function = None
self.service.put_message(queue_name, 'message')
key_resolver = KeyResolver()
key_resolver.put_key(self.service.key_encryption_key)
self.service.key_resolver_function = key_resolver.resolve_key
message = self.service.peek_messages(queue_name)
self.service.delete_queue(queue_name)
def decrypt_with_key_encryption_key(self):
queue_name = self._create_queue()
# The KeyWrapper object also defines methods necessary for
# decryption as defined in the get/peek messages documentation.
# Since the key_encryption_key property is still set, messages
# will be decrypted automatically.
kek = KeyWrapper('key1')
self.service.key_encryption_key = kek
self.service.put_message(queue_name, 'message1')
# When decrypting, if both a kek and resolver are set,
# the resolver will take precedence. Remove the resolver to just use the kek.
self.service.key_resolver_function = None
messages = self.service.peek_messages(queue_name)
self.service.delete_queue(queue_name)
| mit | 5,391,607,162,951,025,000 | 35.192488 | 85 | 0.586587 | false |
communityshare/communityshare | community_share/flask_helpers.py | 1 | 2439 | from functools import wraps
from flask import request
from typing import Dict
from community_share.app_exceptions import Unauthorized, Forbidden
from community_share.authorization import get_requesting_user
from community_share.models.user import User
from community_share.models.base import Serializable
def api_path(path, query_args={}):
query = []
for name, values in query_args.items():
if not isinstance(values, list):
values = [values]
query += ['{}={}'.format(name, value) for value in values]
return '{base_url}rest/{path}{query}'.format(
base_url=request.url_root,
path=path,
query='?{}'.format('&'.join(query)) if query else ''
)
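# Example (illustrative, assuming request.url_root is 'https://example.org/'):
#   api_path('users', {'page': 2}) -> 'https://example.org/rest/users?page=2'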
def needs_auth(auth_level='user'):
def needs_auth_decorator(f):
@wraps(f)
def auth_check(*args, **kwargs):
# Don't use
#
# user = kwargs.pop('requester', get_requesting_user())
#
# here because the eager execution of get_requesting_user
# will force us to be in flask app context during any test
# that uses a @needs_auth() method, and that makes unit
# tests harder.
if 'requester' in kwargs:
user = kwargs.pop('requester')
else:
user = get_requesting_user()
if user is None:
raise Unauthorized()
if 'admin' == auth_level and not user.is_administrator:
raise Unauthorized()
return f(*args, requester=user, **kwargs)
return auth_check
return needs_auth_decorator
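# Usage sketch (hypothetical endpoint, not from this module): the decorator injects the
# authenticated user as the `requester` keyword argument, e.g.
#   @needs_auth('admin')
#   def delete_item(item_id, requester=None): ...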
def needs_admin_auth():
return needs_auth('admin')
def serialize(user, raw_item, fields=None):
if raw_item is None:
return None
item = raw_item.serialize(user)
if item is None:
return None
if fields is None:
return item
return {key: item[key] for key in item if key in fields + ['id']}
def serialize_many(user, raw_items, fields=None):
items = [serialize(user, item, fields) for item in raw_items]
return [item for item in items if item is not None]
def make_OK_response(message='OK'):
return {'message': message}
def make_single_response(requester: User, item: Serializable) -> Dict[str, Dict]:
serialized = serialize(requester, item)
if serialized is None:
raise Forbidden()
return {'data': serialized}
| mpl-2.0 | 6,863,411,846,151,159,000 | 25.802198 | 81 | 0.611316 | false |
Edraak/edraak-platform | lms/envs/aws.py | 1 | 53621 | # -*- coding: utf-8 -*-
"""
This is the default template for our main set of AWS servers.
Common traits:
* Use memcached, and cache-backed sessions
* Use a MySQL 5.1 database
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import datetime
import json
import dateutil
from .common import *
from openedx.core.lib.derived import derive_settings
from openedx.core.lib.logsettings import get_logger_config
import os
from path import Path as path
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is use, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
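# Example (illustrative): with SERVICE_VARIANT='lms', CONFIG_PREFIX becomes 'lms.' and the
# files read below are <CONFIG_ROOT>/lms.env.json and <CONFIG_ROOT>/lms.auth.json.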
################################ ALWAYS THE SAME ##############################
DEBUG = False
DEFAULT_TEMPLATE_ENGINE['OPTIONS']['debug'] = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
# See
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 60.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
HIGH_MEM_QUEUE = 'edx.{0}core.high_mem'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {},
HIGH_MEM_QUEUE: {},
}
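# Example (illustrative): with CONFIG_PREFIX='lms.' the queues above resolve to
# 'edx.lms.core.default', 'edx.lms.core.high', 'edx.lms.core.low' and 'edx.lms.core.high_mem'.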
CELERY_ROUTES = "{}celery.Router".format(QUEUE_VARIANT)
CELERYBEAT_SCHEDULE = {} # For scheduling tasks, entries can be added to this dict
########################## NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
# STATIC_ROOT specifies the directory where static files are
# collected
STATIC_ROOT_BASE = ENV_TOKENS.get('STATIC_ROOT_BASE', None)
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE)
WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = STATIC_ROOT / "webpack-stats.json"
# STATIC_URL_BASE specifies the base url to use for static files
STATIC_URL_BASE = ENV_TOKENS.get('STATIC_URL_BASE', None)
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
# DEFAULT_COURSE_ABOUT_IMAGE_URL specifies the default image to show for courses that don't provide one
DEFAULT_COURSE_ABOUT_IMAGE_URL = ENV_TOKENS.get('DEFAULT_COURSE_ABOUT_IMAGE_URL', DEFAULT_COURSE_ABOUT_IMAGE_URL)
# COURSE_MODE_DEFAULTS specifies the course mode to use for courses that do not set one
COURSE_MODE_DEFAULTS = ENV_TOKENS.get('COURSE_MODE_DEFAULTS', COURSE_MODE_DEFAULTS)
# MEDIA_ROOT specifies the directory where user-uploaded files are stored.
MEDIA_ROOT = ENV_TOKENS.get('MEDIA_ROOT', MEDIA_ROOT)
MEDIA_URL = ENV_TOKENS.get('MEDIA_URL', MEDIA_URL)
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', PLATFORM_NAME)
PLATFORM_DESCRIPTION = ENV_TOKENS.get('PLATFORM_DESCRIPTION', PLATFORM_DESCRIPTION)
PLATFORM_TWITTER_ACCOUNT = ENV_TOKENS.get('PLATFORM_TWITTER_ACCOUNT', PLATFORM_TWITTER_ACCOUNT)
PLATFORM_FACEBOOK_ACCOUNT = ENV_TOKENS.get('PLATFORM_FACEBOOK_ACCOUNT', PLATFORM_FACEBOOK_ACCOUNT)
SOCIAL_SHARING_SETTINGS = ENV_TOKENS.get('SOCIAL_SHARING_SETTINGS', SOCIAL_SHARING_SETTINGS)
# Social media links for the page footer
SOCIAL_MEDIA_FOOTER_URLS = ENV_TOKENS.get('SOCIAL_MEDIA_FOOTER_URLS', SOCIAL_MEDIA_FOOTER_URLS)
# For displaying on the receipt. At Stanford PLATFORM_NAME != MERCHANT_NAME, but PLATFORM_NAME is a fine default.
CC_MERCHANT_NAME = ENV_TOKENS.get('CC_MERCHANT_NAME', PLATFORM_NAME)
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', 'localhost') # django default is localhost
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', 25) # django default is 25
EMAIL_USE_TLS = ENV_TOKENS.get('EMAIL_USE_TLS', False) # django default is False
SITE_NAME = ENV_TOKENS['SITE_NAME']
HTTPS = ENV_TOKENS.get('HTTPS', HTTPS)
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)
SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_COOKIE_HTTPONLY = ENV_TOKENS.get('SESSION_COOKIE_HTTPONLY', True)
SESSION_COOKIE_SECURE = ENV_TOKENS.get('SESSION_COOKIE_SECURE', SESSION_COOKIE_SECURE)
SESSION_SAVE_EVERY_REQUEST = ENV_TOKENS.get('SESSION_SAVE_EVERY_REQUEST', SESSION_SAVE_EVERY_REQUEST)
AWS_SES_REGION_NAME = ENV_TOKENS.get('AWS_SES_REGION_NAME', 'us-east-1')
AWS_SES_REGION_ENDPOINT = ENV_TOKENS.get('AWS_SES_REGION_ENDPOINT', 'email.us-east-1.amazonaws.com')
REGISTRATION_EXTRA_FIELDS = ENV_TOKENS.get('REGISTRATION_EXTRA_FIELDS', REGISTRATION_EXTRA_FIELDS)
REGISTRATION_EXTENSION_FORM = ENV_TOKENS.get('REGISTRATION_EXTENSION_FORM', REGISTRATION_EXTENSION_FORM)
REGISTRATION_EMAIL_PATTERNS_ALLOWED = ENV_TOKENS.get('REGISTRATION_EMAIL_PATTERNS_ALLOWED')
REGISTRATION_FIELD_ORDER = ENV_TOKENS.get('REGISTRATION_FIELD_ORDER', REGISTRATION_FIELD_ORDER)
# Set the names of cookies shared with the marketing site
# These have the same cookie domain as the session, which in production
# usually includes subdomains.
EDXMKTG_LOGGED_IN_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_LOGGED_IN_COOKIE_NAME', EDXMKTG_LOGGED_IN_COOKIE_NAME)
EDXMKTG_USER_INFO_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_USER_INFO_COOKIE_NAME', EDXMKTG_USER_INFO_COOKIE_NAME)
LMS_ROOT_URL = ENV_TOKENS.get('LMS_ROOT_URL')
LMS_INTERNAL_ROOT_URL = ENV_TOKENS.get('LMS_INTERNAL_ROOT_URL', LMS_ROOT_URL)
ENV_FEATURES = ENV_TOKENS.get('FEATURES', {})
for feature, value in ENV_FEATURES.items():
FEATURES[feature] = value
CMS_BASE = ENV_TOKENS.get('CMS_BASE', 'studio.edx.org')
ALLOWED_HOSTS = [
# TODO: bbeggs remove this before prod, temp fix to get load testing running
"*",
ENV_TOKENS.get('LMS_BASE'),
FEATURES['PREVIEW_LMS_BASE'],
]
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
    # NOTE: there's a bug in Python (http://bugs.python.org/issue18012) which necessitates this being a str()
SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))
CACHES = ENV_TOKENS['CACHES']
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
CACHES['loc_cache'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
}
# Email overrides
DEFAULT_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_FROM_EMAIL', DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get('DEFAULT_FEEDBACK_EMAIL', DEFAULT_FEEDBACK_EMAIL)
ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
TECH_SUPPORT_EMAIL = ENV_TOKENS.get('TECH_SUPPORT_EMAIL', TECH_SUPPORT_EMAIL)
CONTACT_EMAIL = ENV_TOKENS.get('CONTACT_EMAIL', CONTACT_EMAIL)
BUGS_EMAIL = ENV_TOKENS.get('BUGS_EMAIL', BUGS_EMAIL)
PAYMENT_SUPPORT_EMAIL = ENV_TOKENS.get('PAYMENT_SUPPORT_EMAIL', PAYMENT_SUPPORT_EMAIL)
FINANCE_EMAIL = ENV_TOKENS.get('FINANCE_EMAIL', FINANCE_EMAIL)
UNIVERSITY_EMAIL = ENV_TOKENS.get('UNIVERSITY_EMAIL', UNIVERSITY_EMAIL)
PRESS_EMAIL = ENV_TOKENS.get('PRESS_EMAIL', PRESS_EMAIL)
CONTACT_MAILING_ADDRESS = ENV_TOKENS.get('CONTACT_MAILING_ADDRESS', CONTACT_MAILING_ADDRESS)
# Account activation email sender address
ACTIVATION_EMAIL_FROM_ADDRESS = ENV_TOKENS.get('ACTIVATION_EMAIL_FROM_ADDRESS', ACTIVATION_EMAIL_FROM_ADDRESS)
# Currency
PAID_COURSE_REGISTRATION_CURRENCY = ENV_TOKENS.get('PAID_COURSE_REGISTRATION_CURRENCY',
PAID_COURSE_REGISTRATION_CURRENCY)
# Payment Report Settings
PAYMENT_REPORT_GENERATOR_GROUP = ENV_TOKENS.get('PAYMENT_REPORT_GENERATOR_GROUP', PAYMENT_REPORT_GENERATOR_GROUP)
# Bulk Email overrides
BULK_EMAIL_DEFAULT_FROM_EMAIL = ENV_TOKENS.get('BULK_EMAIL_DEFAULT_FROM_EMAIL', BULK_EMAIL_DEFAULT_FROM_EMAIL)
BULK_EMAIL_EMAILS_PER_TASK = ENV_TOKENS.get('BULK_EMAIL_EMAILS_PER_TASK', BULK_EMAIL_EMAILS_PER_TASK)
BULK_EMAIL_DEFAULT_RETRY_DELAY = ENV_TOKENS.get('BULK_EMAIL_DEFAULT_RETRY_DELAY', BULK_EMAIL_DEFAULT_RETRY_DELAY)
BULK_EMAIL_MAX_RETRIES = ENV_TOKENS.get('BULK_EMAIL_MAX_RETRIES', BULK_EMAIL_MAX_RETRIES)
BULK_EMAIL_INFINITE_RETRY_CAP = ENV_TOKENS.get('BULK_EMAIL_INFINITE_RETRY_CAP', BULK_EMAIL_INFINITE_RETRY_CAP)
BULK_EMAIL_LOG_SENT_EMAILS = ENV_TOKENS.get('BULK_EMAIL_LOG_SENT_EMAILS', BULK_EMAIL_LOG_SENT_EMAILS)
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = ENV_TOKENS.get(
'BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS',
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS
)
# We want Bulk Email running on the high-priority queue, so we define the
# routing key that points to it. At the moment, the name is the same.
# We have to reset the value here, since we have changed the value of the queue name.
BULK_EMAIL_ROUTING_KEY = ENV_TOKENS.get('BULK_EMAIL_ROUTING_KEY', HIGH_PRIORITY_QUEUE)
# We can run smaller jobs on the low priority queue. See note above for why
# we have to reset the value here.
BULK_EMAIL_ROUTING_KEY_SMALL_JOBS = ENV_TOKENS.get('BULK_EMAIL_ROUTING_KEY_SMALL_JOBS', LOW_PRIORITY_QUEUE)
# Queue to use for expiring old entitlements
ENTITLEMENTS_EXPIRATION_ROUTING_KEY = ENV_TOKENS.get('ENTITLEMENTS_EXPIRATION_ROUTING_KEY', LOW_PRIORITY_QUEUE)
# Message expiry time in seconds
CELERY_EVENT_QUEUE_TTL = ENV_TOKENS.get('CELERY_EVENT_QUEUE_TTL', None)
# Allow CELERY_QUEUES to be overwritten by ENV_TOKENS,
ENV_CELERY_QUEUES = ENV_TOKENS.get('CELERY_QUEUES', None)
if ENV_CELERY_QUEUES:
CELERY_QUEUES = {queue: {} for queue in ENV_CELERY_QUEUES}
# Then add alternate environment queues
ALTERNATE_QUEUE_ENVS = ENV_TOKENS.get('ALTERNATE_WORKER_QUEUES', '').split()
ALTERNATE_QUEUES = [
DEFAULT_PRIORITY_QUEUE.replace(QUEUE_VARIANT, alternate + '.')
for alternate in ALTERNATE_QUEUE_ENVS
]
CELERY_QUEUES.update(
{
alternate: {}
for alternate in ALTERNATE_QUEUES
if alternate not in CELERY_QUEUES.keys()
}
)
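# Example (illustrative): ALTERNATE_WORKER_QUEUES='cms' adds 'edx.cms.core.default'
# alongside the LMS queues, so this process can enqueue tasks for CMS workers.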
# following setting is for backward compatibility
if ENV_TOKENS.get('COMPREHENSIVE_THEME_DIR', None):
COMPREHENSIVE_THEME_DIR = ENV_TOKENS.get('COMPREHENSIVE_THEME_DIR')
COMPREHENSIVE_THEME_DIRS = ENV_TOKENS.get('COMPREHENSIVE_THEME_DIRS', COMPREHENSIVE_THEME_DIRS) or []
# COMPREHENSIVE_THEME_LOCALE_PATHS contain the paths to themes locale directories e.g.
# "COMPREHENSIVE_THEME_LOCALE_PATHS" : [
# "/edx/src/edx-themes/conf/locale"
# ],
COMPREHENSIVE_THEME_LOCALE_PATHS = ENV_TOKENS.get('COMPREHENSIVE_THEME_LOCALE_PATHS', [])
DEFAULT_SITE_THEME = ENV_TOKENS.get('DEFAULT_SITE_THEME', DEFAULT_SITE_THEME)
ENABLE_COMPREHENSIVE_THEMING = ENV_TOKENS.get('ENABLE_COMPREHENSIVE_THEMING', ENABLE_COMPREHENSIVE_THEMING)
# Marketing link overrides
MKTG_URL_LINK_MAP.update(ENV_TOKENS.get('MKTG_URL_LINK_MAP', {}))
# Intentional defaults.
SUPPORT_SITE_LINK = ENV_TOKENS.get('SUPPORT_SITE_LINK', SUPPORT_SITE_LINK)
ID_VERIFICATION_SUPPORT_LINK = ENV_TOKENS.get('ID_VERIFICATION_SUPPORT_LINK', SUPPORT_SITE_LINK)
PASSWORD_RESET_SUPPORT_LINK = ENV_TOKENS.get('PASSWORD_RESET_SUPPORT_LINK', SUPPORT_SITE_LINK)
ACTIVATION_EMAIL_SUPPORT_LINK = ENV_TOKENS.get(
'ACTIVATION_EMAIL_SUPPORT_LINK', SUPPORT_SITE_LINK
)
# Mobile store URL overrides
MOBILE_STORE_URLS = ENV_TOKENS.get('MOBILE_STORE_URLS', MOBILE_STORE_URLS)
# Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)
# Translation overrides
LANGUAGES = ENV_TOKENS.get('LANGUAGES', LANGUAGES)
CERTIFICATE_TEMPLATE_LANGUAGES = ENV_TOKENS.get('CERTIFICATE_TEMPLATE_LANGUAGES', CERTIFICATE_TEMPLATE_LANGUAGES)
LANGUAGE_DICT = dict(LANGUAGES)
LANGUAGE_CODE = ENV_TOKENS.get('LANGUAGE_CODE', LANGUAGE_CODE)
LANGUAGE_COOKIE = ENV_TOKENS.get('LANGUAGE_COOKIE', LANGUAGE_COOKIE)
ALL_LANGUAGES = ENV_TOKENS.get('ALL_LANGUAGES', ALL_LANGUAGES)
USE_I18N = ENV_TOKENS.get('USE_I18N', USE_I18N)
# Additional installed apps
for app in ENV_TOKENS.get('ADDL_INSTALLED_APPS', []):
INSTALLED_APPS.append(app)
WIKI_ENABLED = ENV_TOKENS.get('WIKI_ENABLED', WIKI_ENABLED)
local_loglevel = ENV_TOKENS.get('LOCAL_LOGLEVEL', 'INFO')
LOG_DIR = ENV_TOKENS['LOG_DIR']
DATA_DIR = path(ENV_TOKENS.get('DATA_DIR', DATA_DIR))
LOGGING = get_logger_config(LOG_DIR,
logging_env=ENV_TOKENS['LOGGING_ENV'],
local_loglevel=local_loglevel,
service_variant=SERVICE_VARIANT)
COURSE_LISTINGS = ENV_TOKENS.get('COURSE_LISTINGS', {})
COMMENTS_SERVICE_URL = ENV_TOKENS.get("COMMENTS_SERVICE_URL", '')
COMMENTS_SERVICE_KEY = ENV_TOKENS.get("COMMENTS_SERVICE_KEY", '')
CERT_NAME_SHORT = ENV_TOKENS.get('CERT_NAME_SHORT', CERT_NAME_SHORT)
CERT_NAME_LONG = ENV_TOKENS.get('CERT_NAME_LONG', CERT_NAME_LONG)
CERT_QUEUE = ENV_TOKENS.get("CERT_QUEUE", 'test-pull')
ZENDESK_URL = ENV_TOKENS.get('ZENDESK_URL', ZENDESK_URL)
ZENDESK_CUSTOM_FIELDS = ENV_TOKENS.get('ZENDESK_CUSTOM_FIELDS', ZENDESK_CUSTOM_FIELDS)
FEEDBACK_SUBMISSION_EMAIL = ENV_TOKENS.get("FEEDBACK_SUBMISSION_EMAIL")
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)
# Badgr API
BADGR_API_TOKEN = ENV_TOKENS.get('BADGR_API_TOKEN', BADGR_API_TOKEN)
BADGR_BASE_URL = ENV_TOKENS.get('BADGR_BASE_URL', BADGR_BASE_URL)
BADGR_ISSUER_SLUG = ENV_TOKENS.get('BADGR_ISSUER_SLUG', BADGR_ISSUER_SLUG)
BADGR_TIMEOUT = ENV_TOKENS.get('BADGR_TIMEOUT', BADGR_TIMEOUT)
# git repo loading environment
GIT_REPO_DIR = ENV_TOKENS.get('GIT_REPO_DIR', '/edx/var/edxapp/course_repos')
GIT_IMPORT_STATIC = ENV_TOKENS.get('GIT_IMPORT_STATIC', True)
GIT_IMPORT_PYTHON_LIB = ENV_TOKENS.get('GIT_IMPORT_PYTHON_LIB', True)
PYTHON_LIB_FILENAME = ENV_TOKENS.get('PYTHON_LIB_FILENAME', 'python_lib.zip')
for name, value in ENV_TOKENS.get("CODE_JAIL", {}).items():
oldvalue = CODE_JAIL.get(name)
if isinstance(oldvalue, dict):
for subname, subvalue in value.items():
oldvalue[subname] = subvalue
else:
CODE_JAIL[name] = value
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
ASSET_IGNORE_REGEX = ENV_TOKENS.get('ASSET_IGNORE_REGEX', ASSET_IGNORE_REGEX)
# Event Tracking
if "TRACKING_IGNORE_URL_PATTERNS" in ENV_TOKENS:
TRACKING_IGNORE_URL_PATTERNS = ENV_TOKENS.get("TRACKING_IGNORE_URL_PATTERNS")
# SSL external authentication settings
SSL_AUTH_EMAIL_DOMAIN = ENV_TOKENS.get("SSL_AUTH_EMAIL_DOMAIN", "MIT.EDU")
SSL_AUTH_DN_FORMAT_STRING = ENV_TOKENS.get(
"SSL_AUTH_DN_FORMAT_STRING",
"/C=US/ST=Massachusetts/O=Massachusetts Institute of Technology/OU=Client CA v1/CN={0}/emailAddress={1}"
)
# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = ENV_TOKENS.get("CAS_EXTRA_LOGIN_PARAMS", None)
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = ENV_TOKENS.get("CAS_SERVER_URL", None)
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
]
INSTALLED_APPS.append('django_cas')
MIDDLEWARE_CLASSES.append('django_cas.middleware.CASMiddleware')
CAS_ATTRIBUTE_CALLBACK = ENV_TOKENS.get('CAS_ATTRIBUTE_CALLBACK', None)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
CAS_ATTRIBUTE_CALLBACK['function']
)
# Video Caching. Pairing country codes with CDN URLs.
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='}
VIDEO_CDN_URL = ENV_TOKENS.get('VIDEO_CDN_URL', {})
# Branded footer
FOOTER_OPENEDX_URL = ENV_TOKENS.get('FOOTER_OPENEDX_URL', FOOTER_OPENEDX_URL)
FOOTER_OPENEDX_LOGO_IMAGE = ENV_TOKENS.get('FOOTER_OPENEDX_LOGO_IMAGE', FOOTER_OPENEDX_LOGO_IMAGE)
FOOTER_ORGANIZATION_IMAGE = ENV_TOKENS.get('FOOTER_ORGANIZATION_IMAGE', FOOTER_ORGANIZATION_IMAGE)
FOOTER_CACHE_TIMEOUT = ENV_TOKENS.get('FOOTER_CACHE_TIMEOUT', FOOTER_CACHE_TIMEOUT)
FOOTER_BROWSER_CACHE_MAX_AGE = ENV_TOKENS.get('FOOTER_BROWSER_CACHE_MAX_AGE', FOOTER_BROWSER_CACHE_MAX_AGE)
# Credit notifications settings
NOTIFICATION_EMAIL_CSS = ENV_TOKENS.get('NOTIFICATION_EMAIL_CSS', NOTIFICATION_EMAIL_CSS)
NOTIFICATION_EMAIL_EDX_LOGO = ENV_TOKENS.get('NOTIFICATION_EMAIL_EDX_LOGO', NOTIFICATION_EMAIL_EDX_LOGO)
# Determines whether the CSRF token can be transported on
# unencrypted channels. It is set to False here for backward compatibility,
# but it is highly recommended that this is True for environments accessed
# by end users.
CSRF_COOKIE_SECURE = ENV_TOKENS.get('CSRF_COOKIE_SECURE', False)
CSRF_COOKIE_DOMAIN = ENV_TOKENS.get('CSRF_COOKIE_DOMAIN', "")
############# CORS headers for cross-domain requests #################
if FEATURES.get('ENABLE_CORS_HEADERS') or FEATURES.get('ENABLE_CROSS_DOMAIN_CSRF_COOKIE'):
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = ENV_TOKENS.get('CORS_ORIGIN_WHITELIST', ())
CORS_ORIGIN_ALLOW_ALL = ENV_TOKENS.get('CORS_ORIGIN_ALLOW_ALL', False)
CORS_ALLOW_INSECURE = ENV_TOKENS.get('CORS_ALLOW_INSECURE', False)
# If setting a cross-domain cookie, it's really important to choose
# a name for the cookie that is DIFFERENT than the cookies used
# by each subdomain. For example, suppose the applications
# at these subdomains are configured to use the following cookie names:
#
# 1) foo.example.com --> "csrftoken"
# 2) baz.example.com --> "csrftoken"
# 3) bar.example.com --> "csrftoken"
#
# For the cross-domain version of the CSRF cookie, you need to choose
# a name DIFFERENT than "csrftoken"; otherwise, the new token configured
# for ".example.com" could conflict with the other cookies,
# non-deterministically causing 403 responses.
#
# Because of the way Django stores cookies, the cookie name MUST
# be a `str`, not unicode. Otherwise `TypeError`s will be raised
# when Django tries to call the unicode `translate()` method with the wrong
# number of parameters.
CROSS_DOMAIN_CSRF_COOKIE_NAME = str(ENV_TOKENS.get('CROSS_DOMAIN_CSRF_COOKIE_NAME'))
# When setting the domain for the "cross-domain" version of the CSRF
# cookie, you should choose something like: ".example.com"
# (note the leading dot), where both the referer and the host
# are subdomains of "example.com".
#
# Browser security rules require that
# the cookie domain matches the domain of the server; otherwise
# the cookie won't get set. And once the cookie gets set, the client
# needs to be on a domain that matches the cookie domain, otherwise
# the client won't be able to read the cookie.
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN = ENV_TOKENS.get('CROSS_DOMAIN_CSRF_COOKIE_DOMAIN')
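    # Illustrative pairing (hypothetical values): CROSS_DOMAIN_CSRF_COOKIE_NAME could be
    # 'cross-domain-csrftoken' with CROSS_DOMAIN_CSRF_COOKIE_DOMAIN '.example.com', so that
    # foo.example.com and bar.example.com both see the shared cookie without clashing with
    # their own per-subdomain 'csrftoken' cookies.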
# Field overrides. To use the IDDE feature, add
# 'courseware.student_field_overrides.IndividualStudentOverrideProvider'.
FIELD_OVERRIDE_PROVIDERS = tuple(ENV_TOKENS.get('FIELD_OVERRIDE_PROVIDERS', []))
############################## SECURE AUTH ITEMS ###############
# Secret things: passwords, access keys, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
AUTH_TOKENS = json.load(auth_file)
############### XBlock filesystem field config ##########
if 'DJFS' in AUTH_TOKENS and AUTH_TOKENS['DJFS'] is not None:
DJFS = AUTH_TOKENS['DJFS']
############### Module Store Items ##########
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS = ENV_TOKENS.get('HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS', {})
# PREVIEW DOMAIN must be present in HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS for the preview to show draft changes
if 'PREVIEW_LMS_BASE' in FEATURES and FEATURES['PREVIEW_LMS_BASE'] != '':
PREVIEW_DOMAIN = FEATURES['PREVIEW_LMS_BASE'].split(':')[0]
# update dictionary with preview domain regex
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS.update({
PREVIEW_DOMAIN: 'draft-preferred'
})
MODULESTORE_FIELD_OVERRIDE_PROVIDERS = ENV_TOKENS.get(
'MODULESTORE_FIELD_OVERRIDE_PROVIDERS',
MODULESTORE_FIELD_OVERRIDE_PROVIDERS
)
XBLOCK_FIELD_DATA_WRAPPERS = ENV_TOKENS.get(
'XBLOCK_FIELD_DATA_WRAPPERS',
XBLOCK_FIELD_DATA_WRAPPERS
)
############### Mixed Related(Secure/Not-Secure) Items ##########
LMS_SEGMENT_KEY = AUTH_TOKENS.get('SEGMENT_KEY')
CC_PROCESSOR_NAME = AUTH_TOKENS.get('CC_PROCESSOR_NAME', CC_PROCESSOR_NAME)
CC_PROCESSOR = AUTH_TOKENS.get('CC_PROCESSOR', CC_PROCESSOR)
SECRET_KEY = AUTH_TOKENS['SECRET_KEY']
# EDRAAK LOGISTRATION KEYS
EDRAAK_LOGISTRATION_SECRET_KEY = AUTH_TOKENS.get("EDRAAK_LOGISTRATION_SECRET_KEY", "edraak2020")
EDRAAK_LOGISTRATION_SIGNING_ALGORITHM = AUTH_TOKENS.get("EDRAAK_LOGISTRATION_SIGNING_ALGORITHM", "HS256")
AWS_ACCESS_KEY_ID = AUTH_TOKENS["AWS_ACCESS_KEY_ID"]
if AWS_ACCESS_KEY_ID == "":
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"]
if AWS_SECRET_ACCESS_KEY == "":
AWS_SECRET_ACCESS_KEY = None
AWS_STORAGE_BUCKET_NAME = AUTH_TOKENS.get('AWS_STORAGE_BUCKET_NAME', 'edxuploads')
GS_BUCKET_NAME = AUTH_TOKENS.get('GS_BUCKET_NAME', 'edxuploads')
try:
# IMPORTANT: Keep in sync with the cms/aws.py
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
if AUTH_TOKENS.get('SENTRY_DSN'):
sentry_sdk.init(
dsn=AUTH_TOKENS.get('SENTRY_DSN'),
integrations=[DjangoIntegration()]
)
except ImportError:
# Disable it when Sentry isn't installed, useful for devstack
pass
# Disabling querystring auth instructs Boto to exclude the querystring parameters (e.g. signature, access key) it
# normally appends to every returned URL.
AWS_QUERYSTRING_AUTH = AUTH_TOKENS.get('AWS_QUERYSTRING_AUTH', True)
AWS_S3_CUSTOM_DOMAIN = AUTH_TOKENS.get('AWS_S3_CUSTOM_DOMAIN', 'edxuploads.s3.amazonaws.com')
if AUTH_TOKENS.get('DEFAULT_FILE_STORAGE'):
DEFAULT_FILE_STORAGE = AUTH_TOKENS.get('DEFAULT_FILE_STORAGE')
elif AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
else:
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Specific setting for the File Upload Service to store media in a bucket.
FILE_UPLOAD_STORAGE_BUCKET_NAME = ENV_TOKENS.get('FILE_UPLOAD_STORAGE_BUCKET_NAME', FILE_UPLOAD_STORAGE_BUCKET_NAME)
FILE_UPLOAD_STORAGE_PREFIX = ENV_TOKENS.get('FILE_UPLOAD_STORAGE_PREFIX', FILE_UPLOAD_STORAGE_PREFIX)
# If there is a database called 'read_replica', you can use the use_read_replica_if_available
# function in util/query.py, which is useful for very large database reads
DATABASES = AUTH_TOKENS['DATABASES']
# The normal database user does not have enough permissions to run migrations.
# Migrations are run with separate credentials, given as DB_MIGRATION_*
# environment variables
for name, database in DATABASES.items():
if name != 'read_replica':
database.update({
'ENGINE': os.environ.get('DB_MIGRATION_ENGINE', database['ENGINE']),
'USER': os.environ.get('DB_MIGRATION_USER', database['USER']),
'PASSWORD': os.environ.get('DB_MIGRATION_PASS', database['PASSWORD']),
'NAME': os.environ.get('DB_MIGRATION_NAME', database['NAME']),
'HOST': os.environ.get('DB_MIGRATION_HOST', database['HOST']),
'PORT': os.environ.get('DB_MIGRATION_PORT', database['PORT']),
})
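# Example (illustrative): exporting DB_MIGRATION_USER='migrator' and DB_MIGRATION_PASS='...'
# makes every database except 'read_replica' use those credentials when running migrations.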
XQUEUE_INTERFACE = AUTH_TOKENS['XQUEUE_INTERFACE']
# Get the MODULESTORE from auth.json, but if it doesn't exist,
# use the one from common.py
MODULESTORE = convert_module_store_setting_if_needed(AUTH_TOKENS.get('MODULESTORE', MODULESTORE))
CONTENTSTORE = AUTH_TOKENS.get('CONTENTSTORE', CONTENTSTORE)
DOC_STORE_CONFIG = AUTH_TOKENS.get('DOC_STORE_CONFIG', DOC_STORE_CONFIG)
MONGODB_LOG = AUTH_TOKENS.get('MONGODB_LOG', {})
EMAIL_HOST_USER = AUTH_TOKENS.get('EMAIL_HOST_USER', '') # django default is ''
EMAIL_HOST_PASSWORD = AUTH_TOKENS.get('EMAIL_HOST_PASSWORD', '') # django default is ''
# Datadog for events!
DATADOG = AUTH_TOKENS.get("DATADOG", {})
DATADOG.update(ENV_TOKENS.get("DATADOG", {}))
# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']
# Analytics API
ANALYTICS_API_KEY = AUTH_TOKENS.get("ANALYTICS_API_KEY", ANALYTICS_API_KEY)
ANALYTICS_API_URL = ENV_TOKENS.get("ANALYTICS_API_URL", ANALYTICS_API_URL)
# Mailchimp New User List
MAILCHIMP_NEW_USER_LIST_ID = ENV_TOKENS.get("MAILCHIMP_NEW_USER_LIST_ID")
# Zendesk
ZENDESK_USER = AUTH_TOKENS.get("ZENDESK_USER")
ZENDESK_API_KEY = AUTH_TOKENS.get("ZENDESK_API_KEY")
# API Key for inbound requests from Notifier service
EDX_API_KEY = AUTH_TOKENS.get("EDX_API_KEY")
# Celery Broker
CELERY_BROKER_TRANSPORT = ENV_TOKENS.get("CELERY_BROKER_TRANSPORT", "")
CELERY_BROKER_HOSTNAME = ENV_TOKENS.get("CELERY_BROKER_HOSTNAME", "")
CELERY_BROKER_VHOST = ENV_TOKENS.get("CELERY_BROKER_VHOST", "")
CELERY_BROKER_USER = AUTH_TOKENS.get("CELERY_BROKER_USER", "")
CELERY_BROKER_PASSWORD = AUTH_TOKENS.get("CELERY_BROKER_PASSWORD", "")
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
CELERY_BROKER_USER,
CELERY_BROKER_PASSWORD,
CELERY_BROKER_HOSTNAME,
CELERY_BROKER_VHOST)
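# Example (illustrative values): transport 'amqp', user 'celery', password 'secret',
# host 'rabbit.internal' and vhost 'lms' yield BROKER_URL 'amqp://celery:secret@rabbit.internal/lms'.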
BROKER_USE_SSL = ENV_TOKENS.get('CELERY_BROKER_USE_SSL', False)
# Block Structures
BLOCK_STRUCTURES_SETTINGS = ENV_TOKENS.get('BLOCK_STRUCTURES_SETTINGS', BLOCK_STRUCTURES_SETTINGS)
# upload limits
STUDENT_FILEUPLOAD_MAX_SIZE = ENV_TOKENS.get("STUDENT_FILEUPLOAD_MAX_SIZE", STUDENT_FILEUPLOAD_MAX_SIZE)
# Event tracking
TRACKING_BACKENDS.update(AUTH_TOKENS.get("TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update(AUTH_TOKENS.get("EVENT_TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['segmentio']['OPTIONS']['processors'][0]['OPTIONS']['whitelist'].extend(
AUTH_TOKENS.get("EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST", []))
TRACKING_SEGMENTIO_WEBHOOK_SECRET = AUTH_TOKENS.get(
"TRACKING_SEGMENTIO_WEBHOOK_SECRET",
TRACKING_SEGMENTIO_WEBHOOK_SECRET
)
TRACKING_SEGMENTIO_ALLOWED_TYPES = ENV_TOKENS.get("TRACKING_SEGMENTIO_ALLOWED_TYPES", TRACKING_SEGMENTIO_ALLOWED_TYPES)
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES = ENV_TOKENS.get(
"TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES",
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES
)
TRACKING_SEGMENTIO_SOURCE_MAP = ENV_TOKENS.get("TRACKING_SEGMENTIO_SOURCE_MAP", TRACKING_SEGMENTIO_SOURCE_MAP)
# Heartbeat
HEARTBEAT_CHECKS = ENV_TOKENS.get('HEARTBEAT_CHECKS', HEARTBEAT_CHECKS)
HEARTBEAT_EXTENDED_CHECKS = ENV_TOKENS.get('HEARTBEAT_EXTENDED_CHECKS', HEARTBEAT_EXTENDED_CHECKS)
HEARTBEAT_CELERY_TIMEOUT = ENV_TOKENS.get('HEARTBEAT_CELERY_TIMEOUT', HEARTBEAT_CELERY_TIMEOUT)
# Student identity verification settings
VERIFY_STUDENT = AUTH_TOKENS.get("VERIFY_STUDENT", VERIFY_STUDENT)
DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH = ENV_TOKENS.get(
"DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH",
DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH
)
# Grades download
GRADES_DOWNLOAD_ROUTING_KEY = ENV_TOKENS.get('GRADES_DOWNLOAD_ROUTING_KEY', HIGH_MEM_QUEUE)
GRADES_DOWNLOAD = ENV_TOKENS.get("GRADES_DOWNLOAD", GRADES_DOWNLOAD)
# Rate limit for regrading tasks that a grading policy change can kick off
POLICY_CHANGE_TASK_RATE_LIMIT = ENV_TOKENS.get('POLICY_CHANGE_TASK_RATE_LIMIT', POLICY_CHANGE_TASK_RATE_LIMIT)
# financial reports
FINANCIAL_REPORTS = ENV_TOKENS.get("FINANCIAL_REPORTS", FINANCIAL_REPORTS)
##### ORA2 ######
# Prefix for uploads of example-based assessment AI classifiers
# This can be used to separate uploads for different environments
# within the same S3 bucket.
ORA2_FILE_PREFIX = ENV_TOKENS.get("ORA2_FILE_PREFIX", ORA2_FILE_PREFIX)
################ Edraak Apps ###############
if FEATURES.get('EDRAAK_RATELIMIT_APP'):
# Keep in sync with {cms,lms}/envs/{test,aws}.py
INSTALLED_APPS += ('edraak_ratelimit',)
import edraak_ratelimit.helpers
AUTHENTICATION_BACKENDS = edraak_ratelimit.helpers.update_authentication_backends(AUTHENTICATION_BACKENDS)
# Keep it in sync with {lms,cms}/envs/{test,aws}.py
if FEATURES.get('EDRAAK_I18N_APP'):
# Common app, keep it in sync with the CMS
import edraak_i18n.helpers
INSTALLED_APPS += ('edraak_i18n',)
MIDDLEWARE_CLASSES = edraak_i18n.helpers.add_locale_middleware(MIDDLEWARE_CLASSES)
if FEATURES.get('EDRAAK_UNIVERSITY_APP'):
INSTALLED_APPS += ('edraak_university',)
if FEATURES.get('EDRAAK_CERTIFICATES_APP') and FEATURES.get('ORGANIZATIONS_APP'):
INSTALLED_APPS += ('edraak_certificates',)
EDRAAK_SENDINBLUE_API_KEY = ENV_TOKENS.get("EDRAAK_SENDINBLUE_API_KEY", None)
EDRAAK_SENDINBLUE_LISTID = ENV_TOKENS.get("EDRAAK_SENDINBLUE_LISTID", None)
if EDRAAK_SENDINBLUE_API_KEY:
INSTALLED_APPS += ('edraak_sendinblue.apps.EdraakSendInBlueConfig',)
INSTALLED_APPS += ('edraak_specializations',)
PROGS_URLS = ENV_TOKENS.get('PROGS_URLS', PROGS_URLS)
# Redirection options
EDRAAK_ENABLE_AUTH_EXTERNAL_REDIRECT = ENV_TOKENS.get("EDRAAK_ENABLE_AUTH_EXTERNAL_REDIRECT", False)
EDRAAK_AUTH_REDIRECT_ALLOW_ANY = ENV_TOKENS.get("EDRAAK_AUTH_REDIRECT_ALLOW_ANY", False)
EDRAAK_AUTH_REDIRECT_ORIGINS_WHITELIST = ENV_TOKENS.get("EDRAAK_AUTH_REDIRECT_ORIGINS_WHITELIST", [])
EDRAAK_AUTH_REDIRECT_REGX_ORIGINS = ENV_TOKENS.get("EDRAAK_AUTH_REDIRECT_REGX_ORIGINS", [])
EDRAAK_JWT_SETTINGS = ENV_TOKENS.get('EDRAAK_JWT_SETTINGS', EDRAAK_JWT_SETTINGS)
EDRAAK_UTM_PARAMS_CERTIFICATE_EMAIL = ENV_TOKENS.get(
"EDRAAK_UTM_PARAMS_CERTIFICATE_EMAIL", "?utm_source=recengine&utm_medium=email&utm_campaign=certificate"
)
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", 5)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", 15 * 60)
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = ENV_TOKENS.get("PASSWORD_MIN_LENGTH")
PASSWORD_MAX_LENGTH = ENV_TOKENS.get("PASSWORD_MAX_LENGTH")
PASSWORD_COMPLEXITY = ENV_TOKENS.get("PASSWORD_COMPLEXITY", {})
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = ENV_TOKENS.get("PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD")
PASSWORD_DICTIONARY = ENV_TOKENS.get("PASSWORD_DICTIONARY", [])
### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = AUTH_TOKENS.get("SESSION_INACTIVITY_TIMEOUT_IN_SECONDS")
##### LMS DEADLINE DISPLAY TIME_ZONE #######
TIME_ZONE_DISPLAYED_FOR_DEADLINES = ENV_TOKENS.get("TIME_ZONE_DISPLAYED_FOR_DEADLINES",
TIME_ZONE_DISPLAYED_FOR_DEADLINES)
##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = ENV_TOKENS.get('X_FRAME_OPTIONS', X_FRAME_OPTIONS)
##### Third-party auth options ################################################
if FEATURES.get('ENABLE_THIRD_PARTY_AUTH'):
tmp_backends = ENV_TOKENS.get('THIRD_PARTY_AUTH_BACKENDS', [
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.linkedin.LinkedinOAuth2',
'edraak_social.backends.facebook.EdraakFacebookOAuth2',
'edraak_social.backends.forus.ForUsOAuth2',
'social_core.backends.azuread.AzureADOAuth2',
'third_party_auth.saml.SAMLAuthBackend',
'third_party_auth.lti.LTIAuthBackend',
])
AUTHENTICATION_BACKENDS = list(tmp_backends) + list(AUTHENTICATION_BACKENDS)
del tmp_backends
# The reduced session expiry time during the third party login pipeline. (Value in seconds)
SOCIAL_AUTH_PIPELINE_TIMEOUT = ENV_TOKENS.get('SOCIAL_AUTH_PIPELINE_TIMEOUT', 600)
# Most provider configuration is done via ConfigurationModels but for a few sensitive values
# we allow configuration via AUTH_TOKENS instead (optionally).
# The SAML private/public key values do not need the delimiter lines (such as
# "-----BEGIN PRIVATE KEY-----", "-----END PRIVATE KEY-----" etc.) but they may be included
# if you want (though it's easier to format the key values as JSON without the delimiters).
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = AUTH_TOKENS.get('SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', '')
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = AUTH_TOKENS.get('SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', '')
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT = AUTH_TOKENS.get('SOCIAL_AUTH_SAML_SP_PRIVATE_KEY_DICT', {})
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT = AUTH_TOKENS.get('SOCIAL_AUTH_SAML_SP_PUBLIC_CERT_DICT', {})
SOCIAL_AUTH_OAUTH_SECRETS = AUTH_TOKENS.get('SOCIAL_AUTH_OAUTH_SECRETS', {})
SOCIAL_AUTH_LTI_CONSUMER_SECRETS = AUTH_TOKENS.get('SOCIAL_AUTH_LTI_CONSUMER_SECRETS', {})
# third_party_auth config moved to ConfigurationModels. This is for data migration only:
THIRD_PARTY_AUTH_OLD_CONFIG = AUTH_TOKENS.get('THIRD_PARTY_AUTH', None)
if ENV_TOKENS.get('THIRD_PARTY_AUTH_SAML_FETCH_PERIOD_HOURS', 24) is not None:
CELERYBEAT_SCHEDULE['refresh-saml-metadata'] = {
'task': 'third_party_auth.fetch_saml_metadata',
'schedule': datetime.timedelta(hours=ENV_TOKENS.get('THIRD_PARTY_AUTH_SAML_FETCH_PERIOD_HOURS', 24)),
}
# The following can be used to integrate a custom login form with third_party_auth.
# It should be a dict where the key is a word passed via ?auth_entry=, and the value is a
# dict with an arbitrary 'secret_key' and a 'url'.
THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS = AUTH_TOKENS.get('THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS', {})
##### OAUTH2 Provider ##############
if FEATURES.get('ENABLE_OAUTH2_PROVIDER'):
OAUTH_OIDC_ISSUER = ENV_TOKENS['OAUTH_OIDC_ISSUER']
OAUTH_ENFORCE_SECURE = ENV_TOKENS.get('OAUTH_ENFORCE_SECURE', True)
OAUTH_ENFORCE_CLIENT_SECURE = ENV_TOKENS.get('OAUTH_ENFORCE_CLIENT_SECURE', True)
# Defaults for the following are defined in lms.envs.common
OAUTH_EXPIRE_DELTA = datetime.timedelta(
days=ENV_TOKENS.get('OAUTH_EXPIRE_CONFIDENTIAL_CLIENT_DAYS', OAUTH_EXPIRE_CONFIDENTIAL_CLIENT_DAYS)
)
OAUTH_EXPIRE_DELTA_PUBLIC = datetime.timedelta(
days=ENV_TOKENS.get('OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS', OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS)
)
OAUTH_ID_TOKEN_EXPIRATION = ENV_TOKENS.get('OAUTH_ID_TOKEN_EXPIRATION', OAUTH_ID_TOKEN_EXPIRATION)
OAUTH_DELETE_EXPIRED = ENV_TOKENS.get('OAUTH_DELETE_EXPIRED', OAUTH_DELETE_EXPIRED)
##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = ENV_TOKENS.get('ADVANCED_SECURITY_CONFIG', {})
##### GOOGLE ANALYTICS IDS #####
GOOGLE_ANALYTICS_ACCOUNT = AUTH_TOKENS.get('GOOGLE_ANALYTICS_ACCOUNT')
GOOGLE_ANALYTICS_TRACKING_ID = AUTH_TOKENS.get('GOOGLE_ANALYTICS_TRACKING_ID')
GOOGLE_ANALYTICS_LINKEDIN = AUTH_TOKENS.get('GOOGLE_ANALYTICS_LINKEDIN')
GOOGLE_SITE_VERIFICATION_ID = ENV_TOKENS.get('GOOGLE_SITE_VERIFICATION_ID')
##### BRANCH.IO KEY #####
BRANCH_IO_KEY = AUTH_TOKENS.get('BRANCH_IO_KEY')
##### OPTIMIZELY PROJECT ID #####
OPTIMIZELY_PROJECT_ID = AUTH_TOKENS.get('OPTIMIZELY_PROJECT_ID', OPTIMIZELY_PROJECT_ID)
#### Course Registration Code length ####
REGISTRATION_CODE_LENGTH = ENV_TOKENS.get('REGISTRATION_CODE_LENGTH', 8)
# REGISTRATION CODES DISPLAY INFORMATION
INVOICE_CORP_ADDRESS = ENV_TOKENS.get('INVOICE_CORP_ADDRESS', INVOICE_CORP_ADDRESS)
INVOICE_PAYMENT_INSTRUCTIONS = ENV_TOKENS.get('INVOICE_PAYMENT_INSTRUCTIONS', INVOICE_PAYMENT_INSTRUCTIONS)
# Which access.py permission names to check;
# We default this to the legacy permission 'see_exists'.
COURSE_CATALOG_VISIBILITY_PERMISSION = ENV_TOKENS.get(
'COURSE_CATALOG_VISIBILITY_PERMISSION',
COURSE_CATALOG_VISIBILITY_PERMISSION
)
COURSE_ABOUT_VISIBILITY_PERMISSION = ENV_TOKENS.get(
'COURSE_ABOUT_VISIBILITY_PERMISSION',
COURSE_ABOUT_VISIBILITY_PERMISSION
)
DEFAULT_COURSE_VISIBILITY_IN_CATALOG = ENV_TOKENS.get(
'DEFAULT_COURSE_VISIBILITY_IN_CATALOG',
DEFAULT_COURSE_VISIBILITY_IN_CATALOG
)
DEFAULT_MOBILE_AVAILABLE = ENV_TOKENS.get(
'DEFAULT_MOBILE_AVAILABLE',
DEFAULT_MOBILE_AVAILABLE
)
# Enrollment API Cache Timeout
ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT = ENV_TOKENS.get('ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT', 60)
# PDF RECEIPT/INVOICE OVERRIDES
PDF_RECEIPT_TAX_ID = ENV_TOKENS.get('PDF_RECEIPT_TAX_ID', PDF_RECEIPT_TAX_ID)
PDF_RECEIPT_FOOTER_TEXT = ENV_TOKENS.get('PDF_RECEIPT_FOOTER_TEXT', PDF_RECEIPT_FOOTER_TEXT)
PDF_RECEIPT_DISCLAIMER_TEXT = ENV_TOKENS.get('PDF_RECEIPT_DISCLAIMER_TEXT', PDF_RECEIPT_DISCLAIMER_TEXT)
PDF_RECEIPT_BILLING_ADDRESS = ENV_TOKENS.get('PDF_RECEIPT_BILLING_ADDRESS', PDF_RECEIPT_BILLING_ADDRESS)
PDF_RECEIPT_TERMS_AND_CONDITIONS = ENV_TOKENS.get('PDF_RECEIPT_TERMS_AND_CONDITIONS', PDF_RECEIPT_TERMS_AND_CONDITIONS)
PDF_RECEIPT_TAX_ID_LABEL = ENV_TOKENS.get('PDF_RECEIPT_TAX_ID_LABEL', PDF_RECEIPT_TAX_ID_LABEL)
PDF_RECEIPT_LOGO_PATH = ENV_TOKENS.get('PDF_RECEIPT_LOGO_PATH', PDF_RECEIPT_LOGO_PATH)
PDF_RECEIPT_COBRAND_LOGO_PATH = ENV_TOKENS.get('PDF_RECEIPT_COBRAND_LOGO_PATH', PDF_RECEIPT_COBRAND_LOGO_PATH)
PDF_RECEIPT_LOGO_HEIGHT_MM = ENV_TOKENS.get('PDF_RECEIPT_LOGO_HEIGHT_MM', PDF_RECEIPT_LOGO_HEIGHT_MM)
PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM = ENV_TOKENS.get(
'PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM', PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM
)
if FEATURES.get('ENABLE_COURSEWARE_SEARCH') or \
FEATURES.get('ENABLE_DASHBOARD_SEARCH') or \
FEATURES.get('ENABLE_COURSE_DISCOVERY') or \
FEATURES.get('ENABLE_TEAMS'):
# Use ElasticSearch as the search engine herein
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
ELASTIC_SEARCH_CONFIG = ENV_TOKENS.get('ELASTIC_SEARCH_CONFIG', [{}])
# Facebook app
FACEBOOK_API_VERSION = AUTH_TOKENS.get("FACEBOOK_API_VERSION")
FACEBOOK_APP_SECRET = AUTH_TOKENS.get("FACEBOOK_APP_SECRET")
FACEBOOK_APP_ID = AUTH_TOKENS.get("FACEBOOK_APP_ID")
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id, name, email'
}
SOCIAL_AUTH_FACEBOOK_AUTH_EXTRA_ARGUMENTS = {
'auth_type': 'reauthorize',
}
XBLOCK_SETTINGS = ENV_TOKENS.get('XBLOCK_SETTINGS', {})
XBLOCK_SETTINGS.setdefault("VideoDescriptor", {})["licensing_enabled"] = FEATURES.get("LICENSING", False)
XBLOCK_SETTINGS.setdefault("VideoModule", {})['YOUTUBE_API_KEY'] = AUTH_TOKENS.get('YOUTUBE_API_KEY', YOUTUBE_API_KEY)
##### VIDEO IMAGE STORAGE #####
VIDEO_IMAGE_SETTINGS = ENV_TOKENS.get('VIDEO_IMAGE_SETTINGS', VIDEO_IMAGE_SETTINGS)
##### VIDEO TRANSCRIPTS STORAGE #####
VIDEO_TRANSCRIPTS_SETTINGS = ENV_TOKENS.get('VIDEO_TRANSCRIPTS_SETTINGS', VIDEO_TRANSCRIPTS_SETTINGS)
##### ECOMMERCE API CONFIGURATION SETTINGS #####
ECOMMERCE_PUBLIC_URL_ROOT = ENV_TOKENS.get('ECOMMERCE_PUBLIC_URL_ROOT', ECOMMERCE_PUBLIC_URL_ROOT)
ECOMMERCE_API_URL = ENV_TOKENS.get('ECOMMERCE_API_URL', ECOMMERCE_API_URL)
ECOMMERCE_API_TIMEOUT = ENV_TOKENS.get('ECOMMERCE_API_TIMEOUT', ECOMMERCE_API_TIMEOUT)
COURSE_CATALOG_API_URL = ENV_TOKENS.get('COURSE_CATALOG_API_URL', COURSE_CATALOG_API_URL)
ECOMMERCE_SERVICE_WORKER_USERNAME = ENV_TOKENS.get(
'ECOMMERCE_SERVICE_WORKER_USERNAME',
ECOMMERCE_SERVICE_WORKER_USERNAME
)
##### Custom Courses for EdX #####
if FEATURES.get('CUSTOM_COURSES_EDX'):
INSTALLED_APPS += ['lms.djangoapps.ccx', 'openedx.core.djangoapps.ccxcon.apps.CCXConnectorConfig']
MODULESTORE_FIELD_OVERRIDE_PROVIDERS += (
'lms.djangoapps.ccx.overrides.CustomCoursesForEdxOverrideProvider',
)
CCX_MAX_STUDENTS_ALLOWED = ENV_TOKENS.get('CCX_MAX_STUDENTS_ALLOWED', CCX_MAX_STUDENTS_ALLOWED)
##### Individual Due Date Extensions #####
if FEATURES.get('INDIVIDUAL_DUE_DATES'):
FIELD_OVERRIDE_PROVIDERS += (
'courseware.student_field_overrides.IndividualStudentOverrideProvider',
)
##### Self-Paced Course Due Dates #####
XBLOCK_FIELD_DATA_WRAPPERS += (
'lms.djangoapps.courseware.field_overrides:OverrideModulestoreFieldData.wrap',
)
MODULESTORE_FIELD_OVERRIDE_PROVIDERS += (
'courseware.self_paced_overrides.SelfPacedDateOverrideProvider',
)
# PROFILE IMAGE CONFIG
PROFILE_IMAGE_BACKEND = ENV_TOKENS.get('PROFILE_IMAGE_BACKEND', PROFILE_IMAGE_BACKEND)
PROFILE_IMAGE_SECRET_KEY = AUTH_TOKENS.get('PROFILE_IMAGE_SECRET_KEY', PROFILE_IMAGE_SECRET_KEY)
PROFILE_IMAGE_MAX_BYTES = ENV_TOKENS.get('PROFILE_IMAGE_MAX_BYTES', PROFILE_IMAGE_MAX_BYTES)
PROFILE_IMAGE_MIN_BYTES = ENV_TOKENS.get('PROFILE_IMAGE_MIN_BYTES', PROFILE_IMAGE_MIN_BYTES)
PROFILE_IMAGE_DEFAULT_FILENAME = 'images/profiles/default'
PROFILE_IMAGE_SIZES_MAP = ENV_TOKENS.get(
'PROFILE_IMAGE_SIZES_MAP',
PROFILE_IMAGE_SIZES_MAP
)
# EdxNotes config
EDXNOTES_PUBLIC_API = ENV_TOKENS.get('EDXNOTES_PUBLIC_API', EDXNOTES_PUBLIC_API)
EDXNOTES_INTERNAL_API = ENV_TOKENS.get('EDXNOTES_INTERNAL_API', EDXNOTES_INTERNAL_API)
EDXNOTES_CONNECT_TIMEOUT = ENV_TOKENS.get('EDXNOTES_CONNECT_TIMEOUT', EDXNOTES_CONNECT_TIMEOUT)
EDXNOTES_READ_TIMEOUT = ENV_TOKENS.get('EDXNOTES_READ_TIMEOUT', EDXNOTES_READ_TIMEOUT)
##### Credit Provider Integration #####
CREDIT_PROVIDER_SECRET_KEYS = AUTH_TOKENS.get("CREDIT_PROVIDER_SECRET_KEYS", {})
##################### LTI Provider #####################
if FEATURES.get('ENABLE_LTI_PROVIDER'):
INSTALLED_APPS.append('lti_provider.apps.LtiProviderConfig')
AUTHENTICATION_BACKENDS.append('lti_provider.users.LtiBackend')
LTI_USER_EMAIL_DOMAIN = ENV_TOKENS.get('LTI_USER_EMAIL_DOMAIN', 'lti.example.com')
# For more info on this, see the notes in common.py
LTI_AGGREGATE_SCORE_PASSBACK_DELAY = ENV_TOKENS.get(
'LTI_AGGREGATE_SCORE_PASSBACK_DELAY', LTI_AGGREGATE_SCORE_PASSBACK_DELAY
)
##################### Credit Provider help link ####################
CREDIT_HELP_LINK_URL = ENV_TOKENS.get('CREDIT_HELP_LINK_URL', CREDIT_HELP_LINK_URL)
#### JWT configuration ####
DEFAULT_JWT_ISSUER = ENV_TOKENS.get('DEFAULT_JWT_ISSUER', DEFAULT_JWT_ISSUER)
RESTRICTED_APPLICATION_JWT_ISSUER = ENV_TOKENS.get(
'RESTRICTED_APPLICATION_JWT_ISSUER',
RESTRICTED_APPLICATION_JWT_ISSUER
)
JWT_AUTH.update(ENV_TOKENS.get('JWT_AUTH', {}))
JWT_PRIVATE_SIGNING_KEY = ENV_TOKENS.get('JWT_PRIVATE_SIGNING_KEY', JWT_PRIVATE_SIGNING_KEY)
JWT_EXPIRED_PRIVATE_SIGNING_KEYS = ENV_TOKENS.get('JWT_EXPIRED_PRIVATE_SIGNING_KEYS', JWT_EXPIRED_PRIVATE_SIGNING_KEYS)
################# PROCTORING CONFIGURATION ##################
PROCTORING_BACKEND_PROVIDER = AUTH_TOKENS.get("PROCTORING_BACKEND_PROVIDER", PROCTORING_BACKEND_PROVIDER)
PROCTORING_SETTINGS = ENV_TOKENS.get("PROCTORING_SETTINGS", PROCTORING_SETTINGS)
################# MICROSITE ####################
MICROSITE_CONFIGURATION = ENV_TOKENS.get('MICROSITE_CONFIGURATION', {})
MICROSITE_ROOT_DIR = path(ENV_TOKENS.get('MICROSITE_ROOT_DIR', ''))
# This setting specifies which backend to use when pulling microsite-specific configuration
MICROSITE_BACKEND = ENV_TOKENS.get("MICROSITE_BACKEND", MICROSITE_BACKEND)
# This setting specifies which backend to use when loading microsite-specific templates
MICROSITE_TEMPLATE_BACKEND = ENV_TOKENS.get("MICROSITE_TEMPLATE_BACKEND", MICROSITE_TEMPLATE_BACKEND)
# TTL for microsite database template cache
MICROSITE_DATABASE_TEMPLATE_CACHE_TTL = ENV_TOKENS.get(
"MICROSITE_DATABASE_TEMPLATE_CACHE_TTL", MICROSITE_DATABASE_TEMPLATE_CACHE_TTL
)
# Offset for pk of courseware.StudentModuleHistoryExtended
STUDENTMODULEHISTORYEXTENDED_OFFSET = ENV_TOKENS.get(
'STUDENTMODULEHISTORYEXTENDED_OFFSET', STUDENTMODULEHISTORYEXTENDED_OFFSET
)
# Cutoff date for granting audit certificates
if ENV_TOKENS.get('AUDIT_CERT_CUTOFF_DATE', None):
AUDIT_CERT_CUTOFF_DATE = dateutil.parser.parse(ENV_TOKENS.get('AUDIT_CERT_CUTOFF_DATE'))
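    # The raw value may be any date string dateutil can parse,
    # e.g. '2015-01-01' or '2015-01-01T00:00:00+00:00'.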
################################ Settings for Credentials Service ################################
CREDENTIALS_GENERATION_ROUTING_KEY = ENV_TOKENS.get('CREDENTIALS_GENERATION_ROUTING_KEY', HIGH_PRIORITY_QUEUE)
# The extended StudentModule history table
if FEATURES.get('ENABLE_CSMH_EXTENDED'):
INSTALLED_APPS.append('coursewarehistoryextended')
API_ACCESS_MANAGER_EMAIL = ENV_TOKENS.get('API_ACCESS_MANAGER_EMAIL')
API_ACCESS_FROM_EMAIL = ENV_TOKENS.get('API_ACCESS_FROM_EMAIL')
# Mobile App Version Upgrade config
APP_UPGRADE_CACHE_TIMEOUT = ENV_TOKENS.get('APP_UPGRADE_CACHE_TIMEOUT', APP_UPGRADE_CACHE_TIMEOUT)
AFFILIATE_COOKIE_NAME = ENV_TOKENS.get('AFFILIATE_COOKIE_NAME', AFFILIATE_COOKIE_NAME)
############## Settings for LMS Context Sensitive Help ##############
HELP_TOKENS_BOOKS = ENV_TOKENS.get('HELP_TOKENS_BOOKS', HELP_TOKENS_BOOKS)
############## OPEN EDX ENTERPRISE SERVICE CONFIGURATION ######################
# The Open edX Enterprise service is currently hosted via the LMS container/process.
# However, for all intents and purposes this service is treated as a standalone IDA.
# These configuration settings are specific to the Enterprise service and you should
# not find references to them within the edx-platform project.
# Publicly-accessible enrollment URL, for use on the client side.
ENTERPRISE_PUBLIC_ENROLLMENT_API_URL = ENV_TOKENS.get(
'ENTERPRISE_PUBLIC_ENROLLMENT_API_URL',
(LMS_ROOT_URL or '') + LMS_ENROLLMENT_API_PATH
)
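# For illustration: with LMS_ROOT_URL = 'https://lms.example.com' and an
# enrollment API path such as '/api/enrollment/v1/', the default would resolve
# to 'https://lms.example.com/api/enrollment/v1/' (the real path comes from common.py).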
# Enrollment URL used on the server-side.
ENTERPRISE_ENROLLMENT_API_URL = ENV_TOKENS.get(
'ENTERPRISE_ENROLLMENT_API_URL',
(LMS_INTERNAL_ROOT_URL or '') + LMS_ENROLLMENT_API_PATH
)
# Enterprise logo image size limit in KB's
ENTERPRISE_CUSTOMER_LOGO_IMAGE_SIZE = ENV_TOKENS.get(
'ENTERPRISE_CUSTOMER_LOGO_IMAGE_SIZE',
ENTERPRISE_CUSTOMER_LOGO_IMAGE_SIZE
)
# Course enrollment modes to be hidden in the Enterprise enrollment page
# if the "Hide audit track" flag is enabled for an EnterpriseCustomer
ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES = ENV_TOKENS.get(
'ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES',
ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES
)
# A support URL used on Enterprise landing pages when a warning
# message is displayed.
ENTERPRISE_SUPPORT_URL = ENV_TOKENS.get(
'ENTERPRISE_SUPPORT_URL',
ENTERPRISE_SUPPORT_URL
)
# A shared secret to be used for encrypting passwords passed from the enterprise api
# to the enterprise reporting script.
ENTERPRISE_REPORTING_SECRET = AUTH_TOKENS.get(
'ENTERPRISE_REPORTING_SECRET',
ENTERPRISE_REPORTING_SECRET
)
############## ENTERPRISE SERVICE API CLIENT CONFIGURATION ######################
# The LMS communicates with the Enterprise service via the EdxRestApiClient class.
# The environment settings below are used by the LMS when interacting with
# the service, and they override the default parameters defined in common.py
DEFAULT_ENTERPRISE_API_URL = None
if LMS_INTERNAL_ROOT_URL is not None:
DEFAULT_ENTERPRISE_API_URL = LMS_INTERNAL_ROOT_URL + '/enterprise/api/v1/'
ENTERPRISE_API_URL = ENV_TOKENS.get('ENTERPRISE_API_URL', DEFAULT_ENTERPRISE_API_URL)
DEFAULT_ENTERPRISE_CONSENT_API_URL = None
if LMS_INTERNAL_ROOT_URL is not None:
DEFAULT_ENTERPRISE_CONSENT_API_URL = LMS_INTERNAL_ROOT_URL + '/consent/api/v1/'
ENTERPRISE_CONSENT_API_URL = ENV_TOKENS.get('ENTERPRISE_CONSENT_API_URL', DEFAULT_ENTERPRISE_CONSENT_API_URL)
ENTERPRISE_SERVICE_WORKER_USERNAME = ENV_TOKENS.get(
'ENTERPRISE_SERVICE_WORKER_USERNAME',
ENTERPRISE_SERVICE_WORKER_USERNAME
)
ENTERPRISE_API_CACHE_TIMEOUT = ENV_TOKENS.get(
'ENTERPRISE_API_CACHE_TIMEOUT',
ENTERPRISE_API_CACHE_TIMEOUT
)
############## ENTERPRISE SERVICE LMS CONFIGURATION ##################################
# The LMS has some features embedded that are related to the Enterprise service, but
# which are not provided by the Enterprise service. These settings override the
# base values for the parameters as defined in common.py
ENTERPRISE_PLATFORM_WELCOME_TEMPLATE = ENV_TOKENS.get(
'ENTERPRISE_PLATFORM_WELCOME_TEMPLATE',
ENTERPRISE_PLATFORM_WELCOME_TEMPLATE
)
ENTERPRISE_SPECIFIC_BRANDED_WELCOME_TEMPLATE = ENV_TOKENS.get(
'ENTERPRISE_SPECIFIC_BRANDED_WELCOME_TEMPLATE',
ENTERPRISE_SPECIFIC_BRANDED_WELCOME_TEMPLATE
)
ENTERPRISE_TAGLINE = ENV_TOKENS.get(
'ENTERPRISE_TAGLINE',
ENTERPRISE_TAGLINE
)
ENTERPRISE_EXCLUDED_REGISTRATION_FIELDS = set(
ENV_TOKENS.get(
'ENTERPRISE_EXCLUDED_REGISTRATION_FIELDS',
ENTERPRISE_EXCLUDED_REGISTRATION_FIELDS
)
)
BASE_COOKIE_DOMAIN = ENV_TOKENS.get(
'BASE_COOKIE_DOMAIN',
BASE_COOKIE_DOMAIN
)
############## CATALOG/DISCOVERY SERVICE API CLIENT CONFIGURATION ######################
# The LMS communicates with the Catalog service via the EdxRestApiClient class.
# The environment settings below are used by the LMS when interacting with
# the service, and they override the default parameters defined in common.py
COURSES_API_CACHE_TIMEOUT = ENV_TOKENS.get('COURSES_API_CACHE_TIMEOUT', COURSES_API_CACHE_TIMEOUT)
# Add an ICP license for serving content in China if your organization is registered to do so
ICP_LICENSE = ENV_TOKENS.get('ICP_LICENSE', None)
############## Settings for CourseGraph ############################
COURSEGRAPH_JOB_QUEUE = ENV_TOKENS.get('COURSEGRAPH_JOB_QUEUE', LOW_PRIORITY_QUEUE)
########################## Parental controls config #######################
# The age at which a learner no longer requires parental consent, or None
# if parental consent is never required.
PARENTAL_CONSENT_AGE_LIMIT = ENV_TOKENS.get(
'PARENTAL_CONSENT_AGE_LIMIT',
PARENTAL_CONSENT_AGE_LIMIT
)
# Do NOT calculate this dynamically at startup with git because it's *slow*.
EDX_PLATFORM_REVISION = ENV_TOKENS.get('EDX_PLATFORM_REVISION', EDX_PLATFORM_REVISION)
########################## Extra middleware classes #######################
# Allow extra middleware classes to be added to the app through configuration.
MIDDLEWARE_CLASSES.extend(ENV_TOKENS.get('EXTRA_MIDDLEWARE_CLASSES', []))
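# EXTRA_MIDDLEWARE_CLASSES is expected to be a list of dotted import paths,
# e.g. ['myplugin.middleware.RequestLogMiddleware'] (hypothetical), appended in order.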
########################## Settings for Completion API #####################
# Once a user has watched this percentage of a video, mark it as complete:
# (0.0 = 0%, 1.0 = 100%)
COMPLETION_VIDEO_COMPLETE_PERCENTAGE = ENV_TOKENS.get(
'COMPLETION_VIDEO_COMPLETE_PERCENTAGE',
COMPLETION_VIDEO_COMPLETE_PERCENTAGE,
)
# The time a block needs to be viewed to be considered complete, in milliseconds.
COMPLETION_BY_VIEWING_DELAY_MS = ENV_TOKENS.get('COMPLETION_BY_VIEWING_DELAY_MS', COMPLETION_BY_VIEWING_DELAY_MS)
############### Settings for django-fernet-fields ##################
FERNET_KEYS = AUTH_TOKENS.get('FERNET_KEYS', FERNET_KEYS)
################# Settings for the maintenance banner #################
MAINTENANCE_BANNER_TEXT = ENV_TOKENS.get('MAINTENANCE_BANNER_TEXT', None)
############### Settings for Retirement #####################
RETIRED_USERNAME_PREFIX = ENV_TOKENS.get('RETIRED_USERNAME_PREFIX', RETIRED_USERNAME_PREFIX)
RETIRED_EMAIL_PREFIX = ENV_TOKENS.get('RETIRED_EMAIL_PREFIX', RETIRED_EMAIL_PREFIX)
RETIRED_EMAIL_DOMAIN = ENV_TOKENS.get('RETIRED_EMAIL_DOMAIN', RETIRED_EMAIL_DOMAIN)
RETIRED_USER_SALTS = ENV_TOKENS.get('RETIRED_USER_SALTS', RETIRED_USER_SALTS)
RETIREMENT_SERVICE_WORKER_USERNAME = ENV_TOKENS.get(
'RETIREMENT_SERVICE_WORKER_USERNAME',
RETIREMENT_SERVICE_WORKER_USERNAME
)
RETIREMENT_STATES = ENV_TOKENS.get('RETIREMENT_STATES', RETIREMENT_STATES)
############################### Plugin Settings ###############################
from openedx.core.djangoapps.plugins import plugin_settings, constants as plugin_constants
plugin_settings.add_plugins(__name__, plugin_constants.ProjectType.LMS, plugin_constants.SettingsType.AWS)
########################## Derive Any Derived Settings #######################
derive_settings(__name__)
# the hosts that the platform is safe to redirect to
ALLOWED_REDIRECT_HOSTS = ENV_TOKENS.get("SAFE_REDIRECT_HOSTS", [])
| agpl-3.0 | -2,047,620,486,655,663,900 | 44.249789 | 120 | 0.726637 | false |
mglidden/git-analysis | analysis/manual_classification.py | 1 | 2181 | import fix_paths
from models.commit import Commit
import common
import config
from models.file_diff import FileDiff
from models.hunk import Hunk
import csv
import random
num_train = raw_input("Enter number of samples for the training set [100]:")
num_test = raw_input("Enter number of samples for the testing set [100]:")
try:
num_train = int(num_train)
except ValueError:
num_train = 100
try:
num_test = int(num_test)
except ValueError:
num_test = 100
session = common.Session()
assert num_train + num_test <= session.query(Commit.id).count(), 'Train and test size is larger than the number of commits.'
iteration_order = list(range(session.query(Commit.id).count()))
random.shuffle(iteration_order)
training_data = []
testing_data = []
training_file = open(config.TRAINING_DATA_PATH, 'a')
training_writer = csv.writer(training_file)
testing_file = open(config.TESTING_DATA_PATH, 'a')
testing_writer = csv.writer(testing_file)
def classifyCommit(session, commit_id):
commit = session.query(Commit).filter(Commit.id == commit_id).first()
print 'ID:\t\t%s' % commit.id
print 'Hash:\t\t%s' % commit.hash
print 'Is merge:\t%s' % commit.is_merge
print 'Message:\t%s' % commit.message.replace('\n', ' ')
if commit.patch:
    print 'Lines added:\t%s' % commit.patch.lines_added()
    print 'Lines removed:\t%s' % commit.patch.lines_removed()
print 'Files changed:'
for file_diff in commit.patch.file_diffs:
      print '\t%s, +%s, -%s' % (file_diff.new_file_path, file_diff.lines_added(), file_diff.lines_removed())
classification_number = None
while not classification_number:
try:
classification_number = int(raw_input('Enter classification number for commit %s: ' % (commit_id)))
except:
print 'Enter an int for the classification number.'
print
return (classification_number, commit_id)
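# Each CSV row written below is the (classification_number, commit_id) pair returned
# by classifyCommit; the first num_train rows go to the training file and the next
# num_test rows go to the testing file.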
i = 1
for index in iteration_order:
if i <= num_train:
training_writer.writerow(classifyCommit(session, index))
training_file.flush()
elif i <= num_train + num_test:
testing_writer.writerow(classifyCommit(session, index))
testing_file.flush()
else:
break
i += 1
training_file.close()
testing_file.close()
| mit | -1,118,231,830,233,449,000 | 27.324675 | 125 | 0.701513 | false |
shyampurk/bluemix-parking-meter | testApp/app.py | 1 | 1873 | '''*********************************************************************************
APP - SMART PARKING LOT SYSTEM
*********************************************************************************'''
from pubnub import Pubnub
from threading import Thread
import sys
pub_key = "demo"
sub_key = "demo"
g_userData = dict()
g_myCar = dict()
g_lotNumber = sys.argv[1]
g_carNumber = sys.argv[2]
def init():
#Pubnub Key Initialization
global pubnub
pubnub = Pubnub(publish_key=pub_key,subscribe_key=sub_key)
pubnub.subscribe(channels='parkingapp-resp', callback=callback, error=callback,
connect=connect, reconnect=reconnect, disconnect=disconnect)
pubnub.subscribe(channels=g_carNumber, callback=caRcallback, error=caRcallback,
connect=connect, reconnect=reconnect, disconnect=disconnect)
def callback(message, channel):
g_userData.update(message)
def caRcallback(message, channel):
g_myCar.update(message)
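# Console input handled by dataHandling (one integer per line):
#   1 - publish a requestType-1 message (lotNumber 0, requestValue 0)
#   2 - publish a requestType-2 message for g_lotNumber carrying g_carNumber
#   3 - print the data received so far on the subscribed channels
# Any other value is ignored.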
def dataHandling(stdin):
l_action = int(stdin.readline().strip())
if(l_action == 1):
pubnub.publish(channel='parkingapp-req',message={"requester":"APP",
"lotNumber":0,"requestType":1,"requestValue":0})
elif(l_action == 2):
pubnub.publish(channel='parkingapp-req',
message={"requester":"APP","lotNumber":g_lotNumber,
"requestType":2,"requestValue":g_carNumber})
elif(l_action == 3):
print "\n\n", g_userData
print "\n\n", g_myCar
else:
pass
def error(message):
print("ERROR : " + str(message))
def connect(message):
print "CONNECTED"
def reconnect(message):
print("RECONNECTED")
def disconnect(message):
print("DISCONNECTED")
if __name__ == '__main__':
init()
while True:
t1 = Thread(target=dataHandling, args=(sys.stdin,))
t1.start()
t1.join()
#End of the Script
##*****************************************************************************************************##
| mit | -4,338,057,441,368,052,000 | 26.558824 | 105 | 0.595836 | false |
mbiciunas/nix | test/config/script/test_showScript.py | 1 | 1231 | # Nix
# Copyright (c) 2017 Mark Biciunas.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from config.script.show_script import ShowScript
from utility.nix_error import NixError
class TestShowScript:
def test_show(self, config_valid, capsys):
_show_script = ShowScript()
_show_script.show(config_valid.SCRIPT_VALID_1)
out, err = capsys.readouterr()
assert config_valid.SCRIPT_VALID_CODE_1 in out
def test_show_invalid_name(self, config_valid):
_show_script = ShowScript()
with pytest.raises(NixError):
_show_script.show(config_valid.SCRIPT_INVALID_1)
| gpl-3.0 | 8,268,116,551,829,773,000 | 32.27027 | 71 | 0.722989 | false |
sambandi/eMonitor | emonitor/lib/location.py | 2 | 5124 | import re
from math import cos, sqrt, tan, sin, atan, trunc, radians, degrees
def getFloat(value):
try:
return float(value)
except ValueError:
value = str(value).replace('B', '8').replace('O', '0').replace(',', '.')
_errcount = 0
for i in value:
            if not re.match(r'[0-9\.]', i):
_errcount += 1
if _errcount == 0:
return float(value)
return None
class Location:
"""
    Location class for position calculation and conversion; handles Gauss-Krüger (GK) and WGS84 coordinates - default is WGS84
"""
earthRadius = 6378137.0 # Earth radius in m
aBessel = 6377397.155
eeBessel = 0.0066743722296294277832
ScaleFactor = 0.00000982
RotXRad = -7.16069806998785E-06
RotYRad = 3.56822869296619E-07
RotZRad = 7.06858347057704E-06
ShiftXMeters = 591.28
ShiftYMeters = 81.35
ShiftZMeters = 396.39
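    # The Shift*/Rot*/ScaleFactor values above appear to be seven-parameter
    # (Bursa-Wolf) Helmert datum-shift constants from the Bessel ellipsoid to
    # WGS84: shifts in metres, rotations in radians, plus a scale correction.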
def __init__(self, x, y, geotype='wgs84'): # wgs84 (default), gk
self.x = getFloat(x)
self.y = getFloat(y)
self.geotype = geotype.lower()
def __repr__(self):
return u"<location: {}, {} ({})>".format(self.x, self.y, self.geotype)
def getLatLng(self, use_wgs84=None):
if self.geotype == 'gk': # gauss kruger
(x, y) = self._gk_transformation()
return Location.seven_parameter_helmert_transf(x, y, use_wgs84)
else:
return self.x, self.y
def getDistance(self, lat, lng):
"""
get distance in meters
"""
(lat1, lng1) = self.getLatLng()
x = ((radians(lng - lng1)) * cos(0.5 * (radians(lat + lat1))))**2
return Location.earthRadius * sqrt(x + (radians(lat - lat1))**2)
def _gk_transformation(self): # transformation for gauss kruger
# Check for invalid Parameters
if not ((self.x > 1000000) and (self.y > 1000000)) and self.geotype != 'gk':
raise ValueError("No valid Gauss-Kruger-Code.")
# Variables to prepare the geovalues
bii = (self.y / 10000855.7646)**2
bf = (325632.08677 * (self.y / 10000855.7646) * (((((0.00000562025 * bii + 0.00022976983) * bii - 0.00113566119) * bii + 0.00424914906) * bii - 0.00831729565) * bii + 1)) / degrees(3600)
g2 = 0.0067192188 * cos(bf)**2
fa = (self.x - trunc(self.x / 1000000) * 1000000 - 500000) / (6398786.849 / sqrt(1 + g2))
geo_dez_right = degrees(bf - fa**2 * tan(bf) * (1 + g2) / 2 + fa**4 * tan(bf) * (5 + 3 * tan(bf)**2 + 6 * g2 - 6 * g2 * tan(bf)**2) / 24)
geo_dez_height = degrees(fa - fa**3 * (1 + 2 * tan(bf)**2 + g2) / 6 + fa**5 * (1 + 28 * tan(bf)**2 + 24 * tan(bf)**4) / 120) / cos(bf) + trunc(self.x / 1000000) * 3
return geo_dez_right, geo_dez_height
@staticmethod
def seven_parameter_helmert_transf(x, y, use_wgs84=False):
# calculate coordinates with helmert transformation
latitudeit = 99999999
if use_wgs84:
ee = 0.0066943799
else:
ee = 0.00669438002290
n = Location.aBessel / sqrt(1 - (Location.eeBessel * sin(radians(x))**2))
cartesian_x_meters = n * cos(radians(x)) * cos(radians(y))
cartesian_y_meters = n * cos(radians(x)) * sin(radians(y))
cartesian_z_meters = n * (1 - Location.eeBessel) * sin(radians(x))
cart_output_x_meters = (1 + Location.ScaleFactor) * cartesian_x_meters + Location.RotZRad * cartesian_y_meters - Location.RotYRad * cartesian_z_meters + Location.ShiftXMeters
cart_output_y_meters = -1 * Location.RotZRad * cartesian_x_meters + (1 + Location.ScaleFactor) * cartesian_y_meters + Location.RotXRad * cartesian_z_meters + Location.ShiftYMeters
cart_output_z_meters = Location.RotYRad * cartesian_x_meters - Location.RotXRad * cartesian_y_meters + (1 + Location.ScaleFactor) * cartesian_z_meters + Location.ShiftZMeters
geo_dez_height = atan(cart_output_y_meters / cart_output_x_meters)
latitude = atan(cart_output_z_meters / sqrt((cart_output_x_meters * cart_output_x_meters) + (cart_output_y_meters * cart_output_y_meters)))
while abs(latitude - latitudeit) >= 0.000000000000001:
latitudeit = latitude
n = Location.earthRadius / sqrt(1 - ee * sin(latitude)**2)
latitude = atan((cart_output_z_meters + ee * n * sin(latitudeit)) / sqrt(cart_output_x_meters**2 + cart_output_y_meters * cart_output_y_meters))
return degrees(latitude), degrees(geo_dez_height)
if __name__ == "__main__":
# test values
# location1 (48.124570, 11.582328)
lkx1 = 4469012.74
lky1 = 5331920.84
# location2 (48.1103206, 11.7233732)
lkx2 = 4479507.160
lky2 = "53302B9,O32" # test value with error
l1 = Location(lkx1, lky1, geotype='gk')
l2 = Location(lkx2, lky2, geotype='gk')
l3 = Location(48.1103206, 11.7233732) # test coordinates (imprecision)
print "l1: {}\nl2: {}\nl3: {}".format(l1, l2, l3)
print "\nl2->l3 {:8.2f} m (precision)".format(l2.getDistance(*l3.getLatLng()))
print "l2->l1 {:8.2f} m".format(l2.getDistance(*l1.getLatLng()))
| bsd-3-clause | 2,488,118,184,217,061,400 | 42.423729 | 194 | 0.598946 | false |
rzhxeo/youtube-dl | youtube_dl/extractor/youtube.py | 1 | 76832 | # coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_parse_qs,
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
ExtractorError,
get_element_by_attribute,
get_element_by_id,
int_or_none,
OnDemandPagedList,
orderedSet,
unescapeHTML,
unified_strdate,
uppercase_escape,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
(username, password) = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
login_page, 'Login GALX parameter')
# Log in
login_form_strs = {
'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
'Email': username,
'GALX': galx,
'Passwd': password,
'PersistentCookie': 'yes',
'_utf8': '霱',
'bgresponse': 'js_disabled',
'checkConnection': '',
'checkedDomains': 'youtube',
'dnConn': '',
'pstMsg': '0',
'rmShown': '1',
'secTok': '',
'signIn': 'Sign in',
'timeStmp': '',
'service': 'youtube',
'uilel': '3',
'hl': 'en_US',
}
# Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
# chokes on unicode
login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
login_results = self._download_webpage(
req, None,
note='Logging in', errnote='unable to log in', fatal=False)
if login_results is False:
return False
if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
# Two-Factor
# TODO add SMS and phone call support - these require making a request and then prompting the user
if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None:
tfa_code = self._get_tfa_info()
if tfa_code is None:
self._downloader.report_warning('Two-factor authentication required. Provide it with --twofactor <code>')
self._downloader.report_warning('(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
# Unlike the first login form, secTok and timeStmp are both required for the TFA form
match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
if match is None:
self._downloader.report_warning('Failed to get secTok - did the page structure change?')
secTok = match.group(1)
match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
if match is None:
self._downloader.report_warning('Failed to get timeStmp - did the page structure change?')
timeStmp = match.group(1)
tfa_form_strs = {
'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
'smsToken': '',
'smsUserPin': tfa_code,
'smsVerifyPin': 'Verify',
'PersistentCookie': 'yes',
'checkConnection': '',
'checkedDomains': 'youtube',
'pstMsg': '1',
'secTok': secTok,
'timeStmp': timeStmp,
'service': 'youtube',
'hl': 'en_US',
}
tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items())
tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')
tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
tfa_results = self._download_webpage(
tfa_req, None,
note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
if tfa_results is False:
return False
if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None:
self._downloader.report_warning('Two-factor code expired. Please try again, or use a one-use backup code instead.')
return False
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
self._downloader.report_warning('unable to log in - did the page structure change?')
return False
if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
return False
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
self._downloader.report_warning('unable to log in: bad username or password')
return False
return True
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx)
v=
)
))
|youtu\.be/ # just youtu.be/xxxx
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?&list=) # combined list/video URLs are handled by the playlist IE
(?(1).+)? # if we found the ID, everything can follow
$"""
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240},
'6': {'ext': 'flv', 'width': 450, 'height': 270},
'13': {'ext': '3gp'},
'17': {'ext': '3gp', 'width': 176, 'height': 144},
'18': {'ext': 'mp4', 'width': 640, 'height': 360},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720},
'34': {'ext': 'flv', 'width': 640, 'height': 360},
'35': {'ext': 'flv', 'width': 854, 'height': 480},
'36': {'ext': '3gp', 'width': 320, 'height': 240},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
'43': {'ext': 'webm', 'width': 640, 'height': 360},
'44': {'ext': 'webm', 'width': 854, 'height': 480},
'45': {'ext': 'webm', 'width': 1280, 'height': 720},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080},
# 3d videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},
# Apple HTTP Live Streaming
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'VP9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'VP9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
# Dash webm audio
'171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
'172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
'250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
'251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
}
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'http://www.youtube.com/watch?v=BaW_jenozKc',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'like_count': int,
'dislike_count': int,
}
},
{
'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
'note': 'Test generic use_cipher_signature video (#897)',
'info_dict': {
'id': 'UxxajLWwzqY',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
'description': 'md5:fea86fda2d5a5784273df5c7cc994d9f',
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
}
},
{
'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
'note': 'Test VEVO video with age protection (#956)',
'info_dict': {
'id': '07FYdnEawAQ',
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
'description': 'md5:64249768eec3bc4276236606ea996373',
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia'
}
},
{
'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# JS player signature function name containing $
{
'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
'info_dict': {
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
'description': 'md5:2acfda1b285bdd478ccec22f9918199d',
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'upload_date': '20100909',
'uploader': 'The Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'upload_date': '20140605',
},
},
# Age-gate video with encrypted signature
{
'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
'info_dict': {
'id': '6kLq3WMV1nU',
'ext': 'mp4',
'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'upload_date': '20110629',
},
},
# video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/rg3/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'upload_date': '20120731',
'uploader_id': 'olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫艾倫',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
}
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
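        # e.g. an example_sig of 'ABCDE.FGHIJKL.XY' yields the cache id '5.7.2'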
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$',
player_url)
if not id_m:
raise ExtractorError('Cannot identify player %r' % player_url)
player_type = id_m.group('ext')
player_id = id_m.group('id')
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note='Downloading %s player %s' % (player_type, player_id),
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note='Downloading %s player %s' % (player_type, player_id),
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
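        # The deciphering function is assumed to only reorder/drop characters, so
        # running it once on a probe string of unique characters and recording the
        # resulting index order (cache_spec) is enough to replay it from the cache.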
if cache_spec is None:
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
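                # e.g. _genslice(3, 0, -1) -> 's[3::-1]' and _genslice(0, 4, 1) -> 's[:5]'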
step = None
            # Quell pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
'Initial JS player signature function name')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_available_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
params = compat_urllib_parse.urlencode({
'lang': lang,
'v': video_id,
'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
'name': track.attrib['name'].encode('utf-8'),
})
url = 'https://www.youtube.com/api/timedtext?' + params
sub_lang_list[lang] = url
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_available_automatic_caption(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
sub_format = self._downloader.params.get('subtitlesformat', 'srt')
self.to_screen('%s: Looking for automatic captions' % video_id)
mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if mobj is None:
self._downloader.report_warning(err_msg)
return {}
player_config = json.loads(mobj.group(1))
try:
args = player_config['args']
caption_url = args['ttsurl']
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse.urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
params = compat_urllib_parse.urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': sub_format,
'ts': timestamp,
'kind': caption_kind,
})
sub_lang_list[sub_lang] = caption_url + '&' + params
return sub_lang_list
        # An extractor error can be raised by the download process if there are
# no automatic captions but there are subtitles
except (KeyError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
def _extract_from_m3u8(self, manifest_url, video_id):
url_map = {}
def _get_urls(_manifest):
lines = _manifest.split('\n')
urls = filter(lambda l: l and not l.startswith('#'),
lines)
return urls
manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
formats_urls = _get_urls(manifest)
for format_url in formats_urls:
itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
url_map[itag] = format_url
return url_map
def _extract_annotations(self, video_id):
url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
def _parse_dash_manifest(
self, video_id, dash_manifest_url, player_url, age_gate):
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
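        # Encrypted signatures embedded in the manifest URL path ('/s/<sig>') are
        # replaced with their decrypted form ('/signature/<sig>') before downloading.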
dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
dash_doc = self._download_xml(
dash_manifest_url, video_id,
note='Downloading DASH manifest',
errnote='Could not download DASH manifest')
formats = []
for r in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
if url_el is None:
continue
format_id = r.attrib['id']
video_url = url_el.text
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
f = {
'format_id': format_id,
'url': video_url,
'width': int_or_none(r.attrib.get('width')),
'height': int_or_none(r.attrib.get('height')),
'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
'asr': int_or_none(r.attrib.get('audioSamplingRate')),
'filesize': filesize,
'fps': int_or_none(r.attrib.get('frameRate')),
}
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == format_id)
except StopIteration:
f.update(self._formats.get(format_id, {}).items())
formats.append(f)
else:
existing_format.update(f)
return formats
def _real_extract(self, url):
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
# Get video info
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
            # this can be viewed without logging in to YouTube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse.urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
else:
age_gate = False
try:
# Try looking directly into the video webpage
mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
if not mobj:
raise ValueError('Could not find ytplayer.config') # caught below
json_code = uppercase_escape(mobj.group(1))
ytplayer_config = json.loads(json_code)
args = ytplayer_config['args']
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
if 'url_encoded_fmt_stream_map' not in args:
raise ValueError('No stream_map present') # caught below
except ValueError:
# We fallback to the get_video_info pages (used by the embed page)
self.report_video_info_webpage_download(video_id)
for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
video_info_url = (
'%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
% (proto, video_id, el_type))
video_info_webpage = self._download_webpage(
video_info_url,
video_id, note=False,
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
if 'token' in video_info:
break
if 'token' not in video_info:
if 'reason' in video_info:
raise ExtractorError(
'YouTube said: %s' % video_info['reason'][0],
expected=True, video_id=video_id)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id)
if 'view_count' in video_info:
view_count = int(video_info['view_count'][0])
else:
view_count = None
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported')
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if 'author' not in video_info:
raise ExtractorError('Unable to extract uploader name')
video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
# uploader_id
video_uploader_id = None
mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
if mobj is not None:
video_uploader_id = mobj.group(1)
else:
self._downloader.report_warning('unable to extract uploader nickname')
# title
if 'title' in video_info:
video_title = video_info['title'][0]
else:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
# thumbnail image
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif 'thumbnail_url' not in video_info:
self._downloader.report_warning('unable to extract video thumbnail')
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
# upload date
upload_date = None
mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage)
if mobj is None:
mobj = re.search(
r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live) on (.*?)</strong>',
video_webpage)
if mobj is not None:
upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
upload_date = unified_strdate(upload_date)
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
video_categories = None if category is None else [category]
else:
video_categories = None
# description
video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
title="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
class="yt-uix-redirect-link"\s*>
[^<]+
</a>
''', r'\1', video_description)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ''
def _extract_count(count_name):
count = self._search_regex(
r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name),
video_webpage, count_name, default=None)
if count is not None:
return int(count.replace(',', ''))
return None
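            # The count is scraped from markup roughly like
            # <span id="watch-like" ...> ... 1,234</span>; the comma-grouped number
            # is converted to an int above.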
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, video_webpage)
return
if 'length_seconds' not in video_info:
self._downloader.report_warning('unable to extract video duration')
video_duration = None
else:
video_duration = int(compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
video_annotations = self._extract_annotations(video_id)
def _map_to_format_list(urlmap):
formats = []
for itag, video_real_url in urlmap.items():
dct = {
'format_id': itag,
'url': video_real_url,
'player_url': player_url,
}
if itag in self._formats:
dct.update(self._formats[itag])
formats.append(dct)
return formats
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
url_map = {}
for url_data_str in encoded_url_map.split(','):
url_data = compat_parse_qs(url_data_str)
if 'itag' not in url_data or 'url' not in url_data:
continue
format_id = url_data['itag'][0]
url = url_data['url'][0]
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
jsplayer_url_json = self._search_regex(
r'"assets":.+?"js":\s*("[^"]+")',
embed_webpage if age_gate else video_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if self._downloader.params.get('verbose'):
if player_url is None:
player_version = 'unknown'
player_desc = 'unknown'
else:
if player_url.endswith('swf'):
player_version = self._search_regex(
r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
'flash player', fatal=False)
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
r'html5player-([^/]+?)(?:/html5player)?\.js',
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
url += '&signature=' + signature
if 'ratebypass' not in url:
url += '&ratebypass=yes'
url_map[format_id] = url
formats = _map_to_format_list(url_map)
elif video_info.get('hlsvp'):
manifest_url = video_info['hlsvp'][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
formats = _map_to_format_list(url_map)
else:
raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd = video_info.get('dashmpd')
if dash_mpd:
dash_manifest_url = dash_mpd[0]
try:
dash_formats = self._parse_dash_manifest(
video_id, dash_manifest_url, player_url, age_gate)
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
else:
# Hide the formats we found through non-DASH
dash_keys = set(df['format_id'] for df in dash_formats)
for f in formats:
if f['format_id'] in dash_keys:
f['format_id'] = 'nondash-%s' % f['format_id']
f['preference'] = f.get('preference', 0) - 10000
formats.extend(dash_formats)
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
ratio = float(stretched_m.group('w')) / float(stretched_m.group('h'))
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
self._sort_formats(formats)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'upload_date': upload_date,
'title': video_title,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
'subtitles': video_subtitles,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'formats': formats,
}
class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
youtube\.com/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
\? (?:.*?&)*? (?:p|a|list)=
| p/
)
(
(?:PL|LL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
((?:PL|LL|EC|UU|FL|RD)[0-9A-Za-z-_]{10,})
)"""
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
'info_dict': {
'title': 'ytdl test PL',
'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
},
'playlist_count': 3,
}, {
'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'info_dict': {
'title': 'YDL_Empty_List',
},
'playlist_count': 0,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
},
'playlist_count': 95,
}, {
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
},
'playlist_mincount': 26,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
},
'playlist_mincount': 799,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
},
'playlist_count': 2,
}, {
'note': 'embedded',
'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
}
}, {
'note': 'Embedded SWF player',
'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
}
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
},
'playlist_mincount': 21,
}]
def _real_initialize(self):
self._login()
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _extract_mix(self, playlist_id):
# The mixes are generated from a single video
# the id of the playlist is just 'RD' + video_id
url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
webpage = self._download_webpage(
url, playlist_id, 'Downloading Youtube mix')
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
search_title('playlist-title') or
search_title('title long-title') or
search_title('title'))
title = clean_html(title_span)
ids = orderedSet(re.findall(
r'''(?xs)data-video-username=".*?".*?
href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
webpage))
url_results = self._ids_to_results(ids)
return self.playlist_result(url_results, playlist_id, title)
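# Illustrative note (not from the original source): a mix playlist id is just
# 'RD' followed by an 11-character video id, e.g. 'RDBaW_jenozKc' would be the
# mix seeded by the video 'BaW_jenozKc' mentioned elsewhere in this module.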
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
# Check if it's a video-specific URL
query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
if 'v' in query_dict:
video_id = query_dict['v'][0]
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return self.url_result(video_id, 'Youtube', video_id=video_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
if playlist_id.startswith('RD'):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
more_widget_html = content_html = page
# Check if the playlist exists or is private
if re.search(r'<div class="yt-alert-message">[^<]*?(The|This) playlist (does not exist|is private)[^<]*?</div>', page) is not None:
raise ExtractorError(
'The playlist doesn\'t exist or is private, use --username or '
'--netrc to access it.',
expected=True)
# Extract the video ids from the playlist pages
ids = []
for page_num in itertools.count(1):
matches = re.finditer(self._VIDEO_RE, content_html)
# We remove the duplicates and the link with index 0
# (it's not the first video of the playlist)
new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
ids.extend(new_ids)
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
page, 'title')
url_results = self._ids_to_results(ids)
return self.playlist_result(url_results, playlist_id, playlist_title)
class YoutubeChannelIE(InfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
}]
def extract_videos_from_page(self, page):
ids_in_page = []
for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
if mobj.group(1) not in ids_in_page:
ids_in_page.append(mobj.group(1))
return ids_in_page
def _real_extract(self, url):
channel_id = self._match_id(url)
video_ids = []
url = 'https://www.youtube.com/channel/%s/videos' % channel_id
channel_page = self._download_webpage(url, channel_id)
autogenerated = re.search(r'''(?x)
class="[^"]*?(?:
channel-header-autogenerated-label|
yt-channel-title-autogenerated
)[^"]*"''', channel_page) is not None
if autogenerated:
# The videos are contained in a single page
# the ajax pages can't be used, they are empty
video_ids = self.extract_videos_from_page(channel_page)
entries = [
self.url_result(video_id, 'Youtube', video_id=video_id)
for video_id in video_ids]
return self.playlist_result(entries, channel_id)
def _entries():
more_widget_html = content_html = channel_page
for pagenum in itertools.count(1):
ids_in_page = self.extract_videos_from_page(content_html)
for video_id in ids_in_page:
yield self.url_result(
video_id, 'Youtube', video_id=video_id)
mobj = re.search(
r'data-uix-load-more-href="/?(?P<more>[^"]+)"',
more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), channel_id,
'Downloading page #%s' % (pagenum + 1),
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
return self.playlist_result(_entries(), channel_id)
class YoutubeUserIE(InfoExtractor):
IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
_GDATA_PAGE_SIZE = 50
_GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
IE_NAME = 'youtube:user'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheLinuxFoundation',
'playlist_mincount': 320,
'info_dict': {
'title': 'TheLinuxFoundation',
}
}, {
'url': 'ytuser:phihag',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
# Don't return True if the url can be extracted with other youtube
# extractors; the regex is too permissive and it would match.
other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
if any(ie.suitable(url) for ie in other_ies):
return False
else:
return super(YoutubeUserIE, cls).suitable(url)
def _real_extract(self, url):
username = self._match_id(url)
# Download video ids using YouTube Data API. Result size per
# query is limited (currently to 50 videos) so we need to query
# page by page until there are no video ids - it means we got
# all of them.
def download_page(pagenum):
start_index = pagenum * self._GDATA_PAGE_SIZE + 1
gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
page = self._download_webpage(
gdata_url, username,
'Downloading video ids from %d to %d' % (
start_index, start_index + self._GDATA_PAGE_SIZE))
try:
response = json.loads(page)
except ValueError as err:
raise ExtractorError('Invalid JSON in API response: ' + compat_str(err))
if 'entry' not in response['feed']:
return
# Extract video identifiers
entries = response['feed']['entry']
for entry in entries:
title = entry['title']['$t']
video_id = entry['id']['$t'].split('/')[-1]
yield {
'_type': 'url',
'url': video_id,
'ie_key': 'Youtube',
'id': video_id,
'title': title,
}
url_results = OnDemandPagedList(download_page, self._GDATA_PAGE_SIZE)
return self.playlist_result(url_results, playlist_title=username)
class YoutubeSearchIE(SearchInfoExtractor):
IE_DESC = 'YouTube.com searches'
_API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
_MAX_RESULTS = 1000
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
video_ids = []
pagenum = 0
limit = n
PAGE_SIZE = 50
while (PAGE_SIZE * pagenum) < limit:
result_url = self._API_URL % (
compat_urllib_parse.quote_plus(query.encode('utf-8')),
(PAGE_SIZE * pagenum) + 1)
data_json = self._download_webpage(
result_url, video_id='query "%s"' % query,
note='Downloading page %s' % (pagenum + 1),
errnote='Unable to download API page')
data = json.loads(data_json)
api_response = data['data']
if 'items' not in api_response:
raise ExtractorError(
'[youtube] No video results', expected=True)
new_ids = list(video['id'] for video in api_response['items'])
video_ids += new_ids
limit = min(n, api_response['totalItems'])
pagenum += 1
if len(video_ids) > n:
video_ids = video_ids[:n]
videos = [self.url_result(video_id, 'Youtube', video_id=video_id)
for video_id in video_ids]
return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
class YoutubeSearchURLIE(InfoExtractor):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse.unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
result_code = self._search_regex(
r'(?s)<ol class="item-section"(.*?)</ol>', webpage, 'result HTML')
part_codes = re.findall(
r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)
entries = []
for part_code in part_codes:
part_title = self._html_search_regex(
[r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
part_url_snippet = self._html_search_regex(
r'(?s)href="([^"]+)"', part_code, 'item URL')
part_url = compat_urlparse.urljoin(
'https://www.youtube.com/', part_url_snippet)
entries.append({
'_type': 'url',
'url': part_url,
'title': part_title,
})
return {
'_type': 'playlist',
'entries': entries,
'title': query,
}
class YoutubeShowIE(InfoExtractor):
IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
'url': 'http://www.youtube.com/show/airdisasters',
'playlist_mincount': 3,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(
url, playlist_id, 'Downloading show webpage')
# There's one playlist for each season of the show
m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons)))
entries = [
self.url_result(
'https://www.youtube.com' + season.group(1), 'YoutubePlaylist')
for season in m_seasons
]
title = self._og_search_title(webpage, fatal=False)
return {
'_type': 'playlist',
'id': playlist_id,
'title': title,
'entries': entries,
}
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for extractors that fetch info from
http://www.youtube.com/feed_ajax
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
# use action_load_personal_feed instead of action_load_system_feed
_PERSONAL_FEED = False
@property
def _FEED_TEMPLATE(self):
action = 'action_load_system_feed'
if self._PERSONAL_FEED:
action = 'action_load_personal_feed'
return 'https://www.youtube.com/feed_ajax?%s=1&feed_name=%s&paging=%%s' % (action, self._FEED_NAME)
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _real_extract(self, url):
feed_entries = []
paging = 0
for i in itertools.count(1):
info = self._download_json(
self._FEED_TEMPLATE % paging,
'%s feed' % self._FEED_NAME,
'Downloading page %s' % i,
transform_source=uppercase_escape)
feed_html = info.get('feed_html') or info.get('content_html')
load_more_widget_html = info.get('load_more_widget_html') or feed_html
m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
ids = orderedSet(m.group(1) for m in m_ids)
feed_entries.extend(
self.url_result(video_id, 'Youtube', video_id=video_id)
for video_id in ids)
mobj = re.search(
r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)',
load_more_widget_html)
if mobj is None:
break
paging = mobj.group('paging')
return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
_FEED_NAME = 'watch_later'
_PLAYLIST_TITLE = 'Youtube Watch Later'
_PERSONAL_FEED = True
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PERSONAL_FEED = True
_PLAYLIST_TITLE = 'Youtube Watch History'
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeSubscriptionsIE(YoutubePlaylistIE):
IE_NAME = 'youtube:subscriptions'
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_TESTS = []
def _real_extract(self, url):
title = 'Youtube Subscriptions'
page = self._download_webpage('https://www.youtube.com/feed/subscriptions', title)
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
new_ids = orderedSet(matches)
ids.extend(new_ids)
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), title,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
return {
'_type': 'playlist',
'title': title,
'entries': self._ids_to_results(ids),
}
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'http://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
| unlicense | -1,893,902,528,345,365,200 | 43.208525 | 226 | 0.517656 | false |
stuser/temp | web_crawler/thread_sample.py | 1 | 2625 | import threading
import time
import requests
import datetime as dt
import NTUH_clinic as nc
# Outpatient clinic query parameter file (.CSV file)
ParamaterFileName = "NTUH_params"
# Time interval between query runs
interval = 300 # sec
def query(sess, classname, directory, url, hosp, dept, ampm, querydate):
bs = nc.BsObject(url, hosp, dept, ampm, querydate, sess)
soup = bs.getQueryResult()
df = bs.convertDataToDataFrame(soup)
nc.exportDataToCSVfile(df, classname, directory,
hosp, dept, ampm, querydate)
sess.close()
def getAmPmFlag():
# clinic hour: Morning clinic 09:00~12:00 , Afternoon clinic 14:00~17:00 , Evening clinic 18:30-20:30
curr = dt.datetime.now()
am_start = dt.datetime(curr.year, curr.month, curr.day, 9, 0)
pm_start = dt.datetime(curr.year, curr.month, curr.day, 14, 0)
evn_start = dt.datetime(curr.year, curr.month, curr.day, 18, 30)
clinic_end = dt.datetime(curr.year, curr.month,
curr.day, 23, 0) # cut-off time for the query script
ampm_flag = 0
if pm_start > curr >= am_start:
ampm_flag = 1 # Morning clinic
elif evn_start > curr >= pm_start:
ampm_flag = 2 # Afternoon clinic
elif clinic_end > curr >= evn_start:
ampm_flag = 3 # Evening clinic
else:
pass # print("Outside clinic hours")
return ampm_flag
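# A minimal, hypothetical sketch (not part of the original script) of the same
# window logic with the timestamp passed in explicitly, which makes it easy to
# unit-test without patching datetime.now():
def ampm_flag_for(ts):
    """Return 1/2/3 for the morning/afternoon/evening clinic slot, else 0."""
    hhmm = ts.hour * 100 + ts.minute
    if 900 <= hhmm < 1400:
        return 1  # morning clinic slot
    elif 1400 <= hhmm < 1830:
        return 2  # afternoon clinic slot
    elif 1830 <= hhmm < 2300:
        return 3  # evening clinic slot
    return 0      # outside clinic hours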
def demo():
AmPmFlag = getAmPmFlag()
# AmPmFlag = 1 #test code
if AmPmFlag != 0:
all_param_set = nc.loadParamaterFile(ParamaterFileName)
# Pick the query conditions that belong to the current clinic session
param_set = all_param_set[all_param_set['ampm'] == str(AmPmFlag)]
# The index is used in the for-loop below, so the subset needs its index reset.
param_set = param_set.reset_index()
query_set_nums = len(param_set)
else:
query_set_nums = 0 # outside clinic hours: set the number of query conditions to 0, i.e. run no queries
for num in range(query_set_nums):
sess = requests.Session()
# print("param_set",param_set)
t = threading.Thread(target=query,
args=[sess,
param_set.classname[num],
param_set.directory[num],
param_set.url[num],
param_set.hosp[num],
param_set.dept[num],
param_set.ampm[num],
dt.datetime.now().strftime('%Y/%m/%d')])
t.start()
while True:
threading.Thread(target=demo).start()
time.sleep(interval)
| mit | 2,986,205,221,865,317,400 | 32.689189 | 106 | 0.561974 | false |
datafiniti/Diamond | src/collectors/httpd/httpd.py | 1 | 3932 | # coding=utf-8
"""
Collect stats from Apache HTTPD server using mod_status
#### Dependencies
* mod_status
* httplib
* urlparse
"""
import re
import httplib
import urlparse
import diamond.collector
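# Illustrative, abbreviated example of what /server-status?auto typically returns;
# the collector below parses a subset of these "Key: value" lines:
#
#   Total Accesses: 12345
#   Total kBytes: 6789
#   Uptime: 43200
#   ReqPerSec: .285
#   BytesPerSec: 160.9
#   BytesPerReq: 563.7
#   BusyWorkers: 3
#   IdleWorkers: 17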
class HttpdCollector(diamond.collector.Collector):
def __init__(self, *args, **kwargs):
super(HttpdCollector, self).__init__(*args, **kwargs)
if 'url' in self.config:
self.config['urls'].append(self.config['url'])
self.urls = {}
for url in self.config['urls']:
if ' ' in url:
parts = url.split(' ')
self.urls[parts[0]] = parts[1]
else:
self.urls[''] = url
def get_default_config_help(self):
config_help = super(HttpdCollector, self).get_default_config_help()
config_help.update({
'urls': "Urls to server-status in auto format, comma separated,"
+ " Format 'nickname http://host:port/server-status?auto, "
+ "nickname http://host:port/server-status?auto, etc'",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(HttpdCollector, self).get_default_config()
config.update({
'path': 'httpd',
'urls': ['localhost http://localhost:8080/server-status?auto']
})
return config
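# Hypothetical example (hostnames and ports are placeholders) of overriding the
# default with two vhosts in the "nickname url" format described above:
#
#   config.update({
#       'urls': ['vhost1 http://localhost:8080/server-status?auto',
#                'vhost2 http://localhost:8081/server-status?auto'],
#   })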
def collect(self):
for nickname in self.urls.keys():
url = self.urls[nickname]
metrics = ['ReqPerSec', 'BytesPerSec', 'BytesPerReq',
'BusyWorkers', 'IdleWorkers', 'Total Accesses']
try:
while True:
# Parse Url
parts = urlparse.urlparse(url)
# Parse host and port
endpoint = parts[1].split(':')
if len(endpoint) > 1:
service_host = endpoint[0]
service_port = int(endpoint[1])
else:
service_host = endpoint[0]
service_port = 80
# Setup Connection
connection = httplib.HTTPConnection(service_host,
service_port)
url = "%s?%s" % (parts[2], parts[4])
connection.request("GET", url)
response = connection.getresponse()
data = response.read()
headers = dict(response.getheaders())
if ('location' not in headers
or headers['location'] == url):
connection.close()
break
url = headers['location']
connection.close()
except Exception, e:
self.log.error(
"Error retrieving HTTPD stats for host %s:%s, url '%s': %s",
service_host, str(service_port), url, e)
continue
exp = re.compile('^([A-Za-z ]+):\s+(.+)$')
for line in data.split('\n'):
if line:
m = exp.match(line)
if m:
k = m.group(1)
v = m.group(2)
if k in metrics:
# Get Metric Name
metric_name = "%s" % re.sub('\s+', '', k)
# Prefix with the nickname?
if len(nickname) > 0:
metric_name = nickname + '.' + metric_name
# Get Metric Value
metric_value = "%d" % float(v)
# Publish Metric
self.publish(metric_name, metric_value)
| mit | -1,247,745,794,665,574,100 | 32.322034 | 80 | 0.44532 | false |
kyokley/MediaConverter | main.py | 1 | 1082 | from tv_runner import TvRunner
from movie_runner import MovieRunner
from settings import (MEDIAVIEWER_INFER_SCRAPERS_URL,
SEND_EMAIL,
CELERY_VHOST,
)
from utils import postData, send_email
from celery import Celery
from log import LogFile
log = LogFile().getLogger()
app = Celery('tasks', broker='amqp://guest@localhost/%s' % CELERY_VHOST)
@app.task(name='main.main')
def main():
all_errors = []
tvRunner = TvRunner()
tv_errors = tvRunner.run()
movieRunner = MovieRunner()
movie_errors = movieRunner.run()
postData({}, MEDIAVIEWER_INFER_SCRAPERS_URL)
all_errors.extend(tv_errors)
all_errors.extend(movie_errors)
if all_errors:
log.error('Errors occured in the following files:')
for error in all_errors:
log.error(error)
if SEND_EMAIL:
subject = 'MC: Got some errors'
message = '\n'.join(all_errors)
send_email(subject, message)
log.info('All done')
if __name__ == '__main__':
main.delay()
| mit | -8,782,212,688,426,282,000 | 24.761905 | 72 | 0.609982 | false |
procrastinatio/mapproxy | mapproxy/test/unit/test_image.py | 2 | 22774 | # -:- encoding: utf8 -:-
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
from io import BytesIO
from mapproxy.compat.image import Image, ImageDraw
from mapproxy.image import ImageSource, ReadBufWrapper, is_single_color_image
from mapproxy.image import peek_image_format
from mapproxy.image.merge import merge_images
from mapproxy.image import _make_transparent as make_transparent, SubImageSource, img_has_transparency, quantize
from mapproxy.image.opts import ImageOptions
from mapproxy.image.tile import TileMerger, TileSplitter
from mapproxy.image.transform import ImageTransformer
from mapproxy.test.image import is_png, is_jpeg, is_tiff, create_tmp_image_file, check_format, create_debug_img, create_image
from mapproxy.srs import SRS
from nose.tools import eq_
from mapproxy.test.image import assert_img_colors_eq
from nose.plugins.skip import SkipTest
PNG_FORMAT = ImageOptions(format='image/png')
JPEG_FORMAT = ImageOptions(format='image/jpeg')
TIFF_FORMAT = ImageOptions(format='image/tiff')
class TestImageSource(object):
def setup(self):
self.tmp_filename = create_tmp_image_file((100, 100))
def teardown(self):
os.remove(self.tmp_filename)
def test_from_filename(self):
ir = ImageSource(self.tmp_filename, PNG_FORMAT)
assert is_png(ir.as_buffer())
assert ir.as_image().size == (100, 100)
def test_from_file(self):
with open(self.tmp_filename, 'rb') as tmp_file:
ir = ImageSource(tmp_file, 'png')
assert ir.as_buffer() == tmp_file
assert ir.as_image().size == (100, 100)
def test_from_image(self):
img = Image.new('RGBA', (100, 100))
ir = ImageSource(img, (100, 100), PNG_FORMAT)
assert ir.as_image() == img
assert is_png(ir.as_buffer())
def test_from_non_seekable_file(self):
with open(self.tmp_filename, 'rb') as tmp_file:
data = tmp_file.read()
class FileLikeDummy(object):
# "file" without seek, like urlopen response
def read(self):
return data
ir = ImageSource(FileLikeDummy(), 'png')
assert ir.as_buffer(seekable=True).read() == data
assert ir.as_image().size == (100, 100)
assert ir.as_buffer().read() == data
def test_output_formats(self):
img = Image.new('RGB', (100, 100))
for format in ['png', 'gif', 'tiff', 'jpeg', 'GeoTIFF', 'bmp']:
ir = ImageSource(img, (100, 100), image_opts=ImageOptions(format=format))
yield check_format, ir.as_buffer(), format
def test_converted_output(self):
ir = ImageSource(self.tmp_filename, (100, 100), PNG_FORMAT)
assert is_png(ir.as_buffer())
assert is_jpeg(ir.as_buffer(JPEG_FORMAT))
assert is_jpeg(ir.as_buffer())
assert is_tiff(ir.as_buffer(TIFF_FORMAT))
assert is_tiff(ir.as_buffer())
def test_output_formats_greyscale_png(self):
img = Image.new('L', (100, 100))
ir = ImageSource(img, image_opts=PNG_FORMAT)
img = Image.open(ir.as_buffer(ImageOptions(colors=256, transparent=True, format='image/png')))
assert img.mode == 'P'
assert img.getpixel((0, 0)) == 255
def test_output_formats_greyscale_alpha_png(self):
img = Image.new('LA', (100, 100))
ir = ImageSource(img, image_opts=PNG_FORMAT)
img = Image.open(ir.as_buffer(ImageOptions(colors=256, transparent=True, format='image/png')))
assert img.mode == 'LA'
assert img.getpixel((0, 0)) == (0, 0)
def test_output_formats_png8(self):
img = Image.new('RGBA', (100, 100))
ir = ImageSource(img, image_opts=PNG_FORMAT)
img = Image.open(ir.as_buffer(ImageOptions(colors=256, transparent=True, format='image/png')))
assert img.mode == 'P'
assert img.getpixel((0, 0)) == 255
def test_output_formats_png24(self):
img = Image.new('RGBA', (100, 100))
image_opts = PNG_FORMAT.copy()
image_opts.colors = 0 # TODO image_opts
ir = ImageSource(img, image_opts=image_opts)
img = Image.open(ir.as_buffer())
eq_(img.mode, 'RGBA')
assert img.getpixel((0, 0)) == (0, 0, 0, 0)
class TestSubImageSource(object):
def test_full(self):
sub_img = create_image((100, 100), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(0, 0), image_opts=ImageOptions()).as_image()
eq_(img.getcolors(), [(100*100, (100, 120, 130, 140))])
def test_larger(self):
sub_img = create_image((150, 150), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(0, 0), image_opts=ImageOptions()).as_image()
eq_(img.getcolors(), [(100*100, (100, 120, 130, 140))])
def test_negative_offset(self):
sub_img = create_image((150, 150), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(-50, 0), image_opts=ImageOptions()).as_image()
eq_(img.getcolors(), [(100*100, (100, 120, 130, 140))])
def test_overlap_right(self):
sub_img = create_image((50, 50), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(75, 25), image_opts=ImageOptions(transparent=True)).as_image()
eq_(sorted(img.getcolors()), [(25*50, (100, 120, 130, 140)), (100*100-25*50, (255, 255, 255, 0))])
def test_outside(self):
sub_img = create_image((50, 50), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(200, 0), image_opts=ImageOptions(transparent=True)).as_image()
eq_(img.getcolors(), [(100*100, (255, 255, 255, 0))])
class ROnly(object):
def __init__(self):
self.data = [b'Hello World!']
def read(self):
if self.data:
return self.data.pop()
return b''
def __iter__(self):
it = iter(self.data)
self.data = []
return it
class TestReadBufWrapper(object):
def setup(self):
rbuf = ROnly()
self.rbuf_wrapper = ReadBufWrapper(rbuf)
def test_read(self):
assert self.rbuf_wrapper.read() == b'Hello World!'
self.rbuf_wrapper.seek(0)
eq_(self.rbuf_wrapper.read(), b'')
def test_seek_read(self):
self.rbuf_wrapper.seek(0)
assert self.rbuf_wrapper.read() == b'Hello World!'
self.rbuf_wrapper.seek(0)
assert self.rbuf_wrapper.read() == b'Hello World!'
def test_iter(self):
data = list(self.rbuf_wrapper)
eq_(data, [b'Hello World!'])
self.rbuf_wrapper.seek(0)
data = list(self.rbuf_wrapper)
eq_(data, [])
def test_seek_iter(self):
self.rbuf_wrapper.seek(0)
data = list(self.rbuf_wrapper)
eq_(data, [b'Hello World!'])
self.rbuf_wrapper.seek(0)
data = list(self.rbuf_wrapper)
eq_(data, [b'Hello World!'])
def test_hasattr(self):
assert hasattr(self.rbuf_wrapper, 'seek')
assert hasattr(self.rbuf_wrapper, 'readline')
class TestMergeAll(object):
def setup(self):
self.cleanup_tiles = []
def test_full_merge(self):
self.cleanup_tiles = [create_tmp_image_file((100, 100)) for _ in range(9)]
self.tiles = [ImageSource(tile) for tile in self.cleanup_tiles]
m = TileMerger(tile_grid=(3, 3), tile_size=(100, 100))
img_opts = ImageOptions()
result = m.merge(self.tiles, img_opts)
img = result.as_image()
eq_(img.size, (300, 300))
def test_one(self):
self.cleanup_tiles = [create_tmp_image_file((100, 100))]
self.tiles = [ImageSource(self.cleanup_tiles[0])]
m = TileMerger(tile_grid=(1, 1), tile_size=(100, 100))
img_opts = ImageOptions(transparent=True)
result = m.merge(self.tiles, img_opts)
img = result.as_image()
eq_(img.size, (100, 100))
eq_(img.mode, 'RGBA')
def test_missing_tiles(self):
self.cleanup_tiles = [create_tmp_image_file((100, 100))]
self.tiles = [ImageSource(self.cleanup_tiles[0])]
self.tiles.extend([None]*8)
m = TileMerger(tile_grid=(3, 3), tile_size=(100, 100))
img_opts = ImageOptions()
result = m.merge(self.tiles, img_opts)
img = result.as_image()
eq_(img.size, (300, 300))
eq_(img.getcolors(), [(80000, (255, 255, 255)), (10000, (0, 0, 0)), ])
def test_invalid_tile(self):
self.cleanup_tiles = [create_tmp_image_file((100, 100)) for _ in range(9)]
self.tiles = [ImageSource(tile) for tile in self.cleanup_tiles]
invalid_tile = self.tiles[0].source
with open(invalid_tile, 'wb') as tmp:
tmp.write(b'invalid')
m = TileMerger(tile_grid=(3, 3), tile_size=(100, 100))
img_opts = ImageOptions(bgcolor=(200, 0, 50))
result = m.merge(self.tiles, img_opts)
img = result.as_image()
eq_(img.size, (300, 300))
eq_(img.getcolors(), [(10000, (200, 0, 50)), (80000, (0, 0, 0))])
assert not os.path.isfile(invalid_tile)
def test_none_merge(self):
tiles = [None]
m = TileMerger(tile_grid=(1, 1), tile_size=(100, 100))
img_opts = ImageOptions(mode='RGBA', bgcolor=(200, 100, 30, 40))
result = m.merge(tiles, img_opts)
img = result.as_image()
eq_(img.size, (100, 100))
eq_(img.getcolors(), [(100*100, (200, 100, 30, 40))])
def teardown(self):
for tile_fname in self.cleanup_tiles:
if tile_fname and os.path.isfile(tile_fname):
os.remove(tile_fname)
class TestGetCrop(object):
def setup(self):
self.tmp_file = create_tmp_image_file((100, 100), two_colored=True)
self.img = ImageSource(self.tmp_file,
image_opts=ImageOptions(format='image/png'), size=(100, 100))
def teardown(self):
if os.path.exists(self.tmp_file):
os.remove(self.tmp_file)
def test_perfect_match(self):
bbox = (-10, -5, 30, 35)
transformer = ImageTransformer(SRS(4326), SRS(4326))
result = transformer.transform(self.img, bbox, (100, 100), bbox, image_opts=None)
assert self.img == result
def test_simple_resize_nearest(self):
bbox = (-10, -5, 30, 35)
transformer = ImageTransformer(SRS(4326), SRS(4326))
result = transformer.transform(self.img, bbox, (200, 200), bbox,
image_opts=ImageOptions(resampling='nearest'))
img = result.as_image()
eq_(img.size, (200, 200))
eq_(len(img.getcolors()), 2)
def test_simple_resize_bilinear(self):
bbox = (-10, -5, 30, 35)
transformer = ImageTransformer(SRS(4326), SRS(4326))
result = transformer.transform(self.img, bbox, (200, 200), bbox,
image_opts=ImageOptions(resampling='bilinear'))
img = result.as_image()
eq_(img.size, (200, 200))
# some shades of grey with bilinear
assert len(img.getcolors()) >= 4
class TestLayerMerge(object):
def test_opacity_merge(self):
img1 = ImageSource(Image.new('RGB', (10, 10), (255, 0, 255)))
img2 = ImageSource(Image.new('RGB', (10, 10), (0, 255, 255)),
image_opts=ImageOptions(opacity=0.5))
result = merge_images([img1, img2], ImageOptions(transparent=False))
img = result.as_image()
eq_(img.getpixel((0, 0)), (127, 127, 255))
def test_opacity_merge_mixed_modes(self):
img1 = ImageSource(Image.new('RGBA', (10, 10), (255, 0, 255, 255)))
img2 = ImageSource(Image.new('RGB', (10, 10), (0, 255, 255)).convert('P'),
image_opts=ImageOptions(opacity=0.5))
result = merge_images([img1, img2], ImageOptions(transparent=True))
img = result.as_image()
assert_img_colors_eq(img, [
(10*10, (127, 127, 255, 255)),
])
def test_paletted_merge(self):
if not hasattr(Image, 'FASTOCTREE'):
raise SkipTest()
# generate RGBA images with a transparent rectangle in the lower right
img1 = ImageSource(Image.new('RGBA', (50, 50), (0, 255, 0, 255))).as_image()
draw = ImageDraw.Draw(img1)
draw.rectangle((25, 25, 49, 49), fill=(0, 0, 0, 0))
paletted_img = quantize(img1, alpha=True)
assert img_has_transparency(paletted_img)
assert paletted_img.mode == 'P'
rgba_img = Image.new('RGBA', (50, 50), (255, 0, 0, 255))
draw = ImageDraw.Draw(rgba_img)
draw.rectangle((25, 25, 49, 49), fill=(0, 0, 0, 0))
img1 = ImageSource(paletted_img)
img2 = ImageSource(rgba_img)
# generate base image and merge the others above
img3 = ImageSource(Image.new('RGBA', (50, 50), (0, 0, 255, 255)))
result = merge_images([img3, img1, img2], ImageOptions(transparent=True))
img = result.as_image()
assert img.mode == 'RGBA'
eq_(img.getpixel((49, 49)), (0, 0, 255, 255))
eq_(img.getpixel((0, 0)), (255, 0, 0, 255))
def test_solid_merge(self):
img1 = ImageSource(Image.new('RGB', (10, 10), (255, 0, 255)))
img2 = ImageSource(Image.new('RGB', (10, 10), (0, 255, 255)))
result = merge_images([img1, img2], ImageOptions(transparent=False))
img = result.as_image()
eq_(img.getpixel((0, 0)), (0, 255, 255))
class TestLayerCompositeMerge(object):
def test_composite_merge(self):
# http://stackoverflow.com/questions/3374878
if not hasattr(Image, 'alpha_composite'):
raise SkipTest()
img1 = Image.new('RGBA', size=(100, 100), color=(255, 0, 0, 255))
draw = ImageDraw.Draw(img1)
draw.rectangle((33, 0, 66, 100), fill=(255, 0, 0, 128))
draw.rectangle((67, 0, 100, 100), fill=(255, 0, 0, 0))
img1 = ImageSource(img1)
img2 = Image.new('RGBA', size =(100, 100), color=(0, 255, 0, 255))
draw = ImageDraw.Draw(img2)
draw.rectangle((0, 33, 100, 66), fill=(0, 255, 0, 128))
draw.rectangle((0, 67, 100, 100), fill=(0, 255, 0, 0))
img2 = ImageSource(img2)
result = merge_images([img2, img1], ImageOptions(transparent=True))
img = result.as_image()
eq_(img.mode, 'RGBA')
assert_img_colors_eq(img, [
(1089, (0, 255, 0, 255)),
(1089, (255, 255, 255, 0)),
(1122, (0, 255, 0, 128)),
(1122, (128, 126, 0, 255)),
(1122, (255, 0, 0, 128)),
(1156, (170, 84, 0, 191)),
(3300, (255, 0, 0, 255))])
def test_composite_merge_opacity(self):
if not hasattr(Image, 'alpha_composite'):
raise SkipTest()
bg = Image.new('RGBA', size=(100, 100), color=(255, 0, 255, 255))
bg = ImageSource(bg)
fg = Image.new('RGBA', size =(100, 100), color=(0, 0, 0, 0))
draw = ImageDraw.Draw(fg)
draw.rectangle((10, 10, 89, 89), fill=(0, 255, 255, 255))
fg = ImageSource(fg, image_opts=ImageOptions(opacity=0.5))
result = merge_images([bg, fg], ImageOptions(transparent=True))
img = result.as_image()
eq_(img.mode, 'RGBA')
assert_img_colors_eq(img, [
(3600, (255, 0, 255, 255)),
(6400, (128, 127, 255, 255))])
class TestTransform(object):
def setup(self):
self.src_img = ImageSource(create_debug_img((200, 200), transparent=False))
self.src_srs = SRS(31467)
self.dst_size = (100, 150)
self.dst_srs = SRS(4326)
self.dst_bbox = (0.2, 45.1, 8.3, 53.2)
self.src_bbox = self.dst_srs.transform_bbox_to(self.src_srs, self.dst_bbox)
def test_transform(self, mesh_div=4):
transformer = ImageTransformer(self.src_srs, self.dst_srs, mesh_div=mesh_div)
result = transformer.transform(self.src_img, self.src_bbox, self.dst_size, self.dst_bbox,
image_opts=ImageOptions(resampling='nearest'))
assert isinstance(result, ImageSource)
assert result.as_image() != self.src_img
assert result.size == (100, 150)
def _test_compare_mesh_div(self):
"""
Create transformations with different div values.
"""
for div in [1, 2, 4, 6, 8, 12, 16]:
transformer = ImageTransformer(self.src_srs, self.dst_srs, mesh_div=div)
result = transformer.transform(self.src_img, self.src_bbox,
self.dst_size, self.dst_bbox)
result.as_image().save('/tmp/transform-%d.png' % (div,))
class TestSingleColorImage(object):
def test_one_point(self):
img = Image.new('RGB', (100, 100), color='#ff0000')
draw = ImageDraw.Draw(img)
draw.point((99, 99))
del draw
assert not is_single_color_image(img)
def test_solid(self):
img = Image.new('RGB', (100, 100), color='#ff0102')
eq_(is_single_color_image(img), (255, 1, 2))
def test_solid_w_alpha(self):
img = Image.new('RGBA', (100, 100), color='#ff0102')
eq_(is_single_color_image(img), (255, 1, 2, 255))
def test_solid_paletted_image(self):
img = Image.new('P', (100, 100), color=20)
palette = []
for i in range(256):
palette.extend((i, i//2, i%3))
img.putpalette(palette)
eq_(is_single_color_image(img), (20, 10, 2))
class TestMakeTransparent(object):
def _make_test_image(self):
img = Image.new('RGB', (50, 50), (130, 140, 120))
draw = ImageDraw.Draw(img)
draw.rectangle((10, 10, 39, 39), fill=(130, 150, 120))
return img
def _make_transp_test_image(self):
img = Image.new('RGBA', (50, 50), (130, 140, 120, 100))
draw = ImageDraw.Draw(img)
draw.rectangle((10, 10, 39, 39), fill=(130, 150, 120, 120))
return img
def test_result(self):
img = self._make_test_image()
img = make_transparent(img, (130, 150, 120), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = img.getcolors()
assert colors == [(1600, (130, 140, 120, 255)), (900, (130, 150, 120, 0))]
def test_with_color_fuzz(self):
img = self._make_test_image()
img = make_transparent(img, (128, 154, 121), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = img.getcolors()
assert colors == [(1600, (130, 140, 120, 255)), (900, (130, 150, 120, 0))]
def test_no_match(self):
img = self._make_test_image()
img = make_transparent(img, (130, 160, 120), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = img.getcolors()
assert colors == [(1600, (130, 140, 120, 255)), (900, (130, 150, 120, 255))]
def test_from_paletted(self):
img = self._make_test_image().quantize(256)
img = make_transparent(img, (130, 150, 120), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = img.getcolors()
eq_(colors, [(1600, (130, 140, 120, 255)), (900, (130, 150, 120, 0))])
def test_from_transparent(self):
img = self._make_transp_test_image()
draw = ImageDraw.Draw(img)
draw.rectangle((0, 0, 4, 4), fill=(130, 100, 120, 0))
draw.rectangle((5, 5, 9, 9), fill=(130, 150, 120, 255))
img = make_transparent(img, (130, 150, 120, 120), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = sorted(img.getcolors(), reverse=True)
eq_(colors, [(1550, (130, 140, 120, 100)), (900, (130, 150, 120, 0)),
(25, (130, 150, 120, 255)), (25, (130, 100, 120, 0))])
class TestTileSplitter(object):
def test_background_larger_crop(self):
img = ImageSource(Image.new('RGB', (356, 266), (130, 140, 120)))
img_opts = ImageOptions('RGB')
splitter = TileSplitter(img, img_opts)
tile = splitter.get_tile((0, 0), (256, 256))
eq_(tile.size, (256, 256))
colors = tile.as_image().getcolors()
eq_(colors, [(256*256, (130, 140, 120))])
tile = splitter.get_tile((256, 256), (256, 256))
eq_(tile.size, (256, 256))
colors = tile.as_image().getcolors()
eq_(sorted(colors), [(10*100, (130, 140, 120)), (256*256-10*100, (255, 255, 255))])
def test_background_larger_crop_with_transparent(self):
img = ImageSource(Image.new('RGBA', (356, 266), (130, 140, 120, 255)))
img_opts = ImageOptions('RGBA', transparent=True)
splitter = TileSplitter(img, img_opts)
tile = splitter.get_tile((0, 0), (256, 256))
eq_(tile.size, (256, 256))
colors = tile.as_image().getcolors()
eq_(colors, [(256*256, (130, 140, 120, 255))])
tile = splitter.get_tile((256, 256), (256, 256))
eq_(tile.size, (256, 256))
colors = tile.as_image().getcolors()
eq_(sorted(colors), [(10*100, (130, 140, 120, 255)), (256*256-10*100, (255, 255, 255, 0))])
class TestHasTransparency(object):
def test_rgb(self):
if not hasattr(Image, 'FASTOCTREE'):
raise SkipTest()
img = Image.new('RGB', (10, 10))
assert not img_has_transparency(img)
img = quantize(img, alpha=False)
assert not img_has_transparency(img)
def test_rbga(self):
if not hasattr(Image, 'FASTOCTREE'):
raise SkipTest()
img = Image.new('RGBA', (10, 10), (100, 200, 50, 255))
img.paste((255, 50, 50, 0), (3, 3, 7, 7))
assert img_has_transparency(img)
img = quantize(img, alpha=True)
assert img_has_transparency(img)
class TestPeekImageFormat(object):
def test_peek(self):
yield self.check, 'png', 'png'
yield self.check, 'tiff', 'tiff'
yield self.check, 'gif', 'gif'
yield self.check, 'jpeg', 'jpeg'
yield self.check, 'bmp', None
def check(self, format, expected_format):
buf = BytesIO()
Image.new('RGB', (100, 100)).save(buf, format)
eq_(peek_image_format(buf), expected_format)
| apache-2.0 | 8,792,674,897,666,819,000 | 38.469671 | 125 | 0.584438 | false |
daskol/mipt-classifier | setup.py | 1 | 1380 | #!/usr/bin/env python3
# encoding: utf8
# setup.py
"""MIPT Student Classifier
"""
from setuptools import setup, find_packages
DOCLINES = (__doc__ or '').split('\n')
CLASSIFIERS = """\
Development Status :: 4 - Beta
Environment :: Console
Intended Audience :: Developers
Intended Audience :: End Users/Desktop
Intended Audience :: Information Technology
Intended Audience :: Other Audience
License :: OSI Approved :: MIT License
Natural Language :: Russian
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3.5
Topic :: Internet
Topic :: Office/Business
Topic :: Utilities
"""
PLATFORMS = [
'Linux',
]
MAJOR = 0
MINOR = 0
PATCH = 0
VERSION = '{0:d}.{1:d}.{2:d}'.format(MAJOR, MINOR, PATCH)
def setup_package():
setup(name='miptclass',
version=VERSION,
description = DOCLINES[0],
long_description = '\n'.join(DOCLINES[2:]),
author='Daniel Bershatsky',
author_email='[email protected]',
license='MIT',
platforms=PLATFORMS,
classifiers=[line for line in CLASSIFIERS.split('\n') if line],
packages=find_packages(),
entry_points={
'console_scripts': [
'mipt-classifier=miptclass.cli:main',
],
},
)
if __name__ == '__main__':
setup_package()
| mit | -7,745,480,850,932,025,000 | 21.622951 | 72 | 0.62029 | false |
tensorflow/models | research/audioset/yamnet/params.py | 1 | 1847 | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparameters for YAMNet."""
from dataclasses import dataclass
# The following hyperparameters (except patch_hop_seconds) were used to train YAMNet,
# so expect some variability in performance if you change these. The patch hop can
# be changed arbitrarily: a smaller hop should give you more patches from the same
# clip and possibly better performance at a larger computational cost.
@dataclass(frozen=True) # Instances of this class are immutable.
class Params:
sample_rate: float = 16000.0
stft_window_seconds: float = 0.025
stft_hop_seconds: float = 0.010
mel_bands: int = 64
mel_min_hz: float = 125.0
mel_max_hz: float = 7500.0
log_offset: float = 0.001
patch_window_seconds: float = 0.96
patch_hop_seconds: float = 0.48
@property
def patch_frames(self):
return int(round(self.patch_window_seconds / self.stft_hop_seconds))
@property
def patch_bands(self):
return self.mel_bands
num_classes: int = 521
conv_padding: str = 'same'
batchnorm_center: bool = True
batchnorm_scale: bool = False
batchnorm_epsilon: float = 1e-4
classifier_activation: str = 'sigmoid'
tflite_compatible: bool = False
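# A minimal usage sketch (not part of the original file): with the defaults above
# a patch spans 0.96 s of audio at a 10 ms hop, i.e. 96 frames of 64 mel bands.
if __name__ == '__main__':
    _p = Params()
    assert _p.patch_frames == 96
    assert _p.patch_bands == 64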
| apache-2.0 | -6,389,886,753,464,823,000 | 35.215686 | 85 | 0.708175 | false |
marco-mariotti/selenoprofiles | libraries/networkx/algorithms/tests/test_core.py | 1 | 2003 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestCore:
def setUp(self):
# G is the example graph in Figure 1 from Batagelj and
# Zaversnik's paper titled An O(m) Algorithm for Cores
# Decomposition of Networks, 2003,
# http://arXiv.org/abs/cs/0310049. With nodes labeled as
# shown, the 3-core is given by nodes 1-8, the 2-core by nodes
# 9-16, the 1-core by nodes 17-20 and node 21 is in the
# 0-core.
t1=nx.convert_node_labels_to_integers(nx.tetrahedral_graph(),1)
t2=nx.convert_node_labels_to_integers(t1,5)
G=nx.union(t1,t2)
G.add_edges_from( [(3,7), (2,11), (11,5), (11,12), (5,12), (12,19),
(12,18), (3,9), (7,9), (7,10), (9,10), (9,20),
(17,13), (13,14), (14,15), (15,16), (16,13)])
G.add_node(21)
self.G=G
# Create the graph H resulting from the degree sequence
# [0,1,2,2,2,2,3] when using the Havel-Hakimi algorithm.
degseq=[0,1,2,2,2,2,3]
self.H=nx.havel_hakimi_graph(degseq)
def test_trivial(self):
"""Empty graph"""
G = nx.Graph()
assert_equal(nx.find_cores(G),{})
def find_cores(self):
cores=nx.find_cores(self.G)
nodes_by_core=[]
for val in [0,1,2,3]:
nodes_by_core.append( sorted([k for k in cores if cores[k]==val]))
assert_equal(nodes_by_core[0],[21])
assert_equal(nodes_by_core[1],[17, 18, 19, 20])
assert_equal(nodes_by_core[2],[9, 10, 11, 12, 13, 14, 15, 16])
assert_equal(nodes_by_core[3], [1, 2, 3, 4, 5, 6, 7, 8])
def find_cores2(self):
cores=nx.find_cores(self.H)
nodes_by_core=[]
for val in [0,1,2]:
nodes_by_core.append( sorted([k for k in cores if cores[k]==val]))
assert_equal(nodes_by_core[0],[0])
assert_equal(nodes_by_core[1],[1, 3])
assert_equal(nodes_by_core[2],[2, 4, 5, 6])
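def _find_cores_usage_sketch():
    # A minimal usage sketch (not part of the original test module): find_cores
    # maps each node to its core number; in a simple path graph every node
    # belongs to the 1-core.
    G = nx.path_graph(4)
    assert nx.find_cores(G) == {0: 1, 1: 1, 2: 1, 3: 1}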
| gpl-2.0 | 6,964,820,110,641,476,000 | 36.092593 | 78 | 0.545182 | false |
kedz/cuttsum | wp-scripts/test-wtmf.py | 1 | 12192 | import corenlp as cnlp
from sklearn.metrics.pairwise import cosine_similarity
import re
import os
import gzip
import wtmf
from sklearn.externals import joblib
import cuttsum.events
import cuttsum.judgements
import pandas as pd
from collections import defaultdict
matches_df = cuttsum.judgements.get_merged_dataframe()
def heal_text(sent_text):
sent_text = sent_text.decode("utf-8")
sent_text = re.sub(
ur"[a-z ]+, [a-z][a-z ]+\( [a-z]+ \) [-\u2014_]+ ",
r"", sent_text)
sent_text = re.sub(
ur"^.*?[a-z ]+, [a-z][a-z]+ [-\u2014_]+ ",
r"", sent_text)
sent_text = re.sub(
ur"^.*?[a-z ]+\([^\)]+\) [-\u2014_]+ ",
r"", sent_text)
sent_text = re.sub(
ur"^.*?[a-z]+ +[-\u2014_]+ ",
r"", sent_text)
sent_text = re.sub(r"\([^)]+\)", r" ", sent_text)
sent_text = re.sub(ur"^ *[-\u2014_]+", r"", sent_text)
sent_text = re.sub(u" ([,.;?!]+)([\"\u201c\u201d'])", r"\1\2", sent_text)
sent_text = re.sub(r" ([:-]) ", r"\1", sent_text)
sent_text = re.sub(r"([^\d]\d{1,3}) , (\d\d\d)([^\d]|$)", r"\1,\2\3", sent_text)
sent_text = re.sub(r"^(\d{1,3}) , (\d\d\d)([^\d]|$)", r"\1,\2\3", sent_text)
sent_text = re.sub(ur" ('|\u2019) ([a-z]|ll|ve|re)( |$)", r"\1\2 ", sent_text)
sent_text = re.sub(r" ([',.;?!]+) ", r"\1 ", sent_text)
sent_text = re.sub(r" ([',.;?!]+)$", r"\1", sent_text)
sent_text = re.sub(r"(\d\.) (\d)", r"\1\2", sent_text)
sent_text = re.sub(r"(a|p)\. m\.", r"\1.m.", sent_text)
sent_text = re.sub(r"u\. (s|n)\.", r"u.\1.", sent_text)
sent_text = re.sub(
ur"\u201c ([^\s])",
ur"\u201c\1", sent_text)
sent_text = re.sub(
ur"([^\s]) \u201d",
ur"\1\u201d", sent_text)
sent_text = re.sub(
ur"\u2018 ([^\s])",
ur"\u2018\1", sent_text)
sent_text = re.sub(
ur"([^\s]) \u2019",
ur"\1\u2019", sent_text)
sent_text = re.sub(
ur"\u00e2",
ur"'", sent_text)
sent_text = re.sub(
r"^photo:reuters|^photo:ap",
r"", sent_text)
sent_text = sent_text.replace("\n", " ")
return sent_text.encode("utf-8")
nuggets = cuttsum.judgements.get_nuggets()
updates = pd.concat([
cuttsum.judgements.get_2013_updates(),
cuttsum.judgements.get_2014_sampled_updates()
])
#updates["text"] = updates["text"].apply(heal_text)
dom2type = {
"accidents": set(["accident"]),
"natural-disasters": set(["earthquake", "storm", "impact event"]),
"social-unrest": set(["protest", "riot"]),
"terrorism": set(["shooting", "bombing", "conflict", "hostage"]),
}
def tokenize(docs, norm, stop, ne, central_per=None, central_loc=None, central_org=None):
if stop:
with open("stopwords.txt", "r") as f:
sw = set([word.strip().decode("utf-8").lower() for word in f])
if norm == "stem":
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
all_toks = []
for doc in docs:
toks = []
for sent in doc:
if ne:
for tok in sent:
if tok.ne == "PERSON":
if unicode(tok.lem).lower() == central_per:
toks.append(u"__CPER__")
else:
toks.append(u"__PER__")
elif tok.ne == "LOCATION":
if unicode(tok.lem).lower() == central_loc:
toks.append(u"__CLOC__")
else:
toks.append(u"__LOC__")
elif tok.ne == "ORGANIZATION":
if unicode(tok.lem).lower() == central_org:
toks.append(u"__CORG__")
else:
toks.append(u"__ORG__")
else:
if norm == "lemma":
form = unicode(tok.lem).lower()
elif norm == "stem":
form = stemmer.stem(unicode(tok).lower())
else:
form = unicode(tok).lower()
if stop:
if form not in sw and len(form) < 50:
toks.append(form)
else:
if len(form) < 50:
toks.append(form)
else:
if norm == "lemma":
stoks = [unicode(tok.lem).lower() for tok in sent]
elif norm == "stem":
stoks = [stemmer.stem(unicode(tok).lower())
for tok in sent]
else:
stoks = [unicode(tok).lower() for tok in sent]
if stop:
toks.extend([tok for tok in stoks if tok not in sw])
else:
toks.extend(stoks)
toks = [tok for tok in toks if len(tok) < 50]
#if len(toks) == 0: continue
string = u" ".join(toks).encode("utf-8")
#print string
all_toks.append(string)
return all_toks
def find_central_nes(docs):
per_counts = defaultdict(int)
org_counts = defaultdict(int)
loc_counts = defaultdict(int)
for doc in docs:
for sent in doc:
for tok in sent:
if tok.ne == "PERSON":
per_counts[unicode(tok.lem).lower()] += 1
elif tok.ne == "LOCATION":
loc_counts[unicode(tok.lem).lower()] += 1
elif tok.ne == "ORGANIZATION":
org_counts[unicode(tok.lem).lower()] += 1
if len(per_counts) > 0:
central_per = max(per_counts.items(), key=lambda x: x[1])[0]
else:
central_per = None
if len(org_counts) > 0:
central_org = max(org_counts.items(), key=lambda x: x[1])[0]
else:
central_org = None
if len(loc_counts) > 0:
central_loc = max(loc_counts.items(), key=lambda x: x[1])[0]
else:
central_loc = None
return central_per, central_loc, central_org
def main(input_path, output_path, norm, stop, ne, lam, port):
dirname, domain = os.path.split(input_path)
input_path = os.path.join(
dirname,
"{}.norm-{}{}{}.lam{:0.3f}.pkl".format(
domain, norm, ".stop" if stop else "", ".ne" if ne else "", lam))
print "Domain: {}".format(domain)
print "Model Path: {}".format(input_path)
events = [event for event in cuttsum.events.get_events()
if event.type in dom2type[domain] and event.query_num < 26 and event.query_num != 7]
if ne is True:
annotators = ["tokenize", "ssplit", "pos", "lemma", "ner"]
elif norm == "lemma":
annotators = ["tokenize", "ssplit", "pos", "lemma"]
else:
annotators = ["tokenize", "ssplit"]
results = []
vec = joblib.load(input_path)
modelname = "{}.norm_{}.stop_{}.ne_{}.lam_{}".format(domain, norm, stop, ne, lam)
with cnlp.Server(annotators=annotators, mem="6G", port=port,
max_message_len=1000000) as client:
for event in events:
print event
event_nuggets = nuggets.loc[nuggets["query id"] == event.query_id]
print "processing nugget text"
nugget_docs = [client.annotate(text)
for text in event_nuggets["text"].tolist()]
#for doc in nugget_docs:
# print doc
#print
if ne:
central_per, central_loc, central_org = find_central_nes(
nugget_docs)
else:
central_per = None
central_loc = None
central_org = None
X_nug_txt = tokenize(nugget_docs, norm, stop, ne,
central_per=central_per, central_loc=central_loc,
central_org=central_org)
nuggets.loc[nuggets["query id"] == event.query_id, "X"] = X_nug_txt
event_nuggets = nuggets[nuggets["query id"] == event.query_id]
event_nuggets = event_nuggets[event_nuggets["X"].apply(lambda x: len(x.split(" ")) < 50 and len(x.split(" ")) > 0)]
X_nug_txt = event_nuggets["X"].tolist()
#for txt in X_nug_txt:
# print txt
#print
print "transforming nugget text"
X_nug = vec.transform(X_nug_txt)
assert X_nug.shape[0] == len(event_nuggets)
print "getting updates"
updates.loc[updates["query id"] == event.query_id, "text"] = \
updates.loc[updates["query id"] == event.query_id, "text"].apply(heal_text)
event_updates = updates[(updates["query id"] == event.query_id) & (updates["text"].apply(len) < 1000)]
print "processing update text"
docs = [client.annotate(text) for text in event_updates["text"].tolist()]
X_upd_txt = tokenize(docs, norm, stop, ne,
central_per=central_per, central_loc=central_loc,
central_org=central_org)
print "transforming update text"
X_upd = vec.transform(X_upd_txt)
for i, (index, nugget) in enumerate(event_nuggets.iterrows()):
boolean = (matches_df["query id"] == event.query_id) & (matches_df["nugget id"] == nugget["nugget id"])
match_ids = set(matches_df.loc[boolean, "update id"].tolist())
if len(match_ids) == 0: continue
#print index, nugget["nugget id"], nugget["text"]
#print X_nug[i]
if (X_nug[i] == 0).all(): continue
n_matches = 0
K = cosine_similarity(X_nug[i], X_upd)
for j in K.ravel().argsort()[::-1][:100]:
#print K[0,j],
#print event_updates.iloc[j]["text"]
if event_updates.iloc[j]["update id"] in match_ids:
n_matches += 1
#print
P100 = n_matches / 100.
optP100 = min(1., len(match_ids) / 100.)
nP100 = P100 / optP100
results.append(
{"model": modelname,
"nugget id": nugget["nugget id"],
"P@100": P100,
"opt P@100": optP100,
"normP@100":nP100
})
df = pd.DataFrame(results)
print df
print df["normP@100"].mean()
df["model"] = modelname
return results
# print len(event_updates)
#print event_updates["text"].apply(len).mean()
#print event_updates["text"].apply(heal_text).apply(len).max()
#print event_updates["text"].apply(heal_text).apply(len).median()
if __name__ == u"__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, required=True)
parser.add_argument("--output", type=str, required=False, default=None)
parser.add_argument("--port", type=int, required=True)
# parser.add_argument("--norm", choices=["stem", "lemma", "none"], type=str, required=True)
# parser.add_argument("--stop", action="store_true")
# parser.add_argument("--ne", action="store_true")
# parser.add_argument("--lam", type=float, required=True)
args = parser.parse_args()
dirname = os.path.dirname(args.output)
if dirname != "" and not os.path.exists(dirname):
os.makedirs(dirname)
data = []
for norm in ["none", "lemma", "stem"]:
for stop in [True, False]:
for ne in [True, False]:
for lam in [20., 10., 1., .1]:
data.extend(
main(args.input, args.output, norm, stop, ne, lam, args.port))
df = pd.DataFrame(data)
with open(args.output, "w") as f:
df.to_csv(f, sep="\t", index=False)
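# Illustrative invocation sketch -- the script name, paths and port below are
# placeholders, not taken from the original experiments. --input must end in one
# of the dom2type domain names; norm/stop/ne/lam settings are swept by the loop above.
#   python match_nuggets.py --input data/accidents --output results/accidents.tsv --port 9000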
| apache-2.0 | -4,383,275,878,599,725,000 | 36.170732 | 127 | 0.485482 | false |
liosha2007/temporary-groupdocs-python-sdk | groupdocs/ApiClient.py | 1 | 11192 | #!/usr/bin/env python
"""Wordnik.com's Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates."""
from __future__ import print_function
import sys
import os
import re
import urllib
import urllib2
import httplib
import json
import datetime
import mimetypes
import base64
from models import *
from groupdocs.FileStream import FileStream
from groupdocs import version
class RequestSigner(object):
def __init__(self):
if type(self) == RequestSigner:
raise Exception("RequestSigner is an abstract class and cannot be instantiated.")
def signUrl(self, url):
raise NotImplementedError
def signContent(self, requestBody, headers):
raise NotImplementedError
class DefaultRequestSigner(RequestSigner):
def signUrl(self, url):
return url
def signContent(self, requestBody, headers):
return requestBody
class ApiClient(object):
"""Generic API client for Swagger client library builds"""
def __init__(self, requestSigner=None):
self.signer = requestSigner if requestSigner != None else DefaultRequestSigner()
self.cookie = None
self.headers = {'Groupdocs-Referer': '/'.join((version.__pkgname__, version.__version__))}
self.__debug = False
def setDebug(self, flag, logFilepath=None):
self.__debug = flag
self.__logFilepath = logFilepath
def addHeaders(self, **headers):
self.headers = headers
def callAPI(self, apiServer, resourcePath, method, queryParams, postData,
headerParams=None, returnType=str):
if self.__debug and self.__logFilepath:
stdOut = sys.stdout
logFile = open(self.__logFilepath, 'a')
sys.stdout = logFile
url = apiServer + resourcePath
headers = {}
if self.headers:
for param, value in self.headers.iteritems():
headers[param] = value
if headerParams:
for param, value in headerParams.iteritems():
headers[param] = value
isFileUpload = False
if not postData:
headers['Content-type'] = 'text/html'
elif isinstance(postData, FileStream):
isFileUpload = True
if postData.contentType:
headers['Content-type'] = postData.contentType
if postData.size:
headers['Content-Length'] = str(postData.size)
else:
headers['Content-type'] = 'application/json'
if self.cookie:
headers['Cookie'] = self.cookie
data = None
if queryParams:
# Need to remove None values, these should not be sent
sentQueryParams = {}
for param, value in queryParams.items():
if value != None:
sentQueryParams[param] = value
if sentQueryParams:
url = url + '?' + urllib.urlencode(sentQueryParams)
if method in ['POST', 'PUT', 'DELETE']:
if isFileUpload:
data = postData.inputStream
elif not postData:
data = ""
elif type(postData) not in [unicode, str, int, float, bool]:
data = self.signer.signContent(json.dumps(self.sanitizeForSerialization(postData)), headers)
else:
data = self.signer.signContent(postData, headers)
if self.__debug:
handler = urllib2.HTTPSHandler(debuglevel=1) if url.lower().startswith('https') else urllib2.HTTPHandler(debuglevel=1)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
request = MethodRequest(method=method, url=self.encodeURI(self.signer.signUrl(url)), headers=headers,
data=data)
try:
# Make the request
response = urllib2.urlopen(request)
if 'Set-Cookie' in response.headers:
self.cookie = response.headers['Set-Cookie']
if response.code == 200 or response.code == 201 or response.code == 202:
if returnType == FileStream:
fs = FileStream.fromHttp(response)
if self.__debug: print(">>>stream info: fileName=%s contentType=%s size=%s" % (fs.fileName, fs.contentType, fs.size))
return fs if 'Transfer-Encoding' in response.headers or (fs.size != None and int(fs.size) > 0) else None
else:
string = response.read()
if self.__debug: print(string)
try:
data = json.loads(string)
except ValueError: # PUT requests don't return anything
data = None
return data
elif response.code == 404:
return None
else:
string = response.read()
try:
msg = json.loads(string)['error_message']
except ValueError:
msg = string
raise ApiException(response.code, msg)
except urllib2.HTTPError, e:
raise ApiException(e.code, e.msg)
finally:
if isFileUpload:
try:
postData.inputStream.close()
except Exception, e:
sys.exc_clear()
if self.__debug and self.__logFilepath:
sys.stdout = stdOut
logFile.close()
def toPathValue(self, obj):
"""Serialize a list to a CSV string, if necessary.
Args:
obj -- data object to be serialized
Returns:
			string -- comma-separated string if obj is a list, otherwise obj unchanged"""
if type(obj) == list:
return ','.join(obj)
else:
return obj
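	# Illustrative sketch (made-up values): toPathValue(["pdf", "doc"]) -> "pdf,doc",
	# while a scalar such as toPathValue(42) is returned unchanged.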
def sanitizeForSerialization(self, obj):
"""Dump an object into JSON for POSTing."""
if not obj:
return None
elif type(obj) in [unicode, str, int, long, float, bool]:
return obj
elif type(obj) == list:
return [self.sanitizeForSerialization(subObj) for subObj in obj]
elif type(obj) == datetime.datetime:
return obj.isoformat()
else:
if type(obj) == dict:
objDict = obj
else:
objDict = obj.__dict__
ret_dict = {}
for (key, val) in objDict.iteritems():
if key != 'swaggerTypes' and val != None:
ret_dict[key] = self.sanitizeForSerialization(val)
return ret_dict
def deserialize(self, obj, objClass):
"""Derialize a JSON string into an object.
Args:
obj -- string or object to be deserialized
			objClass -- class literal for deserialized object, or string of class name
Returns:
object -- deserialized object"""
if not obj:
return None
# Have to accept objClass as string or actual type. Type could be a
# native Python type, or one of the model classes.
if type(objClass) == str:
if 'list[' in objClass:
match = re.match('list\[(.*)\]', objClass)
subClass = match.group(1)
return [self.deserialize(subObj, subClass) for subObj in obj]
if (objClass in ['int', 'float', 'long', 'dict', 'list', 'str']):
objClass = eval(objClass)
else: # not a native type, must be model class
objClass = eval(objClass + '.' + objClass)
if objClass in [unicode, str, int, long, float, bool]:
return objClass(obj)
elif objClass == datetime:
# Server will always return a time stamp in UTC, but with
# trailing +0000 indicating no offset from UTC. So don't process
# last 5 characters.
return datetime.datetime.strptime(obj[:-5],
"%Y-%m-%dT%H:%M:%S.%f")
instance = objClass()
for attr, attrType in instance.swaggerTypes.iteritems():
lc_attr = attr[0].lower() + attr[1:]
uc_attr = attr[0].upper() + attr[1:]
real_attr = None
if attr in obj:
real_attr = attr
elif lc_attr in obj:
real_attr = lc_attr
elif uc_attr in obj:
real_attr = uc_attr
if real_attr != None:
value = obj[real_attr]
if not value:
setattr(instance, real_attr, None)
elif attrType in ['str', 'int', 'long', 'float', 'bool']:
attrType = eval(attrType)
try:
value = attrType(value)
except UnicodeEncodeError:
value = unicode(value)
setattr(instance, real_attr, value)
elif 'list[' in attrType:
match = re.match('list\[(.*)\]', attrType)
subClass = match.group(1)
subValues = []
for subValue in value:
subValues.append(self.deserialize(subValue,
subClass))
setattr(instance, real_attr, subValues)
else:
setattr(instance, real_attr, self.deserialize(value,
attrType))
return instance
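	# Illustrative sketch (made-up values): deserialize(["1", "2"], "list[int]")
	# recurses per element and returns [1, 2]; model class names given as strings
	# are resolved the same way via eval.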
@staticmethod
def encodeURI(url):
encoded = urllib.quote(url, safe='~@#$&()*!=:;,.?/\'').replace("%25", "%")
return encoded
@staticmethod
def encodeURIComponent(url):
return urllib.quote(url, safe='~()*!.\'')
@staticmethod
def readAsDataURL(filePath):
mimetype = mimetypes.guess_type(filePath, False)[0] or "application/octet-stream"
filecontents = open(filePath, 'rb').read()
return 'data:' + mimetype + ';base64,' + base64.b64encode(filecontents).decode()
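	# Illustrative sketch (file name and payload are examples only):
	#   ApiClient.readAsDataURL("photo.png") -> "data:image/png;base64,iVBORw0KGgo..."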
class MethodRequest(urllib2.Request):
def __init__(self, *args, **kwargs):
"""Construct a MethodRequest. Usage is the same as for
`urllib2.Request` except it also takes an optional `method`
keyword argument. If supplied, `method` will be used instead of
the default."""
if 'method' in kwargs:
self.method = kwargs.pop('method')
return urllib2.Request.__init__(self, *args, **kwargs)
def get_method(self):
return getattr(self, 'method', urllib2.Request.get_method(self))
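# Illustrative usage sketch (URL and body are placeholders): MethodRequest lets the
# client issue verbs urllib2 does not expose directly, e.g.
#   req = MethodRequest(url="http://example.com/doc", data="{}", method="PUT")
#   urllib2.urlopen(req)  # sent as a PUT rather than the default POST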
class ApiException(Exception):
def __init__(self, code, *args):
super(Exception, self).__init__((code, ) + args)
self.code = code
| apache-2.0 | 1,865,152,210,899,522,800 | 34.871795 | 137 | 0.536455 | false |
naoliv/osmose-backend | analysers/analyser_merge_public_transport_FR_transgironde.py | 1 | 3717 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2014 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from Analyser_Merge import Analyser_Merge, Source, Load, Mapping, Select, Generate
class Analyser_Merge_Public_Transport_FR_TransGironde(Analyser_Merge):
def __init__(self, config, logger = None):
self.missing_official = {"item":"8040", "class": 41, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"TransGironde stop not integrated") }
self.possible_merge = {"item":"8041", "class": 43, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"TransGironde stop, integration suggestion") }
Analyser_Merge.__init__(self, config, logger,
Source(
url = "http://www.datalocale.fr/drupal7/dataset/ig_transgironde_pa",
name = u"Localisation des points d'arrêts des lignes régulières du réseau TransGironde",
file = "public_transport_FR_transgironde.csv.bz2"),
Load("LON", "LAT", table = "transgironde"),
Mapping(
select = Select(
types = ["nodes", "ways"],
tags = {"highway": "bus_stop"}),
osmRef = "ref:FR:TransGironde",
conflationDistance = 100,
generate = Generate(
static = {
"source": u"Conseil général de la Gironde - 03/2013",
"highway": "bus_stop",
"public_transport": "stop_position",
"bus": "yes",
"network": "TransGironde"},
mapping = {
"ref:FR:TransGironde": "NUMERO_PEG",
"name": lambda res: res['NOM'].split(' - ')[1] if len(res['NOM'].split(' - ')) > 1 else None},
text = lambda tags, fields: {"en": u"TransGironde stop of %s" % fields["NOM"], "fr": u"Arrêt TransGironde de %s" % fields["NOM"]} )))
def replace(self, string):
for s in self.replacement.keys():
string = string.replace(s, self.replacement[s])
return string
replacement = {
u'Coll.': u'Collège',
u'Pl.': u'Place',
u'Eglise': u'Église',
u'Rte ': u'Route ',
        u'Bld ': u'Boulevard ',
        u'St ': u'Saint ',
        u'Av. ': u'Avenue ',
u'Hôp.': u'Hôpital',
}
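    # Illustrative sketch (hypothetical stop name): self.replace(u'Pl. Eglise')
    # expands the abbreviations above to u'Place Église'.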
| gpl-3.0 | -8,028,650,922,773,014,000 | 53.470588 | 169 | 0.461123 | false |
darogan/ParticleStats | scripts/ParticleStats_Compare.py | 1 | 27381 | #!/usr/bin/env python
###############################################################################
# ____ _ _ _ ____ _ _ #
# | _ \ __ _ _ __| |_(_) ___| | ___/ ___|| |_ __ _| |_ ___ #
# | |_) / _` | '__| __| |/ __| |/ _ \___ \| __/ _` | __/ __| #
# | __/ (_| | | | |_| | (__| | __/___) | || (_| | |_\__ \ #
# |_| \__,_|_| \__|_|\___|_|\___|____/ \__\__,_|\__|___/ #
# #
###############################################################################
# ParticleStats: Open source software for the analysis of particle #
# motility and cytoskelteal polarity #
# #
# Contact: [email protected] #
# http://www.ParticleStats.com #
# Centre for Trophoblast Research #
# University of Cambridge #
# Copyright (C) 2017 Russell S. Hamilton #
# #
# Please cite: #
# Hamilton, R.S. et al (2010) Nucl. Acids Res. Web Server Edition #
# http://dx.doi.org/10.1093/nar/gkq542 #
###############################################################################
# GNU Licence Details: #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import os, sys, math
import os.path
from optparse import OptionParser
###############################################################################
# PARSE IN THE USER OPTIONS
###############################################################################
parser = OptionParser(usage="%prog [--xls1=ExcelFile1] [--xls2=ExcelFile2]",
version="%prog 2.001")
parser.add_option("-a", "--xls1", metavar="EXCELFILE1",
dest="ExcelFile1",
help="Name of first Excel File")
parser.add_option("-b", "--xls2", metavar="EXCELFILE2",
dest="ExcelFile2",
help="Name of second Excel File")
parser.add_option("-o", metavar="OUTPUTTYPE",
dest="OutputType", default="text",
help="print text ot html style output: DEFAULT=text")
parser.add_option("--outdir", metavar="OUTPUTDIR",
dest="OutputDir",
help="Specify a directory for the output files")
parser.add_option("--outhtml", metavar="OUTPUTHTML",
dest="OutputHTML",
help="Specify a web location for the HTML output")
parser.add_option("--trackingtype", metavar="TrackingType",
dest="TrackingType", default="999",
help="Source of tracked coords: DEFAULT=metamorph")
parser.add_option("-g", "--graphs",
dest="graphs", action="store_true",
help="print graphs")
parser.add_option("-t", "--trails",
dest="trails", action="store_true",
help="print trails")
parser.add_option("-r", "--regression",
dest="regression", action="store_true",
help="Run linear regression analysis")
parser.add_option("-d", "--debug",
dest="debug", action="store_true",
help="print full debug output")
parser.add_option("--timestart",
dest="TimeStart", metavar="TIMESTART",
default="0",
help="Provide a time point start point for movement calculations")
parser.add_option("--timeend",
dest="TimeEnd", metavar="TIMEEND",
default="90",
help="Provide a time point end point for movement calculations")
parser.add_option("--pausedefinition", metavar="PAUSEDEF",
dest="PauseDef", default="distance",
help="Pause definition: speed or distance DEFAULT=distance")
parser.add_option("--rundistance", metavar="RUNDISTANCE",
dest="RunDistance", default="1.1",
help="Run Distance in nm: DEFAULT=1.1")
parser.add_option("--runframes", metavar="RUNFRAMES",
dest="RunFrames", default="0",
help="Run Frames: DEFAULT=0")
parser.add_option("--pausedistance", metavar="PAUSEDISTANCE",
dest="PauseDistance", default="10",
help="Pause Distance in nm: DEFAULT=10")
parser.add_option("--pauseduration", metavar="PAUSEDURATION",
dest="PauseDuration", default="2000",
help="Pause Duration in miliseconds: DEFAULT=2000")
parser.add_option("--pausespeed", metavar="PAUSESPEED",
dest="PauseSpeed", default="0.25",
help="Pause Speed: DEFAULT=0.25")
parser.add_option("--pauseframes", metavar="PAUSEFRAMES",
dest="PauseFrames", default="3",
help="Pause Frames: DEFAULT=3")
parser.add_option("--reverseframes", metavar="REVERSEFRAMES",
dest="ReverseFrames", default="2",
help="Reverse Frames: DEFAULT=2")
parser.add_option("--flipY", metavar="FLIPY",
dest="FlipY", action="store_true",
help="Changes the default orientation for the Y axis. \
Default y=0 is at the top of the image")
parser.add_option("--imagesize", metavar="IMAGESIZE",
dest="ImageSize", default="512",
help="Image size to define the range of the coordinates DEFAULT=512")
parser.add_option("--pixelratio", metavar="PIXELRATIO",
dest="PixelRatio", default="1.00",
help="Pixel Ratio (nm per pixel): DEFAULT=1.00")
parser.add_option("--pixelratiomethod", metavar="PIXELRATIOMETHOD",
dest="PixelRatioMethod", default="multiply",
help="Pixel Ratio calculation method <multiply/divide>: \
DEFAULT=multiply")
parser.add_option("--dimensions", metavar="DIMENSIONS",
dest="Dimensions", default="2D",
help="Number of dimensions (1DX, 1DY, 2D): DEFAULT=2D")
(options, args) = parser.parse_args()
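# Illustrative invocation sketch (file names and values below are hypothetical):
#   ParticleStats_Compare.py --xls1=WT_particles.xls --xls2=mut_particles.xls -o text --graphs --pixelratio=64.5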
options.RunDistance = float(options.RunDistance)
options.PauseDuration = float(options.PauseDuration)
options.PauseDistance = float(options.PauseDistance)
options.ReverseFrames = float(options.ReverseFrames)
options.PixelRatio = float(options.PixelRatio)
options.TimeStart = int(options.TimeStart)
options.TimeEnd = int(options.TimeEnd)
if (options.OutputType != "html") and (options.OutputType != "text"):
print "Error with input parameters (run -h flag for help)"
print "--output must be html or text"
sys.exit()
if options.ExcelFile1 and options.ExcelFile2:
XLS1 = options.ExcelFile1
XLS2 = options.ExcelFile2
else:
print "Error with input parameters (run -h flag for help)"
print "Two Excel Files must be provided"
sys.exit()
if options.graphs: Graphs = "graphs"
else: Graphs = "nographs"
if options.PauseDef != "speed" and options.PauseDef != "distance":
print "Error with input parameters (run -h flag for help)"
print "Pause Definition must be either speed or distance"
sys.exit()
else:
if options.PauseDef == "speed": PauseDef = 1
elif options.PauseDef == "distance": PauseDef = 2
###############################################################################
# LOAD IN THE REQUIRED MODULES ONLY AFTER MAIN USER OPTIONS CHECKED
###############################################################################
print "\nLoading External Modules..."
import ParticleStats.ParticleStats_Inputs as PS_Inputs
import ParticleStats.ParticleStats_Outputs as PS_Outputs
import ParticleStats.ParticleStats_Maths as PS_Maths
import ParticleStats.ParticleStats_Plots as PS_Plots
import numpy as na
import re
print "Loading complete\n\n"
#Print the welcome logo plus data and run mode
FontSize_Titles = 2
FontSize_Text = 1
#if(options.OutputType == "html"):
#BaseDir = "http://idcws.bioch.ox.ac.uk/~rhamilto/ParticleStats2/"
# BaseDir = ""
#else:
if(options.OutputHTML):
BaseDir = options.OutputHTML
else:
BaseDir = ""
#DirGraphs = BaseDir+"GraphOutput/"
if(options.OutputDir):
DirGraphs = options.OutputDir
else:
DirGraphs = os.getcwd()
DirTrails = BaseDir
ImageFileSearchPath = os.getcwd()
if(options.OutputType == "html"):
PS_Outputs.Print_HTMLHeaders()
PS_Outputs.Print_Welcome(options.OutputType,FontSize_Text)
###############################################################################
# READ IN THE EXCEL FILES
###############################################################################
if options.FlipY:
FlipYImgSize = int(options.ImageSize)
else:
FlipYImgSize = 0
FDs = []
#READ IN EXCEL FILE 1 AND EXTRACT INFO
(InputDirName1, InputFileName1) = os.path.split(XLS1)
Coords1,Corrections1,Axes1 = PS_Inputs.ReadExcelCoords(XLS1,options.PixelRatio,\
options.PixelRatioMethod,\
options.TimeStart,options.TimeEnd,\
FlipYImgSize)
FDs.append({'InputDirName':InputDirName1,'InputFileName':InputFileName1,\
'Coords':Coords1, 'Corrections':Corrections1, 'Axes':Axes1 })
#READ IN EXCEL FILE 2 AND EXTRACT INFO
(InputDirName2, InputFileName2) = os.path.split(XLS2)
Coords2,Corrections2,Axes2 = PS_Inputs.ReadExcelCoords(XLS2,options.PixelRatio,\
options.PixelRatioMethod,\
options.TimeStart,options.TimeEnd,\
FlipYImgSize)
FDs.append({'InputDirName':InputDirName2,'InputFileName':InputFileName2,\
'Coords':Coords2, 'Corrections':Corrections2, 'Axes':Axes2 })
del(InputDirName1,InputDirName2,InputFileName1,InputFileName2)
del(Coords1,Coords2,Corrections1,Corrections2,Axes1,Axes2)
if((options.OutputType == 'html') and \
((len(FDs[0]['Coords']) > 200) or (len(FDs[1]['Coords']) > 200) )):
print len(FDs[0]['Coords'])
print len(FDs[1]['Coords'])
print PS_Inputs.Colourer("### Too many particles in input files - limit = 200 ###",\
"red",options.OutputType,"bold",FontSize_Titles)
sys.exit()
PS_Outputs.Print_Parameters( FDs[0]['InputFileName'],FDs[0]['Coords'], \
FDs[1]['InputFileName'],FDs[1]['Coords'], \
options.OutputType,FontSize_Text )
Colours = ["red","blue","green","purple","orange","yellow",\
"silver","cyan","brown","magenta","silver","gold"]
Colours = Colours * 100
Separator = "" + ("+"*90)
#sys.exit(0)
###############################################################################
# DRAW IMAGE STACK
###############################################################################
#print "\tReading Image Stack"
#ImageStack = ParticleStats_Inputs.ReadImageFiles("")
#ParticleStats_Inputs.DrawCoords(ImageStack,Coords,"geo",Colours)
###############################################################################
# RUN FUNCTIONS ON COORD DATA READ IN - MAIN PROGRAM LOOP
###############################################################################
RealAllRuns = []
RealAllRunsX = []
RunsHash = []
RunsHash2 = []
RunsHashAll = []
coordset = 0
while coordset < len(FDs):
print PS_Inputs.Colourer(("### Running Coords Set "+str(coordset+1)+" ###"),"black",\
options.OutputType,"bold",FontSize_Titles)
print PS_Inputs.Colourer(" Excel File = "+FDs[coordset]['InputFileName'],"black",\
options.OutputType,"bold",FontSize_Text)
AllRuns = []
AllRuns_X = []
Stats_Global_AveLen = []
Stats_Global_AveSpeed = []
FileOut = ""
RunCounter = 0;
i = 0
while i < len(FDs[coordset]['Coords']): #cycle through sheets
j = 0
while j < len(FDs[coordset]['Coords'][i]): #cycle through
print PS_Inputs.Colourer(Separator,"grey",options.OutputType,"",FontSize_Text)
# Sort out the coordinate alignment corrections
if len(FDs[coordset]['Corrections']) != 4:
print PS_Inputs.Colourer(" Applying correction coordinates ",\
"black",options.OutputType,"bold",FontSize_Text)
FDs[coordset]['Coords'][i][j] = PS_Maths.CorrectCoordinates(\
FDs[coordset]['Coords'][i][j],\
FDs[coordset]['Corrections'])
# Perform Cummulative Distance plotting
if(options.graphs):
DistanceCummulativePlot = PS_Plots.PlotDistanceVsTimeCummulative(\
FDs[coordset]['Coords'][i][j],i,j,\
("Coords"+str(coordset+1)),"msecs",\
"nm",DirGraphs)
if(options.OutputType=="html"):
IMG_Particle = "<A HREF='"+ BaseDir + DirGraphs+\
str(DistanceCummulativePlot)+".png'"+\
" TARGET=_blank><IMG WIDTH=200 "+\
"SRC='"+ BaseDir + DirGraphs + "/" +\
str(DistanceCummulativePlot)+".png' BORDER=0></A>"
elif(options.OutputType=="text"):
IMG_Particle = " Graph: Cummulative Distance vs Time "+\
str(DistanceCummulativePlot)+".png"
else:
if(options.OutputType=="html"):
IMG_Particle = " <FONT SIZE=1>Graph:<BR>NO IMAGE AVAILABLE</FONT>"
else:
IMG_Particle = " Graph: NO IMAGE AVAILABLE"
# Perform the linear regression but not the graph just yet
if(options.regression):
Regression = PS_Maths.KymoRegression(FDs[coordset]['Coords'][i][j],4,5)
#Regression = PS_Maths.Regression_CVersion(FDs[coordset]['Coords'][i][j],4,5)
print " Regression=[X=%6.3f,"%Regression['X'],\
"Intercept=%6.0f,"%Regression['Intercept'],\
"R2=%6.3f,"%Regression['R2'],"]"
#"aR2=%6.3f"%Regression['aR2'],"]"
else:
Regression = ""
# Perform Trail drawing
IMG_Trails = ""
if( options.trails):
ImageFiles = PS_Inputs.FindImageFiles(\
FDs[coordset]['Coords'][i][j][0][0],ImageFileSearchPath)
else:
ImageFiles = []
if( len(ImageFiles) > 0) and (options.trails):
PatternN = re.compile(r'.*020.tif')
k = 0
while k < len(ImageFiles):
IMG_Trails = " Trail Image: NO IMAGE AVAILABLE"
if (PatternN.match(os.path.basename(ImageFiles[k]))):
FirstImage = ImageFiles[k]
TrailImage = PS_Inputs.DrawTrailsOnImageFile(FirstImage,i,\
FDs[coordset]['InputFileName'],Colours[i][j],\
FDs[coordset]['Coords'][i][j],\
options.PixelRatio,Regression)
if( (options.OutputType == "html") and ( options.trails) ):
IMG_Trails = "<A HREF='"+ DirTrails + TrailImage + \
"' TARGET=_blank><IMG WIDTH=200 " + \
"HEIGHT=187 " + "SRC='" + DirTrails + \
TrailImage + "' BORDER=0></A>"
elif( (options.OutputType == "text") and ( options.trails) ):
IMG_Trails = " Trail Image:"+TrailImage
break
k += 1
else:
if(options.OutputType == "html"):
IMG_Trails = "<FONT SIZE=1>Trail Image:<BR>NO IMAGE AVAILABLE</FONT>"
else:
IMG_Trails = " Trail Image: NO IMAGE AVAILABLE"
Runs = []
Runs = PS_Maths.FindLongMovementsAndPausesRaquel( \
FDs[coordset]['Coords'][i][j], Regression,\
FDs[coordset]['Axes'],PauseDef, \
options.RunDistance,options.RunFrames, \
options.PauseDistance,options.PauseSpeed, \
options.PauseFrames,options.PauseDuration,\
options.ReverseFrames,options.PixelRatio,\
options.Dimensions,\
options.TimeStart, options.TimeEnd,\
options.debug)
Stats_Particle = PS_Maths.Stats_Particle(Runs)
Stats_Standards = PS_Maths.Stats_Standards(Runs)
RunsHash.append({'CoordsSet':coordset,'Sheet':i,'Particle':j,'Runs':Runs})
RunsHashAll.append({'CoordsSet':coordset,'Sheet':i,'Particle':j,'Runs':Runs})
AllRuns.append(Runs)
print "Runs for particle %4d sheet %2d" % (j, i),
print " (Excel Row=", FDs[coordset]['Coords'][i][j][0][6], \
" File=", FDs[coordset]['InputFileName'], ")"
print " No Coords =", len(FDs[coordset]['Coords'][i][j]), \
" No +ve Runs = ", Stats_Particle['No_Runs_P'], \
" No -ve Runs = ", Stats_Particle['No_Runs_N'], \
" No Pauses = ", Stats_Particle['No_Runs_0']
RunLine = ""
StatsLine = ""
Header = " Event Start End Dir Dist SDist" +\
" RDist Angle Speed SSpeed RSpeed Time"
print PS_Inputs.Colourer(Header,"grey",options.OutputType,"italics",FontSize_Text)
k = 0
while k < len(Runs):
AllRuns_X.append(Runs[k])
Error = ""
if( Runs[k][2] > 0): Event = "Run "; Colour = "red"
elif( Runs[k][2] < 0): Event = "Run "; Colour = "blue"
elif( Runs[k][2] == 0): Event = "Pause"; Colour = "green"
#if(abs(Runs[j][5]) <= 200 and abs(Runs[j][6]) <= 200 \
# and Runs[j][2] != 0):
# Colour = "purple"; Error = "ERROR? " + Event
#elif( abs(Runs[j][3]) > 300 and Runs[j][2] == 0):
# Colour = "cyan"; Error = "? " + Event
RunLine = PS_Outputs.Print_RunLine(Runs,k,Event,Error)
print PS_Inputs.Colourer(RunLine,Colour,options.OutputType,\
"",FontSize_Text)
RunCounter += 1
FileOut += PS_Outputs.Print_FileOut(Runs, RunCounter, i, k)
k += 1
StatsLine = PS_Outputs.Print_ParticleStatsLine(Stats_Particle,Stats_Standards)
print StatsLine
# Perform Linear Regression Graph drawing
if(options.regression):
#Regression = PS_Maths.KymoRegression(FDs[coordset]['Coords'][i][j],4,5)
#print " Regression=[X=%6.3f,"%Regression['X'],\
# "Intercept=%6.0f,"%Regression['Intercept'],\
# "R2=%6.3f,"%Regression['R2'],\
# "aR2=%6.3f"%Regression['aR2'],"]"
RegressionGraph = PS_Plots.RegressionGraph(\
FDs[coordset]['Coords'][i][j],(coordset+1),i,j,\
Regression,FDs[coordset]['Axes'],Runs,DirGraphs)
if( options.OutputType=="html"):
IMG_Regression = "<A HREF='"+BaseDir+DirGraphs+RegressionGraph+".png" + \
"' TARGET=_blank><IMG WIDTH=200 HEIGHT=187 " + \
"SRC='"+BaseDir+DirGraphs+RegressionGraph+".png' " + \
"BORDER=0></A>"
elif( options.OutputType=="text"):
IMG_Regression = " Regression Image: "+RegressionGraph+".png"
else:
Regression = ""
if(options.OutputType=="html"):
IMG_Regression = " <FONT SIZE=1>Regression Image:"+\
"<BR>NO IMAGE AVAILABLE</FONT"
else:
IMG_Regression = " Regression Image: NO IMAGE AVAILABLE"
if(options.OutputType == "text"):
print IMG_Particle
print IMG_Trails
print IMG_Regression
elif(options.OutputType == "html"):
print "<TABLE WIDTH=100%><TR><TD>"+IMG_Particle+"</TD>"+\
"<TD VALIGN=middle>"+IMG_Trails+"</TD>"+\
"<TD VALIGN=middle>"+IMG_Regression+"</TD>"+\
"</TR></TABLE>"
j += 1
if(options.graphs and j == (len(FDs[coordset]['Coords'][i]))):
RoseDiagram = PS_Plots.PlotCompareRoseDiagram(RunsHash,500,coordset,i,DirGraphs)
convert = "inkscape --export-png="+DirGraphs+"/"+RoseDiagram+\
".png --export-dpi=125 "+DirGraphs+"/"+RoseDiagram+".svg 2>/dev/null"
os.popen(convert)
if(options.OutputType=="html"):
IMG_Rose = "<B>Rose Diagram For Sheet "+str(i)+"</B><BR>" + \
"<A HREF='"+BaseDir+DirGraphs+RoseDiagram+".png" + \
"' TARGET=_blank><IMG WIDTH=200 HEIGHT=187 " + \
"SRC='"+BaseDir+DirGraphs+RoseDiagram+".png' " + \
"BORDER=0></A>"
else:
IMG_Rose = " RoseDiagram = "+RoseDiagram+".svg\n"+\
" RoseDiagram = "+RoseDiagram+".png\n"
print IMG_Rose
RunsHash2.append(RunsHash)
RunsHash = []
i += 1
print PS_Inputs.Colourer(Separator,"grey",options.OutputType,"",FontSize_Text)
# Print Out some Global stats
print PS_Inputs.Colourer("### Global Statistics ###","green",\
options.OutputType,"bold",FontSize_Titles)
Stats_Global = {}
Stats_Global = PS_Maths.Stats_Particle(AllRuns_X)
Stats_Standards = {}
Stats_Standards = PS_Maths.Stats_Standards(AllRuns_X)
GlobalStats = PS_Outputs.Print_GlobalStats ( AllRuns_X, Stats_Global, Stats_Standards )
print GlobalStats
Stats_Global_AveLen.append( [Stats_Global['Ave_RunLen_P'],Stats_Global['Ave_RunLen_N'],\
Stats_Standards['D_P_E'],Stats_Standards['D_N_E'] ] )
Stats_Global_AveSpeed.append( [Stats_Global['Ave_Speed_P'],Stats_Global['Ave_Speed_N'],\
Stats_Standards['S_P_E'],Stats_Standards['S_N_E'] ] )
# Call the graph drawing functions
print PS_Inputs.Colourer("### Produce Output Files ###","green",\
options.OutputType,"bold",FontSize_Titles)
if( options.graphs):
print PS_Inputs.Colourer((" Creating Runs graph for Coords Set "+\
str(coordset+1)+"..."),"black",\
options.OutputType,"",FontSize_Text)
PS_Plots.PlotRuns(AllRuns,options.PixelRatio,Colours,\
("Coords"+str(coordset+1)),DirGraphs)
#PS_Plots.PlotRunsFreq(AllRuns,Colours,("Coords"+str(coordset+1)),DirGraphs)
#Write Run Data Out to the Data File
print PS_Inputs.Colourer(" Creating Output Table for Coords Set "+str(coordset+1)+\
"...","black",options.OutputType,"",FontSize_Text)
PS_Outputs.Print_OutputFile((DirGraphs+"/ParticleStats_Coords"+str(coordset+1)+"_Output.text"),FileOut)
print PS_Inputs.Colourer("","green",options.OutputType,"bold",FontSize_Text)
print PS_Inputs.Colourer("","green",options.OutputType,"bold",FontSize_Text)
print PS_Inputs.Colourer("","green",options.OutputType,"bold",FontSize_Text)
print PS_Inputs.Colourer("","green",options.OutputType,"bold",FontSize_Text)
RealAllRuns.append(AllRuns)
RealAllRunsX.append(AllRuns_X)
coordset += 1
RoseDiagram = PS_Plots.PlotCompareRoseDiagram(RunsHashAll,500,0,99,DirGraphs)
convert = "inkscape --export-png="+DirGraphs+"/"+RoseDiagram+".png --export-dpi=125 "+\
DirGraphs+RoseDiagram+".svg 2>/dev/null"
os.popen(convert)
print "RoseDiagram (coordsset=0) =", RoseDiagram
RoseDiagram = PS_Plots.PlotCompareRoseDiagram(RunsHashAll,500,1,99,DirGraphs)
convert = "inkscape --export-png="+DirGraphs+"/"+RoseDiagram+".png --export-dpi=125 "+\
DirGraphs+RoseDiagram+".svg 2>/dev/null"
os.popen(convert)
print "RoseDiagram (coordsset=1) =", RoseDiagram
ThreeFrameResults = PS_Maths.ThreeFrameRunAnalysis(RunsHashAll,FDs,DirGraphs)
print "3 Frame Results =", len(ThreeFrameResults)
ThreeFrameGraph = PS_Plots.PlotThreeFrameResults(ThreeFrameResults,0,DirGraphs)
print "3 Frame Graph (coordsset=0) =", ThreeFrameGraph
ThreeFrameGraph = PS_Plots.PlotThreeFrameResults(ThreeFrameResults,1,DirGraphs)
print "3 Frame Graph (coordsset=1) =", ThreeFrameGraph
ThreeFrameMaxResults = PS_Maths.ThreeFrameMaxRunAnalysis(RunsHashAll,FDs,DirGraphs)
print "3 Frame Max Results =", len(ThreeFrameMaxResults)
ThreeFrameMaxGraph = PS_Plots.PlotThreeFrameMaxResults(ThreeFrameMaxResults,0,DirGraphs)
print "3 Frame Max Graph (coordsset=0) =", ThreeFrameMaxGraph
ThreeFrameMaxGraph = PS_Plots.PlotThreeFrameMaxResults(ThreeFrameMaxResults,1,DirGraphs)
print "3 Frame Max Graph (coordsset=1) =", ThreeFrameMaxGraph
DirChangeResults = PS_Maths.DirectionChangesAnalysis(RunsHashAll,0,DirGraphs)
print "Direction Change Results (coordsset=0) =", len(DirChangeResults)
DirChangeGraph = PS_Plots.PlotDirChangeResults(DirChangeResults,0,DirGraphs)
print "Direction Changes Graph (coordsset=0) =", DirChangeGraph
DirChangeResults = PS_Maths.DirectionChangesAnalysis(RunsHashAll,1,DirGraphs)
print "Direction Change Results (coordsset=1) =", len(DirChangeResults)
DirChangeGraph = PS_Plots.PlotDirChangeResults(DirChangeResults,1,DirGraphs)
print "Direction Changes Graph (coordsset=0) =", DirChangeGraph
# OK Lets do some stats comparisons between the two excel files
print PS_Inputs.Colourer("### Comparison Statistics ###","green",\
options.OutputType,"bold",FontSize_Titles)
print PS_Inputs.Colourer(" Comparing Coords Set 1 to Coords Set 2","black",\
options.OutputType,"",FontSize_Text)
print PS_Inputs.Colourer(" "+FDs[0]['InputFileName']+" vs "+FDs[1]['InputFileName'],\
"black",options.OutputType,"",FontSize_Text)
Output = PS_Maths.Stats_TTests(RealAllRunsX[0],RealAllRunsX[1])
print Output
# Plot Average Run Length for the 2 coords sets
if( options.graphs):
print ""
#PS_Plots.PlotAveRunLength(Stats_Global_AveLen)
#PS_Plots.PlotSpeed(Stats_Global_AveSpeed)
print PS_Inputs.Colourer("### FIN ###","green",options.OutputType,"bold",FontSize_Text)
print PS_Inputs.Colourer(Separator,"grey",options.OutputType,"",FontSize_Text)
if(options.OutputType == "html"):
PS_Outputs.Print_HTMLTails(BaseDir, DirGraphs, options.ExcelFile1, options.ExcelFile2 )
#------------------------------------------------------------------------------
# FIN
#------------------------------------------------------------------------------
| gpl-3.0 | 5,150,384,486,752,012,000 | 44.635 | 113 | 0.552463 | false |
ArcherSys/ArcherSys | Lib/encodings/cp500.py | 1 | 39503 | <<<<<<< HEAD
<<<<<<< HEAD
""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp500',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\xa0' # 0x41 -> NO-BREAK SPACE
'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
'\xb8' # 0x9D -> CEDILLA
'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
'\xbf' # 0xAB -> INVERTED QUESTION MARK
'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
'\xae' # 0xAF -> REGISTERED SIGN
'\xa2' # 0xB0 -> CENT SIGN
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'\xac' # 0xBA -> NOT SIGN
'|' # 0xBB -> VERTICAL LINE
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
=======
""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp500',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\xa0' # 0x41 -> NO-BREAK SPACE
'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
'\xb8' # 0x9D -> CEDILLA
'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
'\xbf' # 0xAB -> INVERTED QUESTION MARK
'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
'\xae' # 0xAF -> REGISTERED SIGN
'\xa2' # 0xB0 -> CENT SIGN
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'\xac' # 0xBA -> NOT SIGN
'|' # 0xBB -> VERTICAL LINE
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp500',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\xa0' # 0x41 -> NO-BREAK SPACE
'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
'\xb8' # 0x9D -> CEDILLA
'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
'\xbf' # 0xAB -> INVERTED QUESTION MARK
'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
'\xae' # 0xAF -> REGISTERED SIGN
'\xa2' # 0xB0 -> CENT SIGN
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'\xac' # 0xBA -> NOT SIGN
'|' # 0xBB -> VERTICAL LINE
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit | 6,183,082,987,881,244,000 | 41.613808 | 116 | 0.51948 | false |
againer/supercda | utils/clinic_pickler.py | 1 | 3611 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "Alex Gainer ([email protected])"
__copyright__ = "Copyright 2014, Health Records For Everyone (HR4E)"
import cPickle as pickle
import datetime
import os
class PickleNotFoundException(Exception):
"""Missing Pickle Exception"""
class ClinicPickle(object):
"""Word dictionary utilities for pickling GRE words."""
_PICKLE_FOLDER = os.path.join('data', 'clinics')
_MISSING_PICKLE = 'Pickle {0} File Missing.'
def __init__(self, name):
self.name = name
self.date_created = datetime.datetime.now()
@classmethod
def create(cls, name):
"""Creates a clinic object and pickles it."""
try:
pickle_file_name = '{0}.pkl'.format(name)
path_to_pickle = os.path.join(cls._PICKLE_FOLDER,
pickle_file_name)
path = os.path.isfile(path_to_pickle)
if not path:
pickle.dump(cls(name).__dict__, file(path_to_pickle, 'wb'))
except IOError:
            raise PickleNotFoundException, cls._MISSING_PICKLE.format(name)
def delete(self):
"""Deletes a Clinic Pickle File."""
try:
pickle_file_name = '{0}.pkl'.format(self.name)
path_to_pickle = os.path.join(self._PICKLE_FOLDER,
pickle_file_name)
os.remove(path_to_pickle)
except IOError:
missing_pickle_error = self._MISSING_PICKLE.format(self.name)
raise PickleNotFoundException, missing_pickle_error
@classmethod
def get_all(cls):
return filter(lambda x: x != None,
                      [cls.load(name) for name in cls.get_all_clinic_names()])
@classmethod
def get_all_clinic_names(cls):
pkl_files = [f for f in os.listdir(cls._PICKLE_FOLDER)
if os.path.isfile(os.path.join(cls._PICKLE_FOLDER,f))]
        return [os.path.splitext(f)[0] for f in pkl_files if f.endswith('.pkl')]
@classmethod
def load(cls, name):
"""Loads up a pickled clinic as a clinic object."""
try:
pickle_file_name = '{0}.pkl'.format(name)
path_to_pickle = os.path.join(cls._PICKLE_FOLDER,
pickle_file_name)
if os.path.isfile(path_to_pickle):
clinic = cls(name)
clinic.__dict__ = pickle.load(file(path_to_pickle, 'r+b'))
else:
clinic = None
return clinic
except IOError:
return None
def update(self, post_data):
"""Updates a clinic given the post_data dictionary."""
        self.__dict__.update(post_data)
try:
pickle_file_name = '{0}.pkl'.format(self.name)
path_to_pickle = os.path.join(self._PICKLE_FOLDER,
pickle_file_name)
if os.path.isfile(path_to_pickle):
                pickle.dump(self.__dict__, file(path_to_pickle, 'wb'))
except IOError:
            raise PickleNotFoundException, self._MISSING_PICKLE.format(self.name)
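# Illustrative usage sketch (not part of the original module); it assumes the
# 'data/clinics' pickle folder already exists and is writable.
if __name__ == '__main__':
    ClinicPickle.create('downtown')          # writes data/clinics/downtown.pkl
    clinic = ClinicPickle.load('downtown')   # returns a ClinicPickle instance
    print clinic.name, clinic.date_created
    clinic.delete()                          # removes the pickle file again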
| apache-2.0 | -7,934,011,113,419,530,000 | 36.226804 | 76 | 0.585987 | false |
creotiv/django-fuzzytest | django_fuzzytest/utils.py | 1 | 8312 | # -*- coding: utf-8 -*-
import itertools
from sre_constants import *
import sre_parse
import string
import random
import re
from django.conf import settings
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver, LocaleRegexURLResolver
from django.utils import translation
from django.core.exceptions import ViewDoesNotExist
from django.contrib.admindocs.views import simplify_regex
class RegexpInverter(object):
category_chars = {
CATEGORY_DIGIT : string.digits,
CATEGORY_SPACE : string.whitespace,
CATEGORY_WORD : string.digits + string.letters + '_'
}
def _unique_extend(self, res_list, list):
for item in list:
if item not in res_list:
res_list.append(item)
def _handle_any(self, val):
"""
This is different from normal regexp matching. It only matches
printable ASCII characters.
"""
return string.printable
def _handle_branch(self, (tok, val)):
all_opts = []
for toks in val:
opts = self._permute_toks(toks)
self._unique_extend(all_opts, opts)
return all_opts
def _handle_category(self, val):
return list(self.category_chars[val])
def _handle_in(self, val):
out = []
for tok, val in val:
out += self._handle_tok(tok, val)
return out
def _handle_literal(self, val):
return [unichr(val)]
def _handle_max_repeat(self, (min, max, val)):
"""
Handle a repeat token such as {x,y} or ?.
"""
subtok, subval = val[0]
if max > 5000:
            # max is the number of cartesian join operations needed to be
            # carried out. More than 5000 consumes way too much memory.
            raise ValueError("Too many repetitions requested (%d)" % max)
optlist = self._handle_tok(subtok, subval)
iterlist = []
for x in range(min, max + 1):
joined = self._join([optlist] * x)
iterlist.append(joined)
return (''.join(it) for it in itertools.chain(*iterlist))
def _handle_range(self, val):
lo, hi = val
return (chr(x) for x in range(lo, hi + 1))
def _handle_subpattern(self, val):
return list(self._permute_toks(val[1]))
def _handle_tok(self, tok, val):
"""
Returns a list of strings of possible permutations for this regexp
token.
"""
handlers = {
ANY : self._handle_any,
BRANCH : self._handle_branch,
CATEGORY : self._handle_category,
LITERAL : self._handle_literal,
IN : self._handle_in,
MAX_REPEAT : self._handle_max_repeat,
RANGE : self._handle_range,
SUBPATTERN : self._handle_subpattern}
try:
return handlers[tok](val)
except KeyError, e:
fmt = "Unsupported regular expression construct: %s"
raise ValueError(fmt % tok)
def _permute_toks(self, toks):
"""
Returns a generator of strings of possible permutations for this
regexp token list.
"""
lists = [self._handle_tok(tok, val) for tok, val in toks]
return (''.join(it) for it in self._join(lists))
def _join(self, iterlist):
"""
Cartesian join as an iterator of the supplied sequences. Borrowed
from:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/302478
"""
def rloop(seqin, comb):
if seqin:
for item in seqin[0]:
newcomb = comb + [item]
for item in rloop(seqin[1:], newcomb):
yield item
else:
yield comb
return rloop(iterlist, [])
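    # For example (illustrative sketch): list(self._join([['a', 'b'], ['1', '2']]))
    # yields [['a', '1'], ['a', '2'], ['b', '1'], ['b', '2']], the cartesian
    # product that _permute_toks later joins back into candidate strings.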
########## PUBLIC API ####################
def ipermute(self, p):
toks = [tok_n_val for tok_n_val in sre_parse.parse(p)]
return self._permute_toks(toks)
def permute(self, p):
return list(self.ipermute(p))
def random(self, p, length):
res = self.permute(p)
return ''.join(random.choice(res) for i in xrange(length))
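# Illustrative usage sketch (not part of the original module). Only use it with
# small, fully bounded patterns, since every possible match is enumerated:
#     inverter = RegexpInverter()
#     inverter.permute('[ab]c')        # -> ['ac', 'bc']
#     inverter.permute('\d{2}')        # -> ['00', '01', ..., '99']
#     inverter.random('[a-f]{2}', 3)   # -> e.g. 'acfbde' (three random picks)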
class UrlFinder(object):
# TODO: Look at https://github.com/ierror/django-js-reverse
def get_urls(self, exclude=None):
if getattr(settings, 'ADMIN_FOR', None):
settings_modules = [__import__(m, {}, {}, ['']) for m in settings.ADMIN_FOR]
else:
settings_modules = [settings]
decorator = ['login_required']
urlconf = "ROOT_URLCONF"
views = []
for settings_mod in settings_modules:
try:
urlconf = __import__(getattr(settings_mod, urlconf), {}, {}, [''])
except Exception as e:
                import traceback
                traceback.print_exc()
                print("Error occurred while trying to load %s: %s"
                      % (getattr(settings_mod, urlconf), str(e)))
continue
view_functions = self.extract_views_from_urlpatterns(urlconf.urlpatterns)
for (func, regex, url_name) in view_functions:
if hasattr(func, '__globals__'):
func_globals = func.__globals__
elif hasattr(func, 'func_globals'):
func_globals = func.func_globals
else:
func_globals = {}
if hasattr(func, '__name__'):
func_name = func.__name__
elif hasattr(func, '__class__'):
func_name = '%s()' % func.__class__.__name__
else:
func_name = re.sub(r' at 0x[0-9a-f]+', '', repr(func))
views.append({
"module":func.__module__,
"method":func_name,
"name":url_name,
"regexp": regex,
"url":simplify_regex(regex)
                })
        return views
def extract_views_from_urlpatterns(self, urlpatterns, base='', namespace=None):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a two-tuple: (view_func, regex)
"""
views = []
for p in urlpatterns:
if isinstance(p, RegexURLPattern):
try:
if not p.name:
name = p.name
elif namespace:
name = '{0}:{1}'.format(namespace, p.name)
else:
name = p.name
views.append((p.callback, base + p.regex.pattern, name))
except ViewDoesNotExist:
continue
elif isinstance(p, RegexURLResolver):
try:
patterns = p.url_patterns
except ImportError:
continue
if namespace and p.namespace:
_namespace = '{0}:{1}'.format(namespace, p.namespace)
else:
_namespace = (p.namespace or namespace)
if isinstance(p, LocaleRegexURLResolver):
                    for language in settings.LANGUAGES:
                        with translation.override(language[0]):
views.extend(self.extract_views_from_urlpatterns(patterns, base + p.regex.pattern, namespace=_namespace))
else:
views.extend(self.extract_views_from_urlpatterns(patterns, base + p.regex.pattern, namespace=_namespace))
elif hasattr(p, '_get_callback'):
try:
views.append((p._get_callback(), base + p.regex.pattern, p.name))
except ViewDoesNotExist:
continue
elif hasattr(p, 'url_patterns') or hasattr(p, '_get_url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(self.extract_views_from_urlpatterns(patterns, base + p.regex.pattern, namespace=namespace))
else:
raise TypeError("%s does not appear to be a urlpattern object" % p)
return views
| bsd-3-clause | 3,566,915,466,897,581,600 | 34.827586 | 133 | 0.522859 | false |
stuart-knock/tvb-library | tvb/basic/traits/traited_interface.py | 1 | 10621 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
Generate Dictionary required by the Framework to generate UI from it.
Returned dictionary will be generated from traited definition of attributes.
.. moduleauthor:: Lia Domide <[email protected]>
.. moduleauthor:: Stuart Knock <[email protected]>
.. moduleauthor:: marmaduke <[email protected]>
"""
import numpy
import json
from tvb.basic.logger.builder import get_logger
from tvb.basic.traits.util import get, str_class_name, multiline_math_directives_to_matjax
from tvb.basic.traits.core import KWARG_AVOID_SUBCLASSES, TYPE_REGISTER, KWARG_FILTERS_UI, KWARG_FILTERS_BACKEND
LOG = get_logger(__name__)
INTERFACE_ATTRIBUTES_ONLY = "attributes-only"
INTERFACE_ATTRIBUTES = "attributes"
class TraitedInterfaceGenerator(object):
"""
    Base class for interface reading and dumping. As a data descriptor, when
it is an attribute of the class it will compute a dictionary and return it.
"""
def __get__(self, inst, ownr):
obj = inst if inst else ownr
if not obj.trait.bound:
return {}
intr = {}
self.__fill_basics(obj, intr)
self.__fill_ranges(obj, intr)
self.__fill_noiseconfig(obj, intr)
self.__fill_filters(obj, intr)
if obj.trait.wraps is not None:
self.__fill_wrapped_type(obj, intr)
if intr['type'] == 'dict' and isinstance(intr['default'], dict):
intr['attributes'], intr['elementType'] = self.__prepare_dictionary(intr['default'])
if len(intr['attributes']) == 0:
## Dictionary without any sub-parameter
return {}
mro_type_names = [i.__name__ for i in ownr.mro()]
if 'Array' in mro_type_names:
self.__fill_array(obj, inst, intr)
##### TYPE & subclasses specifics ######################################
elif ('Type' in mro_type_names
and (obj.__module__ != 'tvb.basic.traits.types_basic' or 'Range' in mro_type_names)
or 'Enumerate' in mro_type_names):
self.__fill_entity_attributes(obj, intr)
if obj.trait.bound == INTERFACE_ATTRIBUTES_ONLY:
# We need to do this, to avoid infinite loop on attributes
# of class Type with no subclasses
return intr
if obj.trait.select_multiple:
intr['type'] = 'selectMultiple'
else:
intr['type'] = 'select'
if 'MappedType' in mro_type_names:
self.__fill_mapped_type(ownr, intr)
else:
##### TYPE (not MAPPED_TYPE) again ####################################
intr['attributes'] = []
# Build options list
intr['options'] = []
if 'Enumerate' in obj.__class__.__name__:
self.__fill_enumerate(obj, intr)
return intr
else:
self.__handle_nonmapped_subtypes(ownr, obj, intr)
self.__correct_defaults(intr)
return intr
@staticmethod
def __fill_basics(obj, intr):
label = get(obj.trait.inits.kwd, 'label', obj.trait.name)
if not label:
label = obj.trait.name
intr.update({
'default': (obj.value or obj.trait.value) if hasattr(obj, 'value') else obj.trait.value,
'description': get(obj.trait.inits.kwd, 'doc'),
'label': label.capitalize(),
'name': obj.trait.name,
'locked': obj.trait.inits.kwd.get('locked', False),
'required': obj.trait.inits.kwd.get('required', True)
})
@staticmethod
def __fill_ranges(obj, intr):
range_value = obj.trait.inits.kwd.get('range', False)
if range_value:
intr['minValue'] = range_value.lo
intr['maxValue'] = range_value.hi
if range_value.step is not None:
intr['stepValue'] = range_value.step
else:
LOG.debug("Missing Range.step field for attribute %s, we will consider a default." % obj.trait.name)
                intr['stepValue'] = (range_value.hi - range_value.lo) / 10
@staticmethod
def __fill_noiseconfig(obj, intr):
noise_configurable = obj.trait.inits.kwd.get('configurable_noise', None)
if noise_configurable is not None:
intr['configurableNoise'] = noise_configurable
@staticmethod
def __fill_filters(obj, intr):
if KWARG_FILTERS_UI in obj.trait.inits.kwd:
intr[KWARG_FILTERS_UI] = json.dumps([ui_filter.to_dict() for ui_filter in
obj.trait.inits.kwd[KWARG_FILTERS_UI]])
if KWARG_FILTERS_BACKEND in obj.trait.inits.kwd:
intr["conditions"] = obj.trait.inits.kwd[KWARG_FILTERS_BACKEND]
@staticmethod
def __fill_wrapped_type(obj, intr):
if isinstance(obj.trait.wraps, tuple):
intr['type'] = obj.trait.wraps[0].__name__
else:
intr['type'] = obj.trait.wraps.__name__
@staticmethod
def __fill_array(obj, inst, intr):
intr['type'] = 'array'
intr['elementType'] = str(inst.dtype)
if isinstance(obj.trait.value, numpy.ndarray):
# Make sure arrays are displayed in a compatible form: [1, 2, 3]
intr['default'] = str(obj.trait.value.tolist())
@staticmethod
def __fill_entity_attributes(obj, intr):
# Populate Attributes for current entity
attrs = sorted(obj.trait.values(), key=lambda entity: entity.trait.order_number)
attrs = [val.interface for val in attrs if val.trait.order_number >= 0]
attrs = [attr for attr in attrs if attr is not None and len(attr) > 0]
intr['attributes'] = attrs
@staticmethod
def __fill_mapped_type(ownr, intr):
intr['datatype'] = True
#### For simple DataTypes, cut options and attributes
intr['options'] = []
if not ownr._ui_complex_datatype:
intr['attributes'] = []
ownr_class = ownr.__class__
else:
ownr_class = ownr._ui_complex_datatype
if 'MetaType' in ownr_class.__name__:
ownr_class = ownr().__class__
intr['type'] = ownr_class.__module__ + '.' + ownr_class.__name__
@staticmethod
def __fill_enumerate(obj, intr):
for val in obj.trait.options:
intr['options'].append({'name': val,
'value': val})
intr['default'] = obj.trait.value
def __prepare_dictionary(self, dictionary):
"""
From base.Dict -> default [isinstance(dict)], prepare an interface specific tree.
"""
result = []
element_type = None
for key in dictionary:
entry = {}
value = dictionary[key]
entry['label'] = key
entry['name'] = key
if type(value) == dict:
entry['attributes'], entry['elementType'] = self.__prepare_dictionary(value)
value = ''
entry['default'] = str(value)
if hasattr(value, 'tolist') or 'Array' in [i.__name__ for i in type(value).mro()]:
entry['type'] = 'array'
if not hasattr(value, 'tolist'):
entry['default'] = str(value.trait.value)
else:
entry['type'] = type(value).__name__
element_type = entry['type']
result.append(entry)
return result, element_type
@staticmethod
def __handle_nonmapped_subtypes(ownr, obj, intr):
""" Populate options for each subtype. This fills in models etc"""
for opt in TYPE_REGISTER.subclasses(ownr, KWARG_AVOID_SUBCLASSES in obj.trait.inits.kwd):
if hasattr(obj, 'value') and obj.value is not None and isinstance(obj.value, opt):
## fill option currently selected with attributes from instance
opt = obj.value
opt_class = opt.__class__
else:
opt_class = opt
opt.trait.bound = INTERFACE_ATTRIBUTES_ONLY
description = multiline_math_directives_to_matjax(opt_class.__doc__)
intr['options'].append({'name': get(opt, '_ui_name', opt_class.__name__),
'value': str_class_name(opt_class, short_form=True),
'class': str_class_name(opt_class, short_form=False),
'description': description,
'attributes': opt.interface['attributes']})
@staticmethod
def __correct_defaults(intr):
if intr['default'] is not None:
intr['default'] = intr['default'].__class__.__name__
if intr['default'] == 'RandomState':
intr['default'] = 'RandomStream'
else:
intr['default'] = None
def __set__(self, inst, val):
"""
Given a hierarchical dictionary of the kind generated by __get__, with the
chosen options, we should be able to fully instantiate a class.
"""
raise NotImplementedError
| gpl-2.0 | 7,904,017,983,955,675,000 | 37.0681 | 116 | 0.58036 | false |
davibe/pygobject | gi/_signalhelper.py | 1 | 9887 | # -*- Mode: Python; py-indent-offset: 4 -*-
# pygobject - Python bindings for the GObject library
# Copyright (C) 2012 Simon Feltman
#
# gi/_signalhelper.py: GObject signal binding decorator object
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import sys
import inspect
from ._gi import _gobject
# Callable went away in python 3.0 and came back in 3.2.
# Use versioning to figure out when to define it, otherwise we have to deal with
# the complexity of using __builtin__ or builtin between python versions to
# check if callable exists which PyFlakes will also complain about.
if (3, 0) <= sys.version_info < (3, 2):
def callable(fn):
return hasattr(fn, '__call__')
class Signal(str):
"""
Object which gives a nice API for creating and binding signals.
Example:
class Spam(GObject.GObject):
velocity = 0
@GObject.Signal
def pushed(self):
self.velocity += 1
@GObject.Signal(flags=GObject.SignalFlags.RUN_LAST)
def pulled(self):
self.velocity -= 1
stomped = GObject.Signal('stomped', arg_types=(int,))
@GObject.Signal
def annotated_signal(self, a:int, b:str):
"Python3 annotation support for parameter types.
def on_pushed(obj):
print(obj)
spam = Spam()
spam.pushed.connect(on_pushed)
spam.pushed.emit()
"""
class BoundSignal(str):
"""
Temporary binding object which can be used for connecting signals
without specifying the signal name string to connect.
"""
def __new__(cls, name, *args, **kargs):
return str.__new__(cls, name)
def __init__(self, signal, gobj):
str.__init__(self)
self.signal = signal
self.gobj = gobj
def __repr__(self):
return 'BoundSignal("%s")' % self
def __call__(self, *args, **kargs):
"""Call the signals closure."""
return self.signal.func(self.gobj, *args, **kargs)
def connect(self, callback, *args, **kargs):
"""Same as GObject.GObject.connect except there is no need to specify
the signal name."""
return self.gobj.connect(self, callback, *args, **kargs)
def connect_detailed(self, callback, detail, *args, **kargs):
"""Same as GObject.GObject.connect except there is no need to specify
the signal name. In addition concats "::<detail>" to the signal name
when connecting; for use with notifications like "notify" when a property
changes.
"""
return self.gobj.connect(self + '::' + detail, callback, *args, **kargs)
def disconnect(self, handler_id):
"""Same as GObject.GObject.disconnect."""
            self.gobj.disconnect(handler_id)
def emit(self, *args, **kargs):
"""Same as GObject.GObject.emit except there is no need to specify
the signal name."""
return self.gobj.emit(str(self), *args, **kargs)
def __new__(cls, name='', *args, **kargs):
if callable(name):
name = name.__name__
return str.__new__(cls, name)
def __init__(self, name='', func=None, flags=_gobject.SIGNAL_RUN_FIRST,
return_type=None, arg_types=None, doc='', accumulator=None, accu_data=None):
"""
@param name: name of signal or closure method when used as direct decorator.
@type name: string or callable
@param func: closure method.
@type func: callable
@param flags: flags specifying when to run closure
@type flags: GObject.SignalFlags
@param return_type: return type
@type return_type: type
@param arg_types: list of argument types specifying the signals function signature
@type arg_types: None
@param doc: documentation of signal object
@type doc: string
@param accumulator: accumulator method with the signature:
func(ihint, return_accu, handler_return, accu_data) -> boolean
@type accumulator: function
@param accu_data: user data passed to the accumulator
@type accu_data: object
"""
if func and not name:
name = func.__name__
elif callable(name):
func = name
name = func.__name__
if func and not doc:
doc = func.__doc__
str.__init__(self)
if func and not (return_type or arg_types):
return_type, arg_types = get_signal_annotations(func)
if arg_types is None:
arg_types = tuple()
self.func = func
self.flags = flags
self.return_type = return_type
self.arg_types = arg_types
self.__doc__ = doc
self.accumulator = accumulator
self.accu_data = accu_data
def __get__(self, instance, owner=None):
"""Returns a BoundSignal when accessed on an object instance."""
if instance is None:
return self
return self.BoundSignal(self, instance)
def __call__(self, obj, *args, **kargs):
"""Allows for instantiated Signals to be used as a decorator or calling
of the underlying signal method."""
# If obj is a GObject, than we call this signal as a closure otherwise
# it is used as a re-application of a decorator.
if isinstance(obj, _gobject.GObject):
self.func(obj, *args, **kargs)
else:
# If self is already an allocated name, use it otherwise create a new named
# signal using the closure name as the name.
if str(self):
name = str(self)
else:
name = obj.__name__
# Return a new value of this type since it is based on an immutable string.
return type(self)(name=name, func=obj, flags=self.flags,
return_type=self.return_type, arg_types=self.arg_types,
doc=self.__doc__, accumulator=self.accumulator, accu_data=self.accu_data)
def copy(self, newName=None):
"""Returns a renamed copy of the Signal."""
if newName is None:
            newName = str(self)
return type(self)(name=newName, func=self.func, flags=self.flags,
return_type=self.return_type, arg_types=self.arg_types,
doc=self.__doc__, accumulator=self.accumulator, accu_data=self.accu_data)
def get_signal_args(self):
"""Returns a tuple of: (flags, return_type, arg_types, accumulator, accu_data)"""
return (self.flags, self.return_type, self.arg_types, self.accumulator, self.accu_data)
class SignalOverride(Signal):
"""Specialized sub-class of signal which can be used as a decorator for overriding
existing signals on GObjects.
Example:
class MyWidget(Gtk.Widget):
@GObject.SignalOverride
def configure_event(self):
pass
"""
def get_signal_args(self):
"""Returns the string 'override'."""
return 'override'
def get_signal_annotations(func):
"""Attempt pulling python 3 function annotations off of 'func' for
use as a signals type information. Returns an ordered nested tuple
of (return_type, (arg_type1, arg_type2, ...)). If the given function
does not have annotations then (None, tuple()) is returned.
"""
arg_types = tuple()
return_type = None
if hasattr(func, '__annotations__'):
spec = inspect.getfullargspec(func)
arg_types = tuple(spec.annotations[arg] for arg in spec.args
if arg in spec.annotations)
if 'return' in spec.annotations:
return_type = spec.annotations['return']
return return_type, arg_types
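# Illustrative sketch (not part of the original module): given a Python 3
# handler annotated as
#     def moved(self, dx: int, dy: int) -> bool: ...
# get_signal_annotations(moved) returns (bool, (int, int)); for a function
# without annotations it returns (None, tuple()).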
def install_signals(cls):
"""Adds Signal instances on a GObject derived class into the '__gsignals__'
dictionary to be picked up and registered as real GObject signals.
"""
gsignals = cls.__dict__.get('__gsignals__', {})
newsignals = {}
for name, signal in cls.__dict__.items():
if isinstance(signal, Signal):
signalName = str(signal)
# Fixup a signal which is unnamed by using the class variable name.
            # Since Signal is based on string which is immutable,
# we must copy and replace the class variable.
if not signalName:
signalName = name
signal = signal.copy(name)
setattr(cls, name, signal)
if signalName in gsignals:
raise ValueError('Signal "%s" has already been registered.' % name)
newsignals[signalName] = signal
gsignals[signalName] = signal.get_signal_args()
cls.__gsignals__ = gsignals
# Setup signal closures by adding the specially named
# method to the class in the form of "do_<signal_name>".
for name, signal in newsignals.items():
if signal.func is not None:
funcName = 'do_' + name.replace('-', '_')
if not hasattr(cls, funcName):
setattr(cls, funcName, signal.func)
| lgpl-2.1 | -5,544,120,678,858,116,000 | 37.321705 | 103 | 0.607363 | false |
gholt/python-brim | brim/httpform.py | 1 | 10385 | """Module for working with HTTP Form POSTs iteratively.
.. warning::
This is an early version of this module. It has no tests, limited
documentation, and is subject to major changes.
Provides tools for parsing an HTTP Form POST without reading the whole
thing into memory first. Many thanks to Michael Barton for the original
prototype which I mangled into OpenStack Swift's formpost middleware and
then into this module.
The basic usage is to iterate over iter_form results, which are
rfc822.Message instances::
from brim.httpform import iter_form, parse_attrs
def wsgi_app(env, start_response):
for message in iter_form(env):
body = message.fp.read()
value, attrs = \\
parse_attrs(message.getheader('content-disposition'))
if value != 'form-data':
continue
if 'filename' in attrs:
filevarname = attrs['name']
filename = attrs['filename']
filecontent = body
else:
varname = attrs['name']
varvalue = body
See also the simple test at the end of the source file.
"""
"""Copyright and License.
Copyright 2012-2014 Gregory Holt
Copyright 2011 OpenStack, LLC.
Original source taken from OpenStack Swift FormPost middleware and
modified to be more generic.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from rfc822 import Message
_ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)')
class FormInvalid(Exception):
pass
def parse_attrs(header):
"""Returns (value, attr_dict) for an HTTP attr header.
Given a header like::
Content-Disposition: form-data; name="abc"; filename="test.html"
Returns::
("form-data", {"name": "abc", "filename": "test.html"})
Example usage with an rfc822.Message::
value, attrs = parse_attrs(
message.getheader('content-disposition'))
"""
attributes = {}
attrs = ''
if '; ' in header:
header, attrs = header.split('; ', 1)
m = True
while m:
m = _ATTRIBUTES_RE.match(attrs)
if m:
attrs = attrs[len(m.group(0)):]
attributes[m.group(1)] = m.group(2).strip('"')
return header, attributes
class _FormPartFileLikeObject(object):
def __init__(self, wsgi_input, boundary, input_buffer, read_chunk_size):
self.no_more_data_for_this_message = False
self.no_more_messages = False
self.wsgi_input = wsgi_input
self.boundary = boundary
self.input_buffer = input_buffer
self.read_chunk_size = read_chunk_size
def read(self, length=None):
if not length:
length = self.read_chunk_size
if self.no_more_data_for_this_message:
return ''
# read enough data to know whether we're going to run
# into a boundary in next [length] bytes
if len(self.input_buffer) < length + len(self.boundary) + 2:
to_read = length + len(self.boundary) + 2
while to_read > 0:
chunk = self.wsgi_input.read(to_read)
to_read -= len(chunk)
self.input_buffer += chunk
if not chunk:
self.no_more_messages = True
break
boundary_pos = self.input_buffer.find(self.boundary)
# boundary does not exist in the next (length) bytes
if boundary_pos == -1 or boundary_pos > length:
ret = self.input_buffer[:length]
self.input_buffer = self.input_buffer[length:]
# if it does, just return data up to the boundary
else:
ret, self.input_buffer = self.input_buffer.split(self.boundary, 1)
self.no_more_messages = self.input_buffer.startswith('--')
self.no_more_data_for_this_message = True
self.input_buffer = self.input_buffer[2:]
return ret
def readline(self):
if self.no_more_data_for_this_message:
return ''
boundary_pos = newline_pos = -1
while newline_pos < 0 and boundary_pos < 0:
chunk = self.wsgi_input.read(self.read_chunk_size)
self.input_buffer += chunk
newline_pos = self.input_buffer.find('\r\n')
boundary_pos = self.input_buffer.find(self.boundary)
if not chunk:
self.no_more_messages = True
break
# found a newline
if newline_pos >= 0 and \
(boundary_pos < 0 or newline_pos < boundary_pos):
# Use self.read to ensure any logic there happens...
ret = ''
to_read = newline_pos + 2
while to_read > 0:
chunk = self.read(to_read)
# Should never happen since we're reading from input_buffer,
# but just for completeness...
if not chunk:
break
to_read -= len(chunk)
ret += chunk
return ret
else: # no newlines, just return up to next boundary
return self.read(len(self.input_buffer))
class CappedFileLikeObject(object):
"""Reads a limited amount from a file-like object.
A file-like object wrapping another file-like object that raises an
EOFError if the amount of data read exceeds a given max_file_size.
This is useful to cap the form data size accepted::
for message in iter_form(env):
try:
content = CappedFileLikeObject(message.fp, 4096).read()
except EOFError:
raise HTTPRequestEntityTooLarge(
'Max form part size is 4096.\\n')
"""
def __init__(self, fp, max_file_size):
self.fp = fp
self.max_file_size = max_file_size
self.amount_read = 0
def read(self, size=None):
ret = self.fp.read(size)
self.amount_read += len(ret)
if self.amount_read > self.max_file_size:
raise EOFError('max_file_size exceeded')
return ret
def readline(self):
ret = self.fp.readline()
self.amount_read += len(ret)
if self.amount_read > self.max_file_size:
raise EOFError('max_file_size exceeded')
return ret
def iter_form(env, read_chunk_size=4096):
"""Yields messages for an HTTP Form POST.
Parses an HTTP Form POST and yields rfc822.Message instances for
each form part. See the overview module :py:mod:`brim.httpform`
for usage.
:param env: The WSGI environment for the incoming request.
:param read_chunk_size: The maximum amount to read at once from the
incoming request.
:returns: A generator yielding rfc822.Messages; be sure to fully
read from the message.fp file-like object before continuing to
the next message of the generator.
"""
content_type, attrs = parse_attrs(env.get('CONTENT_TYPE') or '')
if content_type != 'multipart/form-data':
raise FormInvalid('Content-Type not "multipart/form-data".')
boundary = attrs.get('boundary')
if not boundary:
raise FormInvalid('Content-Type does not define a form boundary.')
boundary = '--' + boundary
wsgi_input = env['wsgi.input']
if wsgi_input.readline().strip() != boundary:
raise FormInvalid('Invalid starting boundary.')
boundary = '\r\n' + boundary
input_buffer = ''
done = False
while not done:
fp = _FormPartFileLikeObject(wsgi_input, boundary, input_buffer,
read_chunk_size)
yield Message(fp, 0)
done = fp.no_more_messages
input_buffer = fp.input_buffer
if __name__ == '__main__':
# TODO: Real tests in brim.test.unit.test_httpform
# This is just a quick test.
from StringIO import StringIO
wsgi_input = StringIO('\r\n'.join([
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
'redirect value',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(15),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(25),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(1234),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
'sig value',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'']))
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=----WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'wsgi.input': wsgi_input}
for message in iter_form(env):
print '---'
body = message.fp.read()
value, attrs = parse_attrs(message.getheader('content-disposition'))
if value != 'form-data':
continue
if 'filename' in attrs:
print 'FILE %s named %r:' % (attrs['name'], attrs['filename'])
print body
else:
print 'VARIABLE %s = %r' % (attrs['name'], body)
| apache-2.0 | -1,204,917,751,737,703,000 | 33.848993 | 78 | 0.596052 | false |
cmoutard/mne-python | mne/io/array/array.py | 2 | 1869 | """Tools for creating Raw objects from numpy arrays"""
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from ..base import _BaseRaw
from ...utils import verbose, logger
class RawArray(_BaseRaw):
"""Raw object from numpy array
Parameters
----------
data : array, shape (n_channels, n_times)
The channels' time series.
info : instance of Info
Info dictionary. Consider using `create_info` to populate
this structure. This may be modified in place by the class.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
EpochsArray, EvokedArray, create_info
"""
@verbose
def __init__(self, data, info, verbose=None):
dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
data = np.asanyarray(data, dtype=dtype)
if data.ndim != 2:
raise ValueError('Data must be a 2D array of shape (n_channels, '
                             'n_samples)')
logger.info('Creating RawArray with %s data, n_channels=%s, n_times=%s'
% (dtype.__name__, data.shape[0], data.shape[1]))
if len(data) != len(info['ch_names']):
raise ValueError('len(data) does not match len(info["ch_names"])')
assert len(info['ch_names']) == info['nchan']
if info.get('buffer_size_sec', None) is None:
info['buffer_size_sec'] = 1. # reasonable default
super(RawArray, self).__init__(info, data, verbose=verbose)
logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % (
self.first_samp, self.last_samp,
float(self.first_samp) / info['sfreq'],
float(self.last_samp) / info['sfreq']))
logger.info('Ready.')
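# Illustrative usage sketch (not part of the original file); the channel names,
# types and sampling rate below are invented for the example:
#     import numpy as np
#     from mne import create_info
#     info = create_info(ch_names=['EEG 001', 'EEG 002'], sfreq=100.,
#                        ch_types='eeg')
#     raw = RawArray(np.random.randn(2, 1000), info)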
| bsd-3-clause | 7,648,877,112,066,561,000 | 34.942308 | 79 | 0.578919 | false |
bcorbet/SickRage | sickbeard/tv.py | 1 | 109275 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import datetime
import threading
import re
import glob
import stat
import traceback
import shutil
import sickbeard
import xml.etree.cElementTree as etree
from name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from lib import subliminal
try:
from lib.send2trash import send2trash
except ImportError:
pass
from lib.imdb import imdb
from sickbeard import db
from sickbeard import helpers, exceptions, logger
from sickbeard.exceptions import ex
from sickbeard import image_cache
from sickbeard import notifiers
from sickbeard import postProcessor
from sickbeard import subtitles
from sickbeard import history
from sickbeard import encodingKludge as ek
from common import Quality, Overview, statusStrings
from common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, SNATCHED_BEST, ARCHIVED, IGNORED, UNAIRED, WANTED, SKIPPED, \
UNKNOWN, FAILED
from common import NAMING_DUPLICATE, NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_SEPARATED_REPEAT, \
NAMING_LIMITED_EXTEND_E_PREFIXED
def dirty_setter(attr_name):
def wrapper(self, val):
if getattr(self, attr_name) != val:
setattr(self, attr_name, val)
self.dirty = True
return wrapper
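# Example (sketch): dirty_setter is paired with property() just below, e.g.
#     name = property(lambda self: self._name, dirty_setter("_name"))
# so an assignment like show.name = "New Title" updates _name and flags the
# object as dirty only when the value actually changes.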
class TVShow(object):
def __init__(self, indexer, indexerid, lang=""):
self._indexerid = int(indexerid)
self._indexer = int(indexer)
self._name = ""
self._imdbid = ""
self._network = ""
self._genre = ""
self._classification = ""
self._runtime = 0
self._imdb_info = {}
self._quality = int(sickbeard.QUALITY_DEFAULT)
self._flatten_folders = int(sickbeard.FLATTEN_FOLDERS_DEFAULT)
self._status = "Unknown"
self._airs = ""
self._startyear = 0
self._paused = 0
self._air_by_date = 0
self._subtitles = int(sickbeard.SUBTITLES_DEFAULT)
self._dvdorder = 0
self._archive_firstmatch = 0
self._lang = lang
self._last_update_indexer = 1
self._sports = 0
self._anime = 0
self._scene = 0
self._rls_ignore_words = ""
self._rls_require_words = ""
self._default_ep_status = SKIPPED
self.dirty = True
self._location = ""
self.lock = threading.Lock()
self.isDirGood = False
self.episodes = {}
self.nextaired = ""
otherShow = helpers.findCertainShow(sickbeard.showList, self.indexerid)
if otherShow != None:
raise exceptions.MultipleShowObjectsException("Can't create a show if it already exists")
self.loadFromDB()
name = property(lambda self: self._name, dirty_setter("_name"))
indexerid = property(lambda self: self._indexerid, dirty_setter("_indexerid"))
indexer = property(lambda self: self._indexer, dirty_setter("_indexer"))
# location = property(lambda self: self._location, dirty_setter("_location"))
imdbid = property(lambda self: self._imdbid, dirty_setter("_imdbid"))
network = property(lambda self: self._network, dirty_setter("_network"))
genre = property(lambda self: self._genre, dirty_setter("_genre"))
classification = property(lambda self: self._classification, dirty_setter("_classification"))
runtime = property(lambda self: self._runtime, dirty_setter("_runtime"))
imdb_info = property(lambda self: self._imdb_info, dirty_setter("_imdb_info"))
quality = property(lambda self: self._quality, dirty_setter("_quality"))
flatten_folders = property(lambda self: self._flatten_folders, dirty_setter("_flatten_folders"))
status = property(lambda self: self._status, dirty_setter("_status"))
airs = property(lambda self: self._airs, dirty_setter("_airs"))
startyear = property(lambda self: self._startyear, dirty_setter("_startyear"))
paused = property(lambda self: self._paused, dirty_setter("_paused"))
air_by_date = property(lambda self: self._air_by_date, dirty_setter("_air_by_date"))
subtitles = property(lambda self: self._subtitles, dirty_setter("_subtitles"))
dvdorder = property(lambda self: self._dvdorder, dirty_setter("_dvdorder"))
archive_firstmatch = property(lambda self: self._archive_firstmatch, dirty_setter("_archive_firstmatch"))
lang = property(lambda self: self._lang, dirty_setter("_lang"))
last_update_indexer = property(lambda self: self._last_update_indexer, dirty_setter("_last_update_indexer"))
sports = property(lambda self: self._sports, dirty_setter("_sports"))
anime = property(lambda self: self._anime, dirty_setter("_anime"))
scene = property(lambda self: self._scene, dirty_setter("_scene"))
rls_ignore_words = property(lambda self: self._rls_ignore_words, dirty_setter("_rls_ignore_words"))
rls_require_words = property(lambda self: self._rls_require_words, dirty_setter("_rls_require_words"))
default_ep_status = property(lambda self: self._default_ep_status, dirty_setter("_default_ep_status"))
@property
def is_anime(self):
if int(self.anime) > 0:
return True
else:
return False
@property
def is_sports(self):
if int(self.sports) > 0:
return True
else:
return False
@property
def is_scene(self):
if int(self.scene) > 0:
return True
else:
return False
def _getLocation(self):
# no dir check needed if missing show dirs are created during post-processing
if sickbeard.CREATE_MISSING_SHOW_DIRS:
return self._location
if ek.ek(os.path.isdir, self._location):
return self._location
else:
raise exceptions.ShowDirNotFoundException("Show folder doesn't exist, you shouldn't be using it")
def _setLocation(self, newLocation):
logger.log(u"Setter sets location to " + newLocation, logger.DEBUG)
# Don't validate dir if user wants to add shows without creating a dir
if sickbeard.ADD_SHOWS_WO_DIR or ek.ek(os.path.isdir, newLocation):
dirty_setter("_location")(self, newLocation)
self._isDirGood = True
else:
raise exceptions.NoNFOException("Invalid folder for the show!")
location = property(_getLocation, _setLocation)
# delete references to anything that's not in the internal lists
def flushEpisodes(self):
for curSeason in self.episodes:
for curEp in self.episodes[curSeason]:
myEp = self.episodes[curSeason][curEp]
self.episodes[curSeason][curEp] = None
del myEp
def getAllEpisodes(self, season=None, has_location=False):
sql_selection = "SELECT season, episode, "
# subselection to detect multi-episodes early, share_location > 0
sql_selection = sql_selection + " (SELECT COUNT (*) FROM tv_episodes WHERE showid = tve.showid AND season = tve.season AND location != '' AND location = tve.location AND episode != tve.episode) AS share_location "
sql_selection = sql_selection + " FROM tv_episodes tve WHERE showid = " + str(self.indexerid)
if season is not None:
sql_selection = sql_selection + " AND season = " + str(season)
if has_location:
sql_selection = sql_selection + " AND location != '' "
# need ORDER episode ASC to rename multi-episodes in order S01E01-02
sql_selection = sql_selection + " ORDER BY season ASC, episode ASC"
myDB = db.DBConnection()
results = myDB.select(sql_selection)
ep_list = []
for cur_result in results:
cur_ep = self.getEpisode(int(cur_result["season"]), int(cur_result["episode"]))
if not cur_ep:
continue
cur_ep.relatedEps = []
if cur_ep.location:
# if there is a location, check if it's a multi-episode (share_location > 0) and put them in relatedEps
if cur_result["share_location"] > 0:
related_eps_result = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND location = ? AND episode != ? ORDER BY episode ASC",
[self.indexerid, cur_ep.season, cur_ep.location, cur_ep.episode])
for cur_related_ep in related_eps_result:
related_ep = self.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"]))
if related_ep and related_ep not in cur_ep.relatedEps:
cur_ep.relatedEps.append(related_ep)
ep_list.append(cur_ep)
return ep_list
def getEpisode(self, season=None, episode=None, file=None, noCreate=False, absolute_number=None, forceUpdate=False):
# if we get an anime get the real season and episode
if self.is_anime and absolute_number and not season and not episode:
myDB = db.DBConnection()
sql = "SELECT * FROM tv_episodes WHERE showid = ? AND absolute_number = ? AND season != 0"
sqlResults = myDB.select(sql, [self.indexerid, absolute_number])
if len(sqlResults) == 1:
episode = int(sqlResults[0]["episode"])
season = int(sqlResults[0]["season"])
logger.log(
"Found episode by absolute_number:" + str(absolute_number) + " which is " + str(season) + "x" + str(
episode), logger.DEBUG)
elif len(sqlResults) > 1:
logger.log("Multiple entries for absolute number: " + str(
absolute_number) + " in show: " + self.name + " found ", logger.ERROR)
return None
else:
logger.log(
"No entries for absolute number: " + str(absolute_number) + " in show: " + self.name + " found.",
logger.DEBUG)
return None
if season not in self.episodes:
self.episodes[season] = {}
if episode not in self.episodes[season] or self.episodes[season][episode] is None:
if noCreate:
return None
logger.log(str(self.indexerid) + u": An object for episode " + str(season) + "x" + str(
episode) + " didn't exist in the cache, trying to create it", logger.DEBUG)
if file:
ep = TVEpisode(self, season, episode, file)
else:
ep = TVEpisode(self, season, episode)
if ep != None:
self.episodes[season][episode] = ep
return self.episodes[season][episode]
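# decide whether an 'Ended' show still deserves an indexer refresh, based on recent and upcoming air dates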
def should_update(self, update_date=None):
update_date = update_date or datetime.date.today()  # a date default argument would be frozen at class-definition time
# if show is not 'Ended' always update (status 'Continuing')
if self.status == 'Continuing':
return True
# run logic against the current show latest aired and next unaired data to see if we should bypass 'Ended' status
graceperiod = datetime.timedelta(days=30)
last_airdate = datetime.date.fromordinal(1)
# get latest aired episode to compare against today - graceperiod and today + graceperiod
myDB = db.DBConnection()
sql_result = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? AND season > '0' AND airdate > '1' AND status > '1' ORDER BY airdate DESC LIMIT 1",
[self.indexerid])
if sql_result:
last_airdate = datetime.date.fromordinal(sql_result[0]['airdate'])
if last_airdate >= (update_date - graceperiod) and last_airdate <= (update_date + graceperiod):
return True
# get next upcoming UNAIRED episode to compare against today + graceperiod
sql_result = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? AND season > '0' AND airdate > '1' AND status = '1' ORDER BY airdate ASC LIMIT 1",
[self.indexerid])
if sql_result:
next_airdate = datetime.date.fromordinal(sql_result[0]['airdate'])
if next_airdate <= (update_date + graceperiod):
return True
last_update_indexer = datetime.date.fromordinal(self.last_update_indexer)
# in the first year after ended (last airdate), update every 30 days
if (update_date - last_airdate) < datetime.timedelta(days=450) and (
update_date - last_update_indexer) > datetime.timedelta(days=30):
return True
return False
def writeShowNFO(self):
result = False
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return False
logger.log(str(self.indexerid) + u": Writing NFOs for show")
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.create_show_metadata(self) or result
return result
def writeMetadata(self, show_only=False):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return
self.getImages()
self.writeShowNFO()
if not show_only:
self.writeEpisodeNFOs()
def writeEpisodeNFOs(self):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return
logger.log(str(self.indexerid) + u": Writing NFOs for all episodes")
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [self.indexerid])
for epResult in sqlResults:
logger.log(str(self.indexerid) + u": Retrieving/creating episode " + str(epResult["season"]) + "x" + str(
epResult["episode"]), logger.DEBUG)
curEp = self.getEpisode(epResult["season"], epResult["episode"])
if not curEp:
continue
curEp.createMetaFiles()
def updateMetadata(self):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return
self.updateShowNFO()
def updateShowNFO(self):
result = False
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return False
logger.log(str(self.indexerid) + u": Updating NFOs for show with new indexer info")
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.update_show_indexer_metadata(self) or result
return result
# find all media files in the show folder and create episodes for as many as possible
def loadEpisodesFromDir(self):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, not loading episodes from disk")
return
logger.log(str(self.indexerid) + u": Loading all episodes from the show directory " + self._location)
# get file list
mediaFiles = helpers.listMediaFiles(self._location)
# create TVEpisodes from each media file (if possible)
sql_l = []
for mediaFile in mediaFiles:
parse_result = None
curEpisode = None
logger.log(str(self.indexerid) + u": Creating episode from " + mediaFile, logger.DEBUG)
try:
curEpisode = self.makeEpFromFile(ek.ek(os.path.join, self._location, mediaFile))
except (exceptions.ShowNotFoundException, exceptions.EpisodeNotFoundException), e:
logger.log(u"Episode " + mediaFile + " returned an exception: " + ex(e), logger.ERROR)
continue
except exceptions.EpisodeDeletedException:
logger.log(u"The episode deleted itself when I tried making an object for it", logger.DEBUG)
if curEpisode is None:
continue
# see if we should save the release name in the db
ep_file_name = ek.ek(os.path.basename, curEpisode.location)
ep_file_name = ek.ek(os.path.splitext, ep_file_name)[0]
try:
parse_result = None
np = NameParser(False, showObj=self, tryIndexers=True)
parse_result = np.parse(ep_file_name)
except (InvalidNameException, InvalidShowException):
pass
if not ' ' in ep_file_name and parse_result and parse_result.release_group:
logger.log(
u"Name " + ep_file_name + u" gave release group of " + parse_result.release_group + ", seems valid",
logger.DEBUG)
curEpisode.release_name = ep_file_name
# store the reference in the show
if curEpisode != None:
if self.subtitles:
try:
curEpisode.refreshSubtitles()
except:
logger.log(str(self.indexerid) + ": Could not refresh subtitles", logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
sql_l.append(curEpisode.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
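# rebuild episode objects from the local database, cross-checking each season against the cached indexer data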
def loadEpisodesFromDB(self):
logger.log(u"Loading all episodes from the DB")
myDB = db.DBConnection()
sql = "SELECT * FROM tv_episodes WHERE showid = ?"
sqlResults = myDB.select(sql, [self.indexerid])
scannedEps = {}
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if self.lang:
lINDEXER_API_PARMS['language'] = self.lang
if self.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
cachedShow = t[self.indexerid]
cachedSeasons = {}
for curResult in sqlResults:
deleteEp = False
curSeason = int(curResult["season"])
curEpisode = int(curResult["episode"])
if curSeason not in cachedSeasons:
try:
cachedSeasons[curSeason] = cachedShow[curSeason]
except sickbeard.indexer_seasonnotfound, e:
logger.log(u"Error when trying to load the episode from " + sickbeard.indexerApi(
self.indexer).name + ": " + e.message, logger.WARNING)
deleteEp = True
if not curSeason in scannedEps:
scannedEps[curSeason] = {}
logger.log(u"Loading episode " + str(curSeason) + "x" + str(curEpisode) + " from the DB", logger.DEBUG)
try:
curEp = self.getEpisode(curSeason, curEpisode)
if not curEp:
raise exceptions.EpisodeNotFoundException
# if we found out that the ep is no longer on TVDB then delete it from our database too
if deleteEp:
curEp.deleteEpisode()
curEp.loadFromDB(curSeason, curEpisode)
curEp.loadFromIndexer(tvapi=t, cachedSeason=cachedSeasons[curSeason])
scannedEps[curSeason][curEpisode] = True
except exceptions.EpisodeDeletedException:
logger.log(u"Tried loading an episode from the DB that should have been deleted, skipping it",
logger.DEBUG)
continue
return scannedEps
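# pull the full episode list from the indexer and refresh every episode's details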
def loadEpisodesFromIndexer(self, cache=True):
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if not cache:
lINDEXER_API_PARMS['cache'] = False
if self.lang:
lINDEXER_API_PARMS['language'] = self.lang
if self.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
try:
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
showObj = t[self.indexerid]
except sickbeard.indexer_error:
logger.log(u"" + sickbeard.indexerApi(
self.indexer).name + " timed out, unable to update episodes from " + sickbeard.indexerApi(
self.indexer).name, logger.ERROR)
return None
logger.log(
str(self.indexerid) + u": Loading all episodes from " + sickbeard.indexerApi(self.indexer).name + "..")
scannedEps = {}
sql_l = []
for season in showObj:
scannedEps[season] = {}
for episode in showObj[season]:
# need some examples of wtf episode 0 means to decide if we want it or not
if episode == 0:
continue
try:
ep = self.getEpisode(season, episode)
if not ep:
raise exceptions.EpisodeNotFoundException
except exceptions.EpisodeNotFoundException:
logger.log(
str(self.indexerid) + ": " + sickbeard.indexerApi(self.indexer).name + " object for " + str(
season) + "x" + str(episode) + " is incomplete, skipping this episode")
continue
else:
try:
ep.loadFromIndexer(tvapi=t)
except exceptions.EpisodeDeletedException:
logger.log(u"The episode was deleted, skipping the rest of the load")
continue
with ep.lock:
logger.log(str(self.indexerid) + u": Loading info from " + sickbeard.indexerApi(
self.indexer).name + " for episode " + str(season) + "x" + str(episode), logger.DEBUG)
ep.loadFromIndexer(season, episode, tvapi=t)
sql_l.append(ep.get_sql())
scannedEps[season][episode] = True
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
# Done updating save last update date
self.last_update_indexer = datetime.date.today().toordinal()
self.saveToDB()
return scannedEps
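# ask every enabled metadata provider to create show artwork (fanart, posters, banners, season images)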
def getImages(self, fanart=None, poster=None):
fanart_result = poster_result = banner_result = False
season_posters_result = season_banners_result = season_all_poster_result = season_all_banner_result = False
for cur_provider in sickbeard.metadata_provider_dict.values():
# FIXME: Needs to not show this message if the option is not enabled?
logger.log(u"Running metadata routines for " + cur_provider.name, logger.DEBUG)
fanart_result = cur_provider.create_fanart(self) or fanart_result
poster_result = cur_provider.create_poster(self) or poster_result
banner_result = cur_provider.create_banner(self) or banner_result
season_posters_result = cur_provider.create_season_posters(self) or season_posters_result
season_banners_result = cur_provider.create_season_banners(self) or season_banners_result
season_all_poster_result = cur_provider.create_season_all_poster(self) or season_all_poster_result
season_all_banner_result = cur_provider.create_season_all_banner(self) or season_all_banner_result
return fanart_result or poster_result or banner_result or season_posters_result or season_banners_result or season_all_poster_result or season_all_banner_result
# make a TVEpisode object from a media file
def makeEpFromFile(self, file):
if not ek.ek(os.path.isfile, file):
logger.log(str(self.indexerid) + u": That isn't even a real file dude... " + file)
return None
logger.log(str(self.indexerid) + u": Creating episode object from " + file, logger.DEBUG)
try:
myParser = NameParser(showObj=self, tryIndexers=True)
parse_result = myParser.parse(file)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + file + " into a valid episode", logger.DEBUG)
return None
except InvalidShowException:
logger.log(u"Unable to parse the filename " + file + " into a valid show", logger.DEBUG)
return None
if not len(parse_result.episode_numbers):
logger.log("parse_result: " + str(parse_result))
logger.log(u"No episode number found in " + file + ", ignoring it", logger.ERROR)
return None
# for now lets assume that any episode in the show dir belongs to that show
season = parse_result.season_number if parse_result.season_number != None else 1
episodes = parse_result.episode_numbers
rootEp = None
sql_l = []
for curEpNum in episodes:
episode = int(curEpNum)
logger.log(
str(self.indexerid) + ": " + file + " parsed to " + self.name + " " + str(season) + "x" + str(episode),
logger.DEBUG)
checkQualityAgain = False
same_file = False
curEp = self.getEpisode(season, episode)
if not curEp:
try:
curEp = self.getEpisode(season, episode, file)
if not curEp:
raise exceptions.EpisodeNotFoundException
except exceptions.EpisodeNotFoundException:
logger.log(str(self.indexerid) + u": Unable to figure out what this file is, skipping",
logger.ERROR)
continue
else:
# if there is a new file associated with this ep then re-check the quality
if curEp.location and ek.ek(os.path.normpath, curEp.location) != ek.ek(os.path.normpath, file):
logger.log(
u"The old episode had a different file associated with it, I will re-check the quality based on the new filename " + file,
logger.DEBUG)
checkQualityAgain = True
with curEp.lock:
old_size = curEp.file_size
curEp.location = file
# if the sizes are the same then it's probably the same file
if old_size and curEp.file_size == old_size:
same_file = True
else:
same_file = False
curEp.checkForMetaFiles()
if rootEp == None:
rootEp = curEp
else:
if curEp not in rootEp.relatedEps:
with rootEp.lock:
rootEp.relatedEps.append(curEp)
# if it's a new file then
if not same_file:
with curEp.lock:
curEp.release_name = ''
# if they replace a file on me I'll make some attempt at re-checking the quality unless I know it's the same file
if checkQualityAgain and not same_file:
newQuality = Quality.nameQuality(file, self.is_anime)
logger.log(u"Since this file has been renamed, I checked " + file + " and found quality " +
Quality.qualityStrings[newQuality], logger.DEBUG)
if newQuality != Quality.UNKNOWN:
with curEp.lock:
curEp.status = Quality.compositeStatus(DOWNLOADED, newQuality)
# check for status/quality changes as long as it's a new file
elif not same_file and sickbeard.helpers.isMediaFile(file) and curEp.status not in Quality.DOWNLOADED + [
ARCHIVED, IGNORED]:
oldStatus, oldQuality = Quality.splitCompositeStatus(curEp.status)
newQuality = Quality.nameQuality(file, self.is_anime)
if newQuality == Quality.UNKNOWN:
newQuality = Quality.assumeQuality(file)
newStatus = None
# if it was snatched and now exists then set the status correctly
if oldStatus == SNATCHED and oldQuality <= newQuality:
logger.log(u"STATUS: this ep used to be snatched with quality " + Quality.qualityStrings[
oldQuality] + u" but a file exists with quality " + Quality.qualityStrings[
newQuality] + u" so I'm setting the status to DOWNLOADED", logger.DEBUG)
newStatus = DOWNLOADED
# if it was snatched proper and we found a higher quality one then allow the status change
elif oldStatus == SNATCHED_PROPER and oldQuality < newQuality:
logger.log(u"STATUS: this ep used to be snatched proper with quality " + Quality.qualityStrings[
oldQuality] + u" but a file exists with quality " + Quality.qualityStrings[
newQuality] + u" so I'm setting the status to DOWNLOADED", logger.DEBUG)
newStatus = DOWNLOADED
elif oldStatus not in (SNATCHED, SNATCHED_PROPER):
newStatus = DOWNLOADED
if newStatus is not None:
with curEp.lock:
logger.log(u"STATUS: we have an associated file, so setting the status from " + str(
curEp.status) + u" to DOWNLOADED/" + str(Quality.statusFromName(file, anime=self.is_anime)),
logger.DEBUG)
curEp.status = Quality.compositeStatus(newStatus, newQuality)
with curEp.lock:
sql_l.append(curEp.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
# creating metafiles on the root should be good enough
if rootEp:
with rootEp.lock:
rootEp.createMetaFiles()
return rootEp
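# populate show attributes from the tv_shows and imdb_info tables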
def loadFromDB(self, skipNFO=False):
logger.log(str(self.indexerid) + u": Loading show info from database")
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_shows WHERE indexer_id = ?", [self.indexerid])
if len(sqlResults) > 1:
raise exceptions.MultipleDBShowsException()
elif len(sqlResults) == 0:
logger.log(str(self.indexerid) + ": Unable to find the show in the database")
return
else:
self.indexer = int(sqlResults[0]["indexer"] or 0)
if not self.name:
self.name = sqlResults[0]["show_name"]
if not self.network:
self.network = sqlResults[0]["network"]
if not self.genre:
self.genre = sqlResults[0]["genre"]
if not self.classification:
self.classification = sqlResults[0]["classification"]
self.runtime = sqlResults[0]["runtime"]
self.status = sqlResults[0]["status"]
if self.status is None:
self.status = "Unknown"
self.airs = sqlResults[0]["airs"]
if self.airs is None:
self.airs = ""
self.startyear = int(sqlResults[0]["startyear"] or 0)
self.air_by_date = int(sqlResults[0]["air_by_date"] or 0)
self.anime = int(sqlResults[0]["anime"] or 0)
self.sports = int(sqlResults[0]["sports"] or 0)
self.scene = int(sqlResults[0]["scene"] or 0)
self.subtitles = int(sqlResults[0]["subtitles"] or 0)
self.dvdorder = int(sqlResults[0]["dvdorder"] or 0)
self.archive_firstmatch = int(sqlResults[0]["archive_firstmatch"] or 0)
self.quality = int(sqlResults[0]["quality"] or UNKNOWN)
self.flatten_folders = int(sqlResults[0]["flatten_folders"] or 0)
self.paused = int(sqlResults[0]["paused"] or 0)
try:
self.location = sqlResults[0]["location"]
except Exception:
dirty_setter("_location")(self, sqlResults[0]["location"])
self._isDirGood = False
if not self.lang:
self.lang = sqlResults[0]["lang"]
self.last_update_indexer = sqlResults[0]["last_update_indexer"]
self.rls_ignore_words = sqlResults[0]["rls_ignore_words"]
self.rls_require_words = sqlResults[0]["rls_require_words"]
self.default_ep_status = int(sqlResults[0]["default_ep_status"] or SKIPPED)
if not self.imdbid:
self.imdbid = sqlResults[0]["imdb_id"]
# Get IMDb_info from database
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM imdb_info WHERE indexer_id = ?", [self.indexerid])
if len(sqlResults) == 0:
logger.log(str(self.indexerid) + ": Unable to find IMDb show info in the database")
return
else:
self.imdb_info = dict(zip(sqlResults[0].keys(), sqlResults[0]))
self.dirty = False
return True
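# refresh basic show info (name, network, runtime, airs, status) from the indexer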
def loadFromIndexer(self, cache=True, tvapi=None, cachedSeason=None):
logger.log(str(self.indexerid) + u": Loading show info from " + sickbeard.indexerApi(self.indexer).name)
# There's gotta be a better way of doing this but we don't wanna
# change the cache value elsewhere
if tvapi is None:
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if not cache:
lINDEXER_API_PARMS['cache'] = False
if self.lang:
lINDEXER_API_PARMS['language'] = self.lang
if self.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
else:
t = tvapi
myEp = t[self.indexerid]
try:
self.name = myEp['seriesname'].strip()
except AttributeError:
raise sickbeard.indexer_attributenotfound(
"Found %s, but attribute 'seriesname' was empty." % (self.indexerid))
self.classification = getattr(myEp, 'classification', 'Scripted')
self.genre = getattr(myEp, 'genre', '')
self.network = getattr(myEp, 'network', '')
self.runtime = getattr(myEp, 'runtime', '')
self.imdbid = getattr(myEp, 'imdb_id', '')
if getattr(myEp, 'airs_dayofweek', None) is not None and getattr(myEp, 'airs_time', None) is not None:
self.airs = myEp["airs_dayofweek"] + " " + myEp["airs_time"]
if self.airs is None:
self.airs = ''
if getattr(myEp, 'firstaired', None) is not None:
self.startyear = int(str(myEp["firstaired"]).split('-')[0])
self.status = getattr(myEp, 'status', 'Unknown')
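# fill self.imdb_info from IMDb, flattening runtimes/genres/certificates into plain strings for the DB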
def loadIMDbInfo(self, imdbapi=None):
imdb_info = {'imdb_id': self.imdbid,
'title': '',
'year': '',
'akas': [],
'runtimes': '',
'genres': [],
'countries': '',
'country_codes': [],
'certificates': [],
'rating': '',
'votes': '',
'last_update': ''
}
i = imdb.IMDb()
if not self.imdbid:
self.imdbid = i.title2imdbID(self.name, kind='tv series')
if self.imdbid:
logger.log(str(self.indexerid) + u": Loading show info from IMDb")
imdbTv = i.get_movie(str(re.sub("[^0-9]", "", self.imdbid)))
for key in filter(lambda x: x.replace('_', ' ') in imdbTv.keys(), imdb_info.keys()):
# Store only the first value for string type
if type(imdb_info[key]) == type('') and type(imdbTv.get(key.replace('_', ' '))) == type([]):
imdb_info[key] = imdbTv.get(key.replace('_', ' '))[0]
else:
imdb_info[key] = imdbTv.get(key.replace('_', ' '))
# Filter only the value
if imdb_info['runtimes']:
imdb_info['runtimes'] = re.search('\d+', imdb_info['runtimes']).group(0)
else:
imdb_info['runtimes'] = self.runtime
if imdb_info['akas']:
imdb_info['akas'] = '|'.join(imdb_info['akas'])
else:
imdb_info['akas'] = ''
# Join all genres in a string
if imdb_info['genres']:
imdb_info['genres'] = '|'.join(imdb_info['genres'])
else:
imdb_info['genres'] = ''
# Get only the production country certificate if any
if imdb_info['certificates'] and imdb_info['countries']:
dct = {}
try:
for item in imdb_info['certificates']:
dct[item.split(':')[0]] = item.split(':')[1]
imdb_info['certificates'] = dct[imdb_info['countries']]
except:
imdb_info['certificates'] = ''
else:
imdb_info['certificates'] = ''
if imdb_info['country_codes']:
imdb_info['country_codes'] = '|'.join(imdb_info['country_codes'])
else:
imdb_info['country_codes'] = ''
imdb_info['last_update'] = datetime.date.today().toordinal()
# Rename dict keys without spaces for DB upsert
self.imdb_info = dict((k.replace(' ', '_'), v) for k, v in imdb_info.items())  # values were already flattened to plain strings/numbers above
logger.log(str(self.indexerid) + u": Obtained info from IMDb ->" + str(self.imdb_info), logger.DEBUG)
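# find the next unaired/wanted episode's airdate and cache it in self.nextaired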
def nextEpisode(self):
logger.log(str(self.indexerid) + ": Finding the episode which airs next", logger.DEBUG)
curDate = datetime.date.today().toordinal()
if not self.nextaired or curDate > self.nextaired:
myDB = db.DBConnection()
sqlResults = myDB.select(
"SELECT airdate, season, episode FROM tv_episodes WHERE showid = ? AND airdate >= ? AND status IN (?,?) ORDER BY airdate ASC LIMIT 1",
[self.indexerid, datetime.date.today().toordinal(), UNAIRED, WANTED])
if not sqlResults:
logger.log(str(self.indexerid) + u": No episode found... need to implement a show status",
logger.DEBUG)
self.nextaired = ""
else:
logger.log(str(self.indexerid) + u": Found episode " + str(sqlResults[0]["season"]) + "x" + str(
sqlResults[0]["episode"]), logger.DEBUG)
self.nextaired = sqlResults[0]['airdate']
return self.nextaired
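# remove the show entirely: DB rows, the entry in the show list, cached images and optionally the whole show folder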
def deleteShow(self, full=False):
sql_l = [["DELETE FROM tv_episodes WHERE showid = ?", [self.indexerid]],
["DELETE FROM tv_shows WHERE indexer_id = ?", [self.indexerid]],
["DELETE FROM imdb_info WHERE indexer_id = ?", [self.indexerid]],
["DELETE FROM xem_refresh WHERE indexer_id = ?", [self.indexerid]],
["DELETE FROM scene_numbering WHERE indexer_id = ?", [self.indexerid]]]
myDB = db.DBConnection()
myDB.mass_action(sql_l)
action = ('delete', 'trash')[sickbeard.TRASH_REMOVE_SHOW]
# remove self from show list
sickbeard.showList = [x for x in sickbeard.showList if int(x.indexerid) != self.indexerid]
# clear the cache
image_cache_dir = ek.ek(os.path.join, sickbeard.CACHE_DIR, 'images')
for cache_file in ek.ek(glob.glob, ek.ek(os.path.join, image_cache_dir, str(self.indexerid) + '.*')):
logger.log(u'Attempt to %s cache file %s' % (action, cache_file))
try:
if sickbeard.TRASH_REMOVE_SHOW:
send2trash(cache_file)
else:
os.remove(cache_file)
except OSError, e:
logger.log(u'Unable to %s %s: %s / %s' % (action, cache_file, repr(e), str(e)), logger.WARNING)
# remove entire show folder
if full:
try:
logger.log(u'Attempt to %s show folder %s' % (action, self._location))
# check first the read-only attribute
file_attribute = ek.ek(os.stat, self.location)[0]
if (not file_attribute & stat.S_IWRITE):
# File is read-only, so make it writeable
logger.log('Attempting to make writeable the read only folder %s' % self._location, logger.DEBUG)
try:
ek.ek(os.chmod, self.location, stat.S_IWRITE)
except:
logger.log(u'Unable to change permissions of %s' % self._location, logger.WARNING)
if sickbeard.TRASH_REMOVE_SHOW:
send2trash(self.location)
else:
ek.ek(shutil.rmtree, self.location)
logger.log(u'%s show folder %s' %
(('Deleted', 'Trashed')[sickbeard.TRASH_REMOVE_SHOW],
self._location))
except exceptions.ShowDirNotFoundException:
logger.log(u"Show folder does not exist, no need to %s %s" % (action, self._location), logger.WARNING)
except OSError, e:
logger.log(u'Unable to %s %s: %s / %s' % (action, self._location, repr(e), str(e)), logger.WARNING)
def populateCache(self):
cache_inst = image_cache.ImageCache()
logger.log(u"Checking & filling cache for show " + self.name)
cache_inst.fill_cache(self)
def refreshDir(self):
# make sure the show dir is where we think it is unless dirs are created on the fly
if not ek.ek(os.path.isdir, self._location) and not sickbeard.CREATE_MISSING_SHOW_DIRS:
return False
# load from dir
self.loadEpisodesFromDir()
# run through all locations from DB, check that they exist
logger.log(str(self.indexerid) + u": Loading all episodes with a location from the database")
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [self.indexerid])
sql_l = []
for ep in sqlResults:
curLoc = os.path.normpath(ep["location"])
season = int(ep["season"])
episode = int(ep["episode"])
try:
curEp = self.getEpisode(season, episode)
if not curEp:
raise exceptions.EpisodeDeletedException
except exceptions.EpisodeDeletedException:
logger.log(u"The episode was deleted while we were refreshing it, moving on to the next one",
logger.DEBUG)
continue
# if the path doesn't exist or if it's not in our show dir
if not ek.ek(os.path.isfile, curLoc) or not os.path.normpath(curLoc).startswith(
os.path.normpath(self.location)):
# check if downloaded files still exist, update our data if this has changed
if not sickbeard.SKIP_REMOVED_FILES:
with curEp.lock:
# if it used to have a file associated with it and it doesn't anymore then set it to IGNORED
if curEp.location and curEp.status in Quality.DOWNLOADED:
logger.log(str(self.indexerid) + u": Location for " + str(season) + "x" + str(
episode) + " doesn't exist, removing it and changing our status to IGNORED",
logger.DEBUG)
curEp.status = IGNORED
curEp.subtitles = list()
curEp.subtitles_searchcount = 0
curEp.subtitles_lastsearch = str(datetime.datetime.min)
curEp.location = ''
curEp.hasnfo = False
curEp.hastbn = False
curEp.release_name = ''
sql_l.append(curEp.get_sql())
else:
# the file exists, set its modify file stamp
if sickbeard.AIRDATE_EPISODES:
with curEp.lock:
curEp.airdateModifyStamp()
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
def downloadSubtitles(self, force=False):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + ": Show dir doesn't exist, can't download subtitles", logger.DEBUG)
return
logger.log(str(self.indexerid) + ": Downloading subtitles", logger.DEBUG)
try:
episodes = self.getAllEpisodes(has_location=True)
if not episodes:
logger.log(str(self.indexerid) + ": No episodes to download subtitles for " + self.name, logger.DEBUG)
return
for episode in episodes:
episode.downloadSubtitles(force=force)
except Exception:
logger.log("Error occurred when downloading subtitles: " + traceback.format_exc(), logger.DEBUG)
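# persist the show record to tv_shows (and imdb_info) via upsert, skipping clean records unless forced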
def saveToDB(self, forceSave=False):
if not self.dirty and not forceSave:
logger.log(str(self.indexerid) + ": Not saving show to db - record is not dirty", logger.DEBUG)
return
logger.log(str(self.indexerid) + u": Saving show info to database", logger.DEBUG)
controlValueDict = {"indexer_id": self.indexerid}
newValueDict = {"indexer": self.indexer,
"show_name": self.name,
"location": self._location,
"network": self.network,
"genre": self.genre,
"classification": self.classification,
"runtime": self.runtime,
"quality": self.quality,
"airs": self.airs,
"status": self.status,
"flatten_folders": self.flatten_folders,
"paused": self.paused,
"air_by_date": self.air_by_date,
"anime": self.anime,
"scene": self.scene,
"sports": self.sports,
"subtitles": self.subtitles,
"dvdorder": self.dvdorder,
"archive_firstmatch": self.archive_firstmatch,
"startyear": self.startyear,
"lang": self.lang,
"imdb_id": self.imdbid,
"last_update_indexer": self.last_update_indexer,
"rls_ignore_words": self.rls_ignore_words,
"rls_require_words": self.rls_require_words,
"default_ep_status": self.default_ep_status
}
myDB = db.DBConnection()
myDB.upsert("tv_shows", newValueDict, controlValueDict)
helpers.update_anime_support()
if self.imdbid:
controlValueDict = {"indexer_id": self.indexerid}
newValueDict = self.imdb_info
myDB = db.DBConnection()
myDB.upsert("imdb_info", newValueDict, controlValueDict)
def __str__(self):
toReturn = ""
toReturn += "indexerid: " + str(self.indexerid) + "\n"
toReturn += "indexer: " + str(self.indexer) + "\n"
toReturn += "name: " + self.name + "\n"
toReturn += "location: " + self._location + "\n"
if self.network:
toReturn += "network: " + self.network + "\n"
if self.airs:
toReturn += "airs: " + self.airs + "\n"
toReturn += "status: " + self.status + "\n"
toReturn += "startyear: " + str(self.startyear) + "\n"
if self.genre:
toReturn += "genre: " + self.genre + "\n"
toReturn += "classification: " + self.classification + "\n"
toReturn += "runtime: " + str(self.runtime) + "\n"
toReturn += "quality: " + str(self.quality) + "\n"
toReturn += "scene: " + str(self.is_scene) + "\n"
toReturn += "sports: " + str(self.is_sports) + "\n"
toReturn += "anime: " + str(self.is_anime) + "\n"
return toReturn
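# decide whether a found episode at the given quality should be grabbed, based on its current status and the show's quality lists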
def wantEpisode(self, season, episode, quality, manualSearch=False):
logger.log(u"Checking if found episode " + str(season) + "x" + str(episode) + " is wanted at quality " +
Quality.qualityStrings[quality], logger.DEBUG)
# if the quality isn't one we want under any circumstances then just say no
anyQualities, bestQualities = Quality.splitQuality(self.quality)
logger.log(u"any,best = " + str(anyQualities) + " " + str(bestQualities) + " and found " + str(quality),
logger.DEBUG)
if quality not in anyQualities + bestQualities:
logger.log(u"Don't want this quality, ignoring found episode", logger.DEBUG)
return False
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
[self.indexerid, season, episode])
if not sqlResults:
logger.log(u"Unable to find a matching episode in database, ignoring found episode", logger.DEBUG)
return False
epStatus = int(sqlResults[0]["status"])
epStatus_text = statusStrings[epStatus]
logger.log(u"Existing episode status: " + str(epStatus) + " (" + epStatus_text + ")", logger.DEBUG)
# if we know we don't want it then just say no
if epStatus in (SKIPPED, IGNORED, ARCHIVED) and not manualSearch:
logger.log(u"Existing episode status is skipped/ignored/archived, ignoring found episode", logger.DEBUG)
return False
# if it's one of these then we want it as long as it's in our allowed initial qualities
if quality in anyQualities + bestQualities:
if epStatus in (WANTED, UNAIRED, SKIPPED):
logger.log(u"Existing episode status is wanted/unaired/skipped, getting found episode", logger.DEBUG)
return True
elif manualSearch:
logger.log(
u"Usually ignoring found episode, but forced search allows the quality, getting found episode",
logger.DEBUG)
return True
else:
logger.log(u"Quality is on wanted list, need to check if it's better than existing quality",
logger.DEBUG)
curStatus, curQuality = Quality.splitCompositeStatus(epStatus)
# if we are re-downloading then we only want it if it's in our bestQualities list and better than what we have
if curStatus in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_BEST and quality in bestQualities and quality > curQuality:
logger.log(u"Episode already exists but the found episode has better quality, getting found episode",
logger.DEBUG)
return True
else:
logger.log(u"Episode already exists and the found episode has same/lower quality, ignoring found episode",
logger.DEBUG)
logger.log(u"None of the conditions were met, ignoring found episode", logger.DEBUG)
return False
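# map a composite episode status to an Overview category for display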
def getOverview(self, epStatus):
if epStatus == WANTED:
return Overview.WANTED
elif epStatus in (UNAIRED, UNKNOWN):
return Overview.UNAIRED
elif epStatus in (SKIPPED, IGNORED):
return Overview.SKIPPED
elif epStatus == ARCHIVED:
return Overview.GOOD
elif epStatus in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.FAILED + Quality.SNATCHED_BEST:
anyQualities, bestQualities = Quality.splitQuality(self.quality) # @UnusedVariable
if bestQualities:
maxBestQuality = max(bestQualities)
else:
maxBestQuality = None
epStatus, curQuality = Quality.splitCompositeStatus(epStatus)
if epStatus == FAILED:
return Overview.WANTED
elif epStatus in (SNATCHED, SNATCHED_PROPER, SNATCHED_BEST):
return Overview.SNATCHED
# if they don't want re-downloads then we call it good if they have anything
elif maxBestQuality == None:
return Overview.GOOD
# if they have one but it's not the best they want then mark it as qual
elif curQuality < maxBestQuality:
return Overview.QUAL
# if it's >= maxBestQuality then it's good
else:
return Overview.GOOD
def __getstate__(self):
d = dict(self.__dict__)
del d['lock']
return d
def __setstate__(self, d):
d['lock'] = threading.Lock()
self.__dict__.update(d)
class TVEpisode(object):
def __init__(self, show, season, episode, file=""):
self._name = ""
self._season = season
self._episode = episode
self._absolute_number = 0
self._description = ""
self._subtitles = list()
self._subtitles_searchcount = 0
self._subtitles_lastsearch = str(datetime.datetime.min)
self._airdate = datetime.date.fromordinal(1)
self._hasnfo = False
self._hastbn = False
self._status = UNKNOWN
self._indexerid = 0
self._file_size = 0
self._release_name = ''
self._is_proper = False
self._version = 0
self._release_group = ''
# setting any of the above sets the dirty flag
self.dirty = True
self.show = show
self.scene_season = 0
self.scene_episode = 0
self.scene_absolute_number = 0
self._location = file
self._indexer = int(self.show.indexer)
self.lock = threading.Lock()
self.specifyEpisode(self.season, self.episode)
self.relatedEps = []
self.checkForMetaFiles()
self.wantedQuality = []
name = property(lambda self: self._name, dirty_setter("_name"))
season = property(lambda self: self._season, dirty_setter("_season"))
episode = property(lambda self: self._episode, dirty_setter("_episode"))
absolute_number = property(lambda self: self._absolute_number, dirty_setter("_absolute_number"))
description = property(lambda self: self._description, dirty_setter("_description"))
subtitles = property(lambda self: self._subtitles, dirty_setter("_subtitles"))
subtitles_searchcount = property(lambda self: self._subtitles_searchcount, dirty_setter("_subtitles_searchcount"))
subtitles_lastsearch = property(lambda self: self._subtitles_lastsearch, dirty_setter("_subtitles_lastsearch"))
airdate = property(lambda self: self._airdate, dirty_setter("_airdate"))
hasnfo = property(lambda self: self._hasnfo, dirty_setter("_hasnfo"))
hastbn = property(lambda self: self._hastbn, dirty_setter("_hastbn"))
status = property(lambda self: self._status, dirty_setter("_status"))
indexer = property(lambda self: self._indexer, dirty_setter("_indexer"))
indexerid = property(lambda self: self._indexerid, dirty_setter("_indexerid"))
# location = property(lambda self: self._location, dirty_setter("_location"))
file_size = property(lambda self: self._file_size, dirty_setter("_file_size"))
release_name = property(lambda self: self._release_name, dirty_setter("_release_name"))
is_proper = property(lambda self: self._is_proper, dirty_setter("_is_proper"))
version = property(lambda self: self._version, dirty_setter("_version"))
release_group = property(lambda self: self._release_group, dirty_setter("_release_group"))
def _set_location(self, new_location):
logger.log(u"Setter sets location to " + new_location, logger.DEBUG)
# self._location = newLocation
dirty_setter("_location")(self, new_location)
if new_location and ek.ek(os.path.isfile, new_location):
self.file_size = ek.ek(os.path.getsize, new_location)
else:
self.file_size = 0
location = property(lambda self: self._location, _set_location)
def refreshSubtitles(self):
"""Look for subtitles files and refresh the subtitles property"""
self.subtitles = subtitles.subtitlesLanguages(self.location)
def downloadSubtitles(self, force=False):
# TODO: Add support for force option
if not ek.ek(os.path.isfile, self.location):
logger.log(
str(self.show.indexerid) + ": Episode file doesn't exist, can't download subtitles for episode " + str(
self.season) + "x" + str(self.episode), logger.DEBUG)
return
logger.log(str(self.show.indexerid) + ": Downloading subtitles for episode " + str(self.season) + "x" + str(
self.episode), logger.DEBUG)
previous_subtitles = self.subtitles
try:
need_languages = set(sickbeard.SUBTITLES_LANGUAGES) - set(self.subtitles)
subtitles = subliminal.download_subtitles([self.location], languages=need_languages,
services=sickbeard.subtitles.getEnabledServiceList(), force=force,
multi=sickbeard.SUBTITLES_MULTI, cache_dir=sickbeard.CACHE_DIR)
if sickbeard.SUBTITLES_DIR:
for video in subtitles:
subs_new_path = ek.ek(os.path.join, os.path.dirname(video.path), sickbeard.SUBTITLES_DIR)
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder " + subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
helpers.chmodAsParent(new_file_path)
else:
for video in subtitles:
for subtitle in subtitles.get(video):
helpers.chmodAsParent(subtitle.path)
except Exception as e:
logger.log("Error occurred when downloading subtitles: " + traceback.format_exc(), logger.ERROR)
return
self.refreshSubtitles()
self.subtitles_searchcount = self.subtitles_searchcount + 1 if self.subtitles_searchcount else 1  # the check guards against a missing count, which sometimes raised an error
self.subtitles_lastsearch = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.saveToDB()
newsubtitles = set(self.subtitles).difference(set(previous_subtitles))
if newsubtitles:
subtitleList = ", ".join(subliminal.language.Language(x).name for x in newsubtitles)
logger.log(str(self.show.indexerid) + u": Downloaded " + subtitleList + " subtitles for episode " + str(
self.season) + "x" + str(self.episode), logger.DEBUG)
notifiers.notify_subtitle_download(self.prettyName(), subtitleList)
else:
logger.log(
str(self.show.indexerid) + u": No subtitles downloaded for episode " + str(self.season) + "x" + str(
self.episode), logger.DEBUG)
if sickbeard.SUBTITLES_HISTORY:
for video in subtitles:
for subtitle in subtitles.get(video):
history.logSubtitle(self.show.indexerid, self.season, self.episode, self.status, subtitle)
return subtitles
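# re-check nfo/tbn presence via the metadata providers; returns True if either flag changed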
def checkForMetaFiles(self):
oldhasnfo = self.hasnfo
oldhastbn = self.hastbn
cur_nfo = False
cur_tbn = False
# check for nfo and tbn
if ek.ek(os.path.isfile, self.location):
for cur_provider in sickbeard.metadata_provider_dict.values():
if cur_provider.episode_metadata:
new_result = cur_provider._has_episode_metadata(self)
else:
new_result = False
cur_nfo = new_result or cur_nfo
if cur_provider.episode_thumbnails:
new_result = cur_provider._has_episode_thumb(self)
else:
new_result = False
cur_tbn = new_result or cur_tbn
self.hasnfo = cur_nfo
self.hastbn = cur_tbn
# if either setting has changed return true, if not return false
return oldhasnfo != self.hasnfo or oldhastbn != self.hastbn
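# load episode details from the DB, falling back to the NFO file and finally the indexer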
def specifyEpisode(self, season, episode):
sqlResult = self.loadFromDB(season, episode)
if not sqlResult:
# only load from NFO if we didn't load from DB
if ek.ek(os.path.isfile, self.location):
try:
self.loadFromNFO(self.location)
except exceptions.NoNFOException:
logger.log(str(self.show.indexerid) + u": There was an error loading the NFO for episode " + str(
season) + "x" + str(episode), logger.ERROR)
pass
# if we tried loading it from NFO and didn't find the NFO, try the Indexers
if not self.hasnfo:
try:
result = self.loadFromIndexer(season, episode)
except exceptions.EpisodeDeletedException:
result = False
# if we failed SQL *and* NFO, Indexers then fail
if not result:
raise exceptions.EpisodeNotFoundException(
"Couldn't find episode " + str(season) + "x" + str(episode))
def loadFromDB(self, season, episode):
logger.log(
str(self.show.indexerid) + u": Loading episode details from DB for episode " + str(season) + "x" + str(
episode), logger.DEBUG)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
[self.show.indexerid, season, episode])
if len(sqlResults) > 1:
raise exceptions.MultipleDBEpisodesException("Your DB has two records for the same show somehow.")
elif len(sqlResults) == 0:
logger.log(str(self.show.indexerid) + u": Episode " + str(self.season) + "x" + str(
self.episode) + " not found in the database", logger.DEBUG)
return False
else:
# NAMEIT logger.log(u"AAAAA from" + str(self.season)+"x"+str(self.episode) + " -" + self.name + " to " + str(sqlResults[0]["name"]))
if sqlResults[0]["name"]:
self.name = sqlResults[0]["name"]
self.season = season
self.episode = episode
self.absolute_number = sqlResults[0]["absolute_number"]
self.description = sqlResults[0]["description"]
if not self.description:
self.description = ""
if sqlResults[0]["subtitles"]:
self.subtitles = sqlResults[0]["subtitles"].split(",")
self.subtitles_searchcount = sqlResults[0]["subtitles_searchcount"]
self.subtitles_lastsearch = sqlResults[0]["subtitles_lastsearch"]
self.airdate = datetime.date.fromordinal(int(sqlResults[0]["airdate"]))
# logger.log(u"1 Status changes from " + str(self.status) + " to " + str(sqlResults[0]["status"]), logger.DEBUG)
self.status = int(sqlResults[0]["status"] or -1)
# don't overwrite my location
if sqlResults[0]["location"]:
self.location = os.path.normpath(sqlResults[0]["location"])
if sqlResults[0]["file_size"]:
self.file_size = int(sqlResults[0]["file_size"])
else:
self.file_size = 0
self.indexerid = int(sqlResults[0]["indexerid"])
self.indexer = int(sqlResults[0]["indexer"])
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer)
try:
self.scene_season = int(sqlResults[0]["scene_season"])
except:
self.scene_season = 0
try:
self.scene_episode = int(sqlResults[0]["scene_episode"])
except:
self.scene_episode = 0
try:
self.scene_absolute_number = int(sqlResults[0]["scene_absolute_number"])
except:
self.scene_absolute_number = 0
if self.scene_absolute_number == 0:
self.scene_absolute_number = sickbeard.scene_numbering.get_scene_absolute_numbering(
self.show.indexerid,
self.show.indexer,
self.absolute_number
)
if self.scene_season == 0 or self.scene_episode == 0:
self.scene_season, self.scene_episode = sickbeard.scene_numbering.get_scene_numbering(
self.show.indexerid,
self.show.indexer,
self.season, self.episode
)
if sqlResults[0]["release_name"] is not None:
self.release_name = sqlResults[0]["release_name"]
if sqlResults[0]["is_proper"]:
self.is_proper = int(sqlResults[0]["is_proper"])
if sqlResults[0]["version"]:
self.version = int(sqlResults[0]["version"])
if sqlResults[0]["release_group"] is not None:
self.release_group = sqlResults[0]["release_group"]
self.dirty = False
return True
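# fetch this episode's details from the indexer and derive its status from the airdate and file presence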
def loadFromIndexer(self, season=None, episode=None, cache=True, tvapi=None, cachedSeason=None):
if season is None:
season = self.season
if episode is None:
episode = self.episode
logger.log(str(self.show.indexerid) + u": Loading episode details from " + sickbeard.indexerApi(
self.show.indexer).name + " for episode " + str(season) + "x" + str(episode), logger.DEBUG)
indexer_lang = self.show.lang
try:
if cachedSeason is None:
if tvapi is None:
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if not cache:
lINDEXER_API_PARMS['cache'] = False
if indexer_lang:
lINDEXER_API_PARMS['language'] = indexer_lang
if self.show.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
else:
t = tvapi
myEp = t[self.show.indexerid][season][episode]
else:
myEp = cachedSeason[episode]
except (sickbeard.indexer_error, IOError), e:
logger.log(u"" + sickbeard.indexerApi(self.indexer).name + " threw up an error: " + ex(e), logger.DEBUG)
# if the episode is already valid just log it, if not throw it up
if self.name:
logger.log(u"" + sickbeard.indexerApi(
self.indexer).name + " timed out but we have enough info from other sources, allowing the error",
logger.DEBUG)
return
else:
logger.log(u"" + sickbeard.indexerApi(self.indexer).name + " timed out, unable to create the episode",
logger.ERROR)
return False
except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
logger.log(u"Unable to find the episode on " + sickbeard.indexerApi(
self.indexer).name + "... has it been removed? Should I delete from db?", logger.DEBUG)
# if I'm no longer on the Indexers but I once was then delete myself from the DB
if self.indexerid != -1:
self.deleteEpisode()
return
if getattr(myEp, 'episodename', None) is None:
logger.log(u"This episode (" + self.show.name + " - " + str(season) + "x" + str(
episode) + ") has no name on " + sickbeard.indexerApi(self.indexer).name + "")
# if I'm incomplete on TVDB but I once was complete then just delete myself from the DB for now
if self.indexerid != -1:
self.deleteEpisode()
return False
if getattr(myEp, 'absolute_number', None) is None:
logger.log(u"This episode (" + self.show.name + " - " + str(season) + "x" + str(
episode) + ") has no absolute number on " + sickbeard.indexerApi(
self.indexer).name, logger.DEBUG)
else:
logger.log(
str(self.show.indexerid) + ": The absolute_number for " + str(season) + "x" + str(episode) + " is : " +
str(myEp["absolute_number"]), logger.DEBUG)
self.absolute_number = int(myEp["absolute_number"])
self.name = getattr(myEp, 'episodename', "")
self.season = season
self.episode = episode
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer)
self.scene_absolute_number = sickbeard.scene_numbering.get_scene_absolute_numbering(
self.show.indexerid,
self.show.indexer,
self.absolute_number
)
self.scene_season, self.scene_episode = sickbeard.scene_numbering.get_scene_numbering(
self.show.indexerid,
self.show.indexer,
self.season, self.episode
)
self.description = getattr(myEp, 'overview', "")
firstaired = getattr(myEp, 'firstaired', None)
if not firstaired or firstaired == "0000-00-00":
firstaired = str(datetime.date.fromordinal(1))
rawAirdate = [int(x) for x in firstaired.split("-")]
try:
self.airdate = datetime.date(rawAirdate[0], rawAirdate[1], rawAirdate[2])
except (ValueError, IndexError):
logger.log(u"Malformed air date of " + str(firstaired) + " retrieved from " + sickbeard.indexerApi(
self.indexer).name + " for (" + self.show.name + " - " + str(season) + "x" + str(episode) + ")",
logger.WARNING)
# if I'm incomplete on the indexer but I once was complete then just delete myself from the DB for now
if self.indexerid != -1:
self.deleteEpisode()
return False
# grab the episode's indexer id; bail out (and delete the episode) if the indexer didn't provide one
self.indexerid = getattr(myEp, 'id', None)
if self.indexerid is None:
logger.log(u"Failed to retrieve ID from " + sickbeard.indexerApi(self.indexer).name, logger.ERROR)
if self.indexerid != -1:
self.deleteEpisode()
return False
# don't update show status if show dir is missing, unless it's missing on purpose
if not ek.ek(os.path.isdir,
self.show._location) and not sickbeard.CREATE_MISSING_SHOW_DIRS and not sickbeard.ADD_SHOWS_WO_DIR:
logger.log(
u"The show dir is missing, not bothering to change the episode statuses since it'd probably be invalid")
return
if self.location:
logger.log(str(self.show.indexerid) + u": Setting status for " + str(season) + "x" + str(
episode) + " based on status " + str(self.status) + " and existence of " + self.location, logger.DEBUG)
# if we don't have the file
if not ek.ek(os.path.isfile, self.location):
# if it hasn't aired yet set the status to UNAIRED
if self.airdate >= datetime.date.today() and self.status in [SKIPPED, UNAIRED, UNKNOWN, WANTED]:
logger.log(u"Episode airs in the future, marking it " + str(UNAIRED), logger.DEBUG)
self.status = UNAIRED
# if there's no airdate then set it to skipped (and respect ignored)
elif self.airdate == datetime.date.fromordinal(1):
if self.status == IGNORED:
logger.log(u"Episode has no air date, but it's already marked as ignored", logger.DEBUG)
else:
logger.log(u"Episode has no air date, automatically marking it skipped", logger.DEBUG)
self.status = SKIPPED
# if we don't have the file and the airdate is in the past
else:
if self.status == UNAIRED:
if self.season > 0:
self.status = WANTED
else:
self.status = SKIPPED
# if we somehow are still UNKNOWN then just use the shows defined default status or SKIPPED
elif self.status == UNKNOWN:
self.status = self.show.default_ep_status
else:
logger.log(
u"Not touching status because we have no ep file, the airdate is in the past, and the status is " + str(
self.status), logger.DEBUG)
# if we have a media file then it's downloaded
elif sickbeard.helpers.isMediaFile(self.location):
# leave propers alone, you have to either post-process them or manually change them back
if self.status not in Quality.SNATCHED_PROPER + Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED]:
logger.log(
u"5 Status changes from " + str(self.status) + " to " + str(Quality.statusFromName(self.location)),
logger.DEBUG)
self.status = Quality.statusFromName(self.location, anime=self.show.is_anime)
# shouldn't get here probably
else:
logger.log(u"6 Status changes from " + str(self.status) + " to " + str(UNKNOWN), logger.DEBUG)
self.status = UNKNOWN
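# parse the episode's .nfo file (if any) to fill in name, numbering, plot and airdate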
def loadFromNFO(self, location):
if not ek.ek(os.path.isdir, self.show._location):
logger.log(
str(self.show.indexerid) + u": The show dir is missing, not bothering to try loading the episode NFO")
return
logger.log(
str(self.show.indexerid) + u": Loading episode details from the NFO file associated with " + location,
logger.DEBUG)
self.location = location
if self.location != "":
if self.status == UNKNOWN:
if sickbeard.helpers.isMediaFile(self.location):
logger.log(u"7 Status changes from " + str(self.status) + " to " + str(
Quality.statusFromName(self.location, anime=self.show.is_anime)), logger.DEBUG)
self.status = Quality.statusFromName(self.location, anime=self.show.is_anime)
nfoFile = sickbeard.helpers.replaceExtension(self.location, "nfo")
logger.log(str(self.show.indexerid) + u": Using NFO name " + nfoFile, logger.DEBUG)
if ek.ek(os.path.isfile, nfoFile):
try:
showXML = etree.ElementTree(file=nfoFile)
except (SyntaxError, ValueError), e:
logger.log(u"Error loading the NFO, backing up the NFO and skipping for now: " + ex(e),
logger.ERROR) # TODO: figure out what's wrong and fix it
try:
ek.ek(os.rename, nfoFile, nfoFile + ".old")
except Exception, e:
logger.log(
u"Failed to rename your episode's NFO file - you need to delete it or fix it: " + ex(e),
logger.ERROR)
raise exceptions.NoNFOException("Error in NFO format")
for epDetails in showXML.getiterator('episodedetails'):
if epDetails.findtext('season') is None or int(epDetails.findtext('season')) != self.season or \
epDetails.findtext('episode') is None or int(
epDetails.findtext('episode')) != self.episode:
logger.log(str(
self.show.indexerid) + u": NFO has an <episodedetails> block for a different episode - wanted " + str(
self.season) + "x" + str(self.episode) + " but got " + str(
epDetails.findtext('season')) + "x" + str(epDetails.findtext('episode')), logger.DEBUG)
continue
if epDetails.findtext('title') is None or epDetails.findtext('aired') is None:
raise exceptions.NoNFOException("Error in NFO format (missing episode title or airdate)")
self.name = epDetails.findtext('title')
self.episode = int(epDetails.findtext('episode'))
self.season = int(epDetails.findtext('season'))
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer)
self.scene_absolute_number = sickbeard.scene_numbering.get_scene_absolute_numbering(
self.show.indexerid,
self.show.indexer,
self.absolute_number
)
self.scene_season, self.scene_episode = sickbeard.scene_numbering.get_scene_numbering(
self.show.indexerid,
self.show.indexer,
self.season, self.episode
)
self.description = epDetails.findtext('plot')
if self.description is None:
self.description = ""
if epDetails.findtext('aired'):
rawAirdate = [int(x) for x in epDetails.findtext('aired').split("-")]
self.airdate = datetime.date(rawAirdate[0], rawAirdate[1], rawAirdate[2])
else:
self.airdate = datetime.date.fromordinal(1)
self.hasnfo = True
else:
self.hasnfo = False
if ek.ek(os.path.isfile, sickbeard.helpers.replaceExtension(nfoFile, "tbn")):
self.hastbn = True
else:
self.hastbn = False
def __str__(self):
toReturn = ""
toReturn += str(self.show.name) + " - " + str(self.season) + "x" + str(self.episode) + " - " + str(
self.name) + "\n"
toReturn += "location: " + str(self.location) + "\n"
toReturn += "description: " + str(self.description) + "\n"
toReturn += "subtitles: " + str(",".join(self.subtitles)) + "\n"
toReturn += "subtitles_searchcount: " + str(self.subtitles_searchcount) + "\n"
toReturn += "subtitles_lastsearch: " + str(self.subtitles_lastsearch) + "\n"
toReturn += "airdate: " + str(self.airdate.toordinal()) + " (" + str(self.airdate) + ")\n"
toReturn += "hasnfo: " + str(self.hasnfo) + "\n"
toReturn += "hastbn: " + str(self.hastbn) + "\n"
toReturn += "status: " + str(self.status) + "\n"
return toReturn
def createMetaFiles(self):
if not ek.ek(os.path.isdir, self.show._location):
logger.log(str(self.show.indexerid) + u": The show dir is missing, not bothering to try to create metadata")
return
self.createNFO()
self.createThumbnail()
if self.checkForMetaFiles():
self.saveToDB()
def createNFO(self):
result = False
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.create_episode_metadata(self) or result
return result
def createThumbnail(self):
result = False
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.create_episode_thumb(self) or result
return result
def deleteEpisode(self):
logger.log(u"Deleting " + self.show.name + " " + str(self.season) + "x" + str(self.episode) + " from the DB",
logger.DEBUG)
# remove myself from the show dictionary
if self.show.getEpisode(self.season, self.episode, noCreate=True) == self:
logger.log(u"Removing myself from my show's list", logger.DEBUG)
del self.show.episodes[self.season][self.episode]
# delete myself from the DB
logger.log(u"Deleting myself from the database", logger.DEBUG)
myDB = db.DBConnection()
sql = "DELETE FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?"
myDB.action(sql, [self.show.indexerid, self.season, self.episode])
raise exceptions.EpisodeDeletedException()
def get_sql(self, forceSave=False):
"""
Creates SQL queue for this episode if any of its data has been changed since the last save.
forceSave: If True it will create SQL queue even if no data has been changed since the
last save (aka if the record is not dirty).
"""
if not self.dirty and not forceSave:
logger.log(str(self.show.indexerid) + u": Not creating SQL queue - record is not dirty", logger.DEBUG)
return
myDB = db.DBConnection()
rows = myDB.select(
'SELECT episode_id FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?',
[self.show.indexerid, self.season, self.episode])
epID = None
if rows:
epID = int(rows[0]['episode_id'])
if epID:
# use a custom update method to get the data into the DB for existing records.
return [
"UPDATE tv_episodes SET indexerid = ?, indexer = ?, name = ?, description = ?, subtitles = ?, "
"subtitles_searchcount = ?, subtitles_lastsearch = ?, airdate = ?, hasnfo = ?, hastbn = ?, status = ?, "
"location = ?, file_size = ?, release_name = ?, is_proper = ?, showid = ?, season = ?, episode = ?, "
"absolute_number = ?, version = ?, release_group = ? WHERE episode_id = ?",
[self.indexerid, self.indexer, self.name, self.description, ",".join([sub for sub in self.subtitles]),
self.subtitles_searchcount, self.subtitles_lastsearch, self.airdate.toordinal(), self.hasnfo,
self.hastbn,
self.status, self.location, self.file_size, self.release_name, self.is_proper, self.show.indexerid,
self.season, self.episode, self.absolute_number, self.version, self.release_group, epID]]
else:
# use a custom insert method to get the data into the DB.
return [
"INSERT OR IGNORE INTO tv_episodes (episode_id, indexerid, indexer, name, description, subtitles, "
"subtitles_searchcount, subtitles_lastsearch, airdate, hasnfo, hastbn, status, location, file_size, "
"release_name, is_proper, showid, season, episode, absolute_number, version, release_group) VALUES "
"((SELECT episode_id FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?)"
",?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);",
[self.show.indexerid, self.season, self.episode, self.indexerid, self.indexer, self.name,
self.description,
",".join([sub for sub in self.subtitles]), self.subtitles_searchcount, self.subtitles_lastsearch,
self.airdate.toordinal(), self.hasnfo, self.hastbn, self.status, self.location, self.file_size,
self.release_name, self.is_proper, self.show.indexerid, self.season, self.episode,
self.absolute_number, self.version, self.release_group]]
def saveToDB(self, forceSave=False):
"""
Saves this episode to the database if any of its data has been changed since the last save.
forceSave: If True it will save to the database even if no data has been changed since the
last save (aka if the record is not dirty).
"""
if not self.dirty and not forceSave:
logger.log(str(self.show.indexerid) + u": Not saving episode to db - record is not dirty", logger.DEBUG)
return
logger.log(str(self.show.indexerid) + u": Saving episode details to database", logger.DEBUG)
logger.log(u"STATUS IS " + str(self.status), logger.DEBUG)
newValueDict = {"indexerid": self.indexerid,
"indexer": self.indexer,
"name": self.name,
"description": self.description,
"subtitles": ",".join([sub for sub in self.subtitles]),
"subtitles_searchcount": self.subtitles_searchcount,
"subtitles_lastsearch": self.subtitles_lastsearch,
"airdate": self.airdate.toordinal(),
"hasnfo": self.hasnfo,
"hastbn": self.hastbn,
"status": self.status,
"location": self.location,
"file_size": self.file_size,
"release_name": self.release_name,
"is_proper": self.is_proper,
"absolute_number": self.absolute_number,
"version": self.version,
"release_group": self.release_group
}
controlValueDict = {"showid": self.show.indexerid,
"season": self.season,
"episode": self.episode}
# use a custom update/insert method to get the data into the DB
myDB = db.DBConnection()
myDB.upsert("tv_episodes", newValueDict, controlValueDict)
def fullPath(self):
if self.location == None or self.location == "":
return None
else:
return ek.ek(os.path.join, self.show.location, self.location)
def createStrings(self, pattern=None):
patterns = [
'%S.N.S%SE%0E',
'%S.N.S%0SE%E',
'%S.N.S%SE%E',
'%S.N.S%0SE%0E',
'%SN S%SE%0E',
'%SN S%0SE%E',
'%SN S%SE%E',
'%SN S%0SE%0E'
]
strings = []
if not pattern:
for p in patterns:
strings += [self._format_pattern(p)]
return strings
return self._format_pattern(pattern)
def prettyName(self):
"""
Returns the name of this episode in a "pretty" human-readable format. Used for logging
and notifications and such.
Returns: A string representing the episode's name and season/ep numbers
"""
if self.show.anime and not self.show.scene:
return self._format_pattern('%SN - %AB - %EN')
elif self.show.air_by_date:
return self._format_pattern('%SN - %AD - %EN')
return self._format_pattern('%SN - %Sx%0E - %EN')
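    # Illustrative sketch (not part of the original code): for a regular show,
    # the '%SN - %Sx%0E - %EN' pattern above renders an episode of "Some Show",
    # season 2, episode 3, named "Pilot" as roughly:
    #   Some Show - 2x03 - Pilot
    # Anime shows use the absolute number (%AB) and air-by-date shows the
    # air date (%AD) instead of the season/episode pair.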
def _ep_name(self):
"""
Returns the name of the episode to use during renaming. Combines the names of related episodes.
Eg. "Ep Name (1)" and "Ep Name (2)" becomes "Ep Name"
"Ep Name" and "Other Ep Name" becomes "Ep Name & Other Ep Name"
"""
multiNameRegex = "(.*) \(\d{1,2}\)"
self.relatedEps = sorted(self.relatedEps, key=lambda x: x.episode)
if len(self.relatedEps) == 0:
goodName = self.name
else:
goodName = ''
singleName = True
curGoodName = None
for curName in [self.name] + [x.name for x in self.relatedEps]:
match = re.match(multiNameRegex, curName)
if not match:
singleName = False
break
if curGoodName == None:
curGoodName = match.group(1)
elif curGoodName != match.group(1):
singleName = False
break
if singleName:
goodName = curGoodName
else:
goodName = self.name
for relEp in self.relatedEps:
goodName += " & " + relEp.name
return goodName
def _replace_map(self):
"""
Generates a replacement map for this episode which maps all possible custom naming patterns to the correct
value for this episode.
Returns: A dict with patterns as the keys and their replacement values as the values.
"""
ep_name = self._ep_name()
def dot(name):
return helpers.sanitizeSceneName(name)
def us(name):
return re.sub('[ -]', '_', name)
def release_name(name):
if name:
name = helpers.remove_non_release_groups(helpers.remove_extension(name))
return name
def release_group(show, name):
if name:
name = helpers.remove_non_release_groups(helpers.remove_extension(name))
else:
return ""
try:
np = NameParser(name, showObj=show, naming_pattern=True)
parse_result = np.parse(name)
except (InvalidNameException, InvalidShowException), e:
logger.log(u"Unable to get parse release_group: " + ex(e), logger.DEBUG)
return ''
if not parse_result.release_group:
return ''
return parse_result.release_group
epStatus, epQual = Quality.splitCompositeStatus(self.status) # @UnusedVariable
if sickbeard.NAMING_STRIP_YEAR:
show_name = re.sub("\(\d+\)$", "", self.show.name).rstrip()
else:
show_name = self.show.name
return {
'%SN': show_name,
'%S.N': dot(show_name),
'%S_N': us(show_name),
'%EN': ep_name,
'%E.N': dot(ep_name),
'%E_N': us(ep_name),
'%QN': Quality.qualityStrings[epQual],
'%Q.N': dot(Quality.qualityStrings[epQual]),
'%Q_N': us(Quality.qualityStrings[epQual]),
'%S': str(self.season),
'%0S': '%02d' % self.season,
'%E': str(self.episode),
'%0E': '%02d' % self.episode,
'%XS': str(self.scene_season),
'%0XS': '%02d' % self.scene_season,
'%XE': str(self.scene_episode),
'%0XE': '%02d' % self.scene_episode,
'%AB': '%(#)03d' % {'#': self.absolute_number},
'%XAB': '%(#)03d' % {'#': self.scene_absolute_number},
'%RN': release_name(self.release_name),
'%RG': release_group(self.show, self.release_name),
'%AD': str(self.airdate).replace('-', ' '),
'%A.D': str(self.airdate).replace('-', '.'),
'%A_D': us(str(self.airdate)),
'%A-D': str(self.airdate),
'%Y': str(self.airdate.year),
'%M': str(self.airdate.month),
'%D': str(self.airdate.day),
'%0M': '%02d' % self.airdate.month,
'%0D': '%02d' % self.airdate.day,
'%RT': "PROPER" if self.is_proper else "",
}
def _format_string(self, pattern, replace_map):
"""
Replaces all template strings with the correct value
"""
result_name = pattern
# do the replacements
for cur_replacement in sorted(replace_map.keys(), reverse=True):
result_name = result_name.replace(cur_replacement, helpers.sanitizeFileName(replace_map[cur_replacement]))
result_name = result_name.replace(cur_replacement.lower(),
helpers.sanitizeFileName(replace_map[cur_replacement].lower()))
return result_name
def _format_pattern(self, pattern=None, multi=None, anime_type=None):
"""
Manipulates an episode naming pattern and then fills the template in
"""
if pattern == None:
pattern = sickbeard.NAMING_PATTERN
if multi == None:
multi = sickbeard.NAMING_MULTI_EP
if anime_type == None:
anime_type = sickbeard.NAMING_ANIME
replace_map = self._replace_map()
result_name = pattern
# if there's no release group then replace it with a reasonable facsimile
if not replace_map['%RN']:
if self.show.air_by_date or self.show.sports:
result_name = result_name.replace('%RN', '%S.N.%A.D.%E.N-SiCKRAGE')
result_name = result_name.replace('%rn', '%s.n.%A.D.%e.n-sickrage')
elif anime_type != 3:
result_name = result_name.replace('%RN', '%S.N.%AB.%E.N-SiCKRAGE')
result_name = result_name.replace('%rn', '%s.n.%ab.%e.n-sickrage')
else:
result_name = result_name.replace('%RN', '%S.N.S%0SE%0E.%E.N-SiCKRAGE')
result_name = result_name.replace('%rn', '%s.n.s%0se%0e.%e.n-sickrage')
result_name = result_name.replace('%RG', 'SICKRAGE')
result_name = result_name.replace('%rg', 'sickrage')
logger.log(u"Episode has no release name, replacing it with a generic one: " + result_name, logger.DEBUG)
if not replace_map['%RT']:
result_name = re.sub('([ _.-]*)%RT([ _.-]*)', r'\2', result_name)
# split off ep name part only
name_groups = re.split(r'[\\/]', result_name)
# figure out the double-ep numbering style for each group, if applicable
for cur_name_group in name_groups:
season_format = sep = ep_sep = ep_format = None
season_ep_regex = '''
(?P<pre_sep>[ _.-]*)
((?:s(?:eason|eries)?\s*)?%0?S(?![._]?N))
(.*?)
(%0?E(?![._]?N))
(?P<post_sep>[ _.-]*)
'''
ep_only_regex = '(E?%0?E(?![._]?N))'
# try the normal way
season_ep_match = re.search(season_ep_regex, cur_name_group, re.I | re.X)
ep_only_match = re.search(ep_only_regex, cur_name_group, re.I | re.X)
# if we have a season and episode then collect the necessary data
if season_ep_match:
season_format = season_ep_match.group(2)
ep_sep = season_ep_match.group(3)
ep_format = season_ep_match.group(4)
sep = season_ep_match.group('pre_sep')
if not sep:
sep = season_ep_match.group('post_sep')
if not sep:
sep = ' '
# force 2-3-4 format if they chose to extend
if multi in (NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_PREFIXED):
ep_sep = '-'
regex_used = season_ep_regex
# if there's no season then there's not much choice so we'll just force them to use 03-04-05 style
elif ep_only_match:
season_format = ''
ep_sep = '-'
ep_format = ep_only_match.group(1)
sep = ''
regex_used = ep_only_regex
else:
continue
# we need at least this much info to continue
if not ep_sep or not ep_format:
continue
# start with the ep string, eg. E03
ep_string = self._format_string(ep_format.upper(), replace_map)
for other_ep in self.relatedEps:
# for limited extend we only append the last ep
if multi in (NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_PREFIXED) and other_ep != self.relatedEps[
-1]:
continue
elif multi == NAMING_DUPLICATE:
# add " - S01"
ep_string += sep + season_format
elif multi == NAMING_SEPARATED_REPEAT:
ep_string += sep
# add "E04"
ep_string += ep_sep
if multi == NAMING_LIMITED_EXTEND_E_PREFIXED:
ep_string += 'E'
ep_string += other_ep._format_string(ep_format.upper(), other_ep._replace_map())
if anime_type != 3:
if self.absolute_number == 0:
curAbsolute_number = self.episode
else:
curAbsolute_number = self.absolute_number
if self.season != 0: # dont set absolute numbers if we are on specials !
if anime_type == 1: # this crazy person wants both ! (note: +=)
ep_string += sep + "%(#)03d" % {
"#": curAbsolute_number}
elif anime_type == 2: # total anime freak only need the absolute number ! (note: =)
ep_string = "%(#)03d" % {"#": curAbsolute_number}
for relEp in self.relatedEps:
if relEp.absolute_number != 0:
ep_string += '-' + "%(#)03d" % {"#": relEp.absolute_number}
else:
ep_string += '-' + "%(#)03d" % {"#": relEp.episode}
regex_replacement = None
if anime_type == 2:
regex_replacement = r'\g<pre_sep>' + ep_string + r'\g<post_sep>'
elif season_ep_match:
regex_replacement = r'\g<pre_sep>\g<2>\g<3>' + ep_string + r'\g<post_sep>'
elif ep_only_match:
regex_replacement = ep_string
if regex_replacement:
# fill out the template for this piece and then insert this piece into the actual pattern
cur_name_group_result = re.sub('(?i)(?x)' + regex_used, regex_replacement, cur_name_group)
# cur_name_group_result = cur_name_group.replace(ep_format, ep_string)
# logger.log(u"found "+ep_format+" as the ep pattern using "+regex_used+" and replaced it with "+regex_replacement+" to result in "+cur_name_group_result+" from "+cur_name_group, logger.DEBUG)
result_name = result_name.replace(cur_name_group, cur_name_group_result)
result_name = self._format_string(result_name, replace_map)
logger.log(u"formatting pattern: " + pattern + " -> " + result_name, logger.DEBUG)
return result_name
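    # Illustrative sketch (not part of the original code): given the replace
    # map built above, a call such as
    #   ep._format_pattern('%S.N.S%0SE%0E.%E.N')
    # for "Some Show" S02E03 "Pilot" yields roughly 'Some.Show.S02E03.Pilot';
    # with related episodes and an "extend" multi-ep style the episode part
    # becomes something like 'S02E03-04'.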
def proper_path(self):
"""
Figures out the path where this episode SHOULD live according to the renaming rules, relative from the show dir
"""
anime_type = sickbeard.NAMING_ANIME
if not self.show.is_anime:
anime_type = 3
result = self.formatted_filename(anime_type=anime_type)
# if they want us to flatten it and we're allowed to flatten it then we will
if self.show.flatten_folders and not sickbeard.NAMING_FORCE_FOLDERS:
return result
# if not we append the folder on and use that
else:
result = ek.ek(os.path.join, self.formatted_dir(), result)
return result
def formatted_dir(self, pattern=None, multi=None):
"""
Just the folder name of the episode
"""
if pattern == None:
# we only use ABD if it's enabled, this is an ABD show, AND this is not a multi-ep
if self.show.air_by_date and sickbeard.NAMING_CUSTOM_ABD and not self.relatedEps:
pattern = sickbeard.NAMING_ABD_PATTERN
elif self.show.sports and sickbeard.NAMING_CUSTOM_SPORTS and not self.relatedEps:
pattern = sickbeard.NAMING_SPORTS_PATTERN
elif self.show.anime and sickbeard.NAMING_CUSTOM_ANIME:
pattern = sickbeard.NAMING_ANIME_PATTERN
else:
pattern = sickbeard.NAMING_PATTERN
# split off the dirs only, if they exist
name_groups = re.split(r'[\\/]', pattern)
if len(name_groups) == 1:
return ''
else:
return self._format_pattern(os.sep.join(name_groups[:-1]), multi)
def formatted_filename(self, pattern=None, multi=None, anime_type=None):
"""
Just the filename of the episode, formatted based on the naming settings
"""
if pattern == None:
# we only use ABD if it's enabled, this is an ABD show, AND this is not a multi-ep
if self.show.air_by_date and sickbeard.NAMING_CUSTOM_ABD and not self.relatedEps:
pattern = sickbeard.NAMING_ABD_PATTERN
elif self.show.sports and sickbeard.NAMING_CUSTOM_SPORTS and not self.relatedEps:
pattern = sickbeard.NAMING_SPORTS_PATTERN
elif self.show.anime and sickbeard.NAMING_CUSTOM_ANIME:
pattern = sickbeard.NAMING_ANIME_PATTERN
else:
pattern = sickbeard.NAMING_PATTERN
# split off the dirs only, if they exist
name_groups = re.split(r'[\\/]', pattern)
return self._format_pattern(name_groups[-1], multi, anime_type)
def rename(self):
"""
Renames an episode file and all related files to the location and filename as specified
in the naming settings.
"""
if not ek.ek(os.path.isfile, self.location):
logger.log(u"Can't perform rename on " + self.location + " when it doesn't exist, skipping", logger.WARNING)
return
proper_path = self.proper_path()
absolute_proper_path = ek.ek(os.path.join, self.show.location, proper_path)
absolute_current_path_no_ext, file_ext = ek.ek(os.path.splitext, self.location)
absolute_current_path_no_ext_length = len(absolute_current_path_no_ext)
related_subs = []
current_path = absolute_current_path_no_ext
if absolute_current_path_no_ext.startswith(self.show.location):
current_path = absolute_current_path_no_ext[len(self.show.location):]
logger.log(u"Renaming/moving episode from the base path " + self.location + " to " + absolute_proper_path,
logger.DEBUG)
# if it's already named correctly then don't do anything
if proper_path == current_path:
logger.log(str(self.indexerid) + u": File " + self.location + " is already named correctly, skipping",
logger.DEBUG)
return
related_files = postProcessor.PostProcessor(self.location).list_associated_files(
self.location)
if self.show.subtitles and sickbeard.SUBTITLES_DIR != '':
related_subs = postProcessor.PostProcessor(self.location).list_associated_files(sickbeard.SUBTITLES_DIR,
subtitles_only=True)
absolute_proper_subs_path = ek.ek(os.path.join, sickbeard.SUBTITLES_DIR, self.formatted_filename())
logger.log(u"Files associated to " + self.location + ": " + str(related_files), logger.DEBUG)
# move the ep file
result = helpers.rename_ep_file(self.location, absolute_proper_path, absolute_current_path_no_ext_length)
# move related files
for cur_related_file in related_files:
cur_result = helpers.rename_ep_file(cur_related_file, absolute_proper_path,
absolute_current_path_no_ext_length)
if not cur_result:
logger.log(str(self.indexerid) + u": Unable to rename file " + cur_related_file, logger.ERROR)
for cur_related_sub in related_subs:
absolute_proper_subs_path = ek.ek(os.path.join, sickbeard.SUBTITLES_DIR, self.formatted_filename())
cur_result = helpers.rename_ep_file(cur_related_sub, absolute_proper_subs_path,
absolute_current_path_no_ext_length)
if not cur_result:
logger.log(str(self.indexerid) + u": Unable to rename file " + cur_related_sub, logger.ERROR)
# save the ep
with self.lock:
if result:
self.location = absolute_proper_path + file_ext
for relEp in self.relatedEps:
relEp.location = absolute_proper_path + file_ext
# in case something changed with the metadata just do a quick check
for curEp in [self] + self.relatedEps:
curEp.checkForMetaFiles()
        # save any changes to the database
sql_l = []
with self.lock:
for relEp in [self] + self.relatedEps:
sql_l.append(relEp.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
def airdateModifyStamp(self):
"""
Make the modify date and time of a file reflect the show air date and time.
Note: Also called from postProcessor
"""
hr = min = 0
airs = re.search('.*?(\d{1,2})(?::\s*?(\d{2}))?\s*(pm)?', self.show.airs, re.I)
if airs:
hr = int(airs.group(1))
hr = (12 + hr, hr)[None is airs.group(3)]
hr = (hr, hr - 12)[0 == hr % 12 and 0 != hr]
min = int((airs.group(2), min)[None is airs.group(2)])
airtime = datetime.time(hr, min)
airdatetime = datetime.datetime.combine(self.airdate, airtime)
filemtime = datetime.datetime.fromtimestamp(os.path.getmtime(self.location))
if filemtime != airdatetime:
import time
airdatetime = airdatetime.timetuple()
logger.log(str(self.show.indexerid) + u": About to modify date of '" + self.location
+ "' to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime), logger.DEBUG)
try:
if helpers.touchFile(self.location, time.mktime(airdatetime)):
logger.log(str(self.show.indexerid) + u": Changed modify date of " + os.path.basename(self.location)
+ " to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime))
else:
logger.log(str(self.show.indexerid) + u": Unable to modify date of " + os.path.basename(self.location)
+ " to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime), logger.ERROR)
except:
logger.log(str(self.show.indexerid) + u": Failed to modify date of '" + os.path.basename(self.location)
+ "' to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime), logger.ERROR)
def __getstate__(self):
d = dict(self.__dict__)
del d['lock']
return d
def __setstate__(self, d):
d['lock'] = threading.Lock()
self.__dict__.update(d)
| gpl-3.0 | 470,947,764,053,170,940 | 42.345894 | 221 | 0.563395 | false |
baloo/shinken | shinken/modules/ip_tag_arbiter/__init__.py | 1 | 1426 | #!/usr/bin/python
#Copyright (C) 2009 Gabes Jean, [email protected]
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
properties = {
'daemons' : ['arbiter'],
'type' : 'ip_tag',
}
#called by the plugin manager to get an instance of this module
def get_instance(plugin):
    #print "Get an ip_tag arbiter for plugin %s" % plugin.get_name()
# First try to import
try:
from ip_tag_arbiter import Ip_Tag_Arbiter
    except ImportError, exp:
        print "Warning : the plugin type %s is unavailable : %s" % ('ip_tag', exp)
return None
# Catch errors
ip_range = plugin.ip_range
prop = plugin.property
value = plugin.value
method = getattr(plugin, 'method', 'replace')
instance = Ip_Tag_Arbiter(plugin, ip_range, prop, value, method)
return instance
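# Illustrative sketch (not part of the original module): get_instance() is
# called by the arbiter's module manager for a module definition roughly like
# (attribute names taken from the code above, syntax assumed):
#
#   define module {
#       module_name   IpTagger
#       module_type   ip_tag
#       ip_range      192.168.0.0/24
#       property      poller_tag
#       value         dmz
#       method        replace
#   }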
| agpl-3.0 | -5,142,636,619,774,961,000 | 30.688889 | 81 | 0.699158 | false |
jcushman/pywb | pywb/warc/archiveiterator.py | 1 | 14850 | from pywb.utils.timeutils import iso_date_to_timestamp
from pywb.utils.bufferedreaders import DecompressingBufferedReader
from pywb.utils.canonicalize import canonicalize
from pywb.utils.loaders import extract_post_query, append_post_query
from recordloader import ArcWarcRecordLoader
import hashlib
import base64
import re
try: # pragma: no cover
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
#=================================================================
class ArchiveIterator(object):
""" Iterate over records in WARC and ARC files, both gzip chunk
compressed and uncompressed
The indexer will automatically detect format, and decompress
if necessary.
"""
GZIP_ERR_MSG = """
ERROR: Non-chunked gzip file detected, gzip block continues
beyond single record.
This file is probably not a multi-chunk gzip but a single gzip file.
To allow seek, a gzipped {1} must have each record compressed into
a single gzip chunk and concatenated together.
This file is likely still valid and you can use it by decompressing it:
gunzip myfile.{0}.gz
You can then also use the 'warc2warc' tool from the 'warc-tools'
package which will create a properly chunked gzip file:
warc2warc -Z myfile.{0} > myfile.{0}.gz
"""
def __init__(self, fileobj, no_record_parse=False,
verify_http=False):
self.fh = fileobj
self.loader = ArcWarcRecordLoader(verify_http=verify_http)
self.reader = None
self.offset = 0
self.known_format = None
self.member_info = None
self.no_record_parse = no_record_parse
def iter_records(self, block_size=16384):
""" iterate over each record
"""
decomp_type = 'gzip'
self.reader = DecompressingBufferedReader(self.fh,
block_size=block_size)
self.offset = self.fh.tell()
self.next_line = None
is_valid = True
while True:
try:
record = self._next_record(self.next_line)
if not is_valid:
self._raise_err()
yield record
except EOFError:
break
self.read_to_end(record)
if self.reader.decompressor:
is_valid = self.reader.read_next_member()
def _raise_err(self):
frmt = 'warc/arc'
if self.known_format:
frmt = self.known_format
frmt_up = frmt.upper()
msg = self.GZIP_ERR_MSG.format(frmt, frmt_up)
raise Exception(msg)
def _consume_blanklines(self):
""" Consume blank lines that are between records
- For warcs, there are usually 2
- For arcs, may be 1 or 0
- For block gzipped files, these are at end of each gzip envelope
and are included in record length which is the full gzip envelope
- For uncompressed, they are between records and so are NOT part of
the record length
        count empty_size so that it can be subtracted from
the record length for uncompressed
"""
empty_size = 0
while True:
line = self.reader.readline()
if len(line) == 0:
return None, empty_size
if line.rstrip() == '':
empty_size += len(line)
continue
return line, empty_size
def read_to_end(self, record, compute_digest=False):
""" Read remainder of the stream
If a digester is included, update it
with the data read
"""
# already at end of this record, don't read until it is consumed
if self.member_info:
return None
if compute_digest:
digester = hashlib.sha1()
else:
digester = None
num = 0
curr_offset = self.offset
while True:
b = record.stream.read(8192)
if not b:
break
num += len(b)
if digester:
digester.update(b)
"""
- For compressed files, blank lines are consumed
since they are part of record length
- For uncompressed files, blank lines are read later,
and not included in the record length
"""
#if self.reader.decompressor:
self.next_line, empty_size = self._consume_blanklines()
self.offset = self.fh.tell() - self.reader.rem_length()
#if self.offset < 0:
# raise Exception('Not Gzipped Properly')
if self.next_line:
self.offset -= len(self.next_line)
length = self.offset - curr_offset
if not self.reader.decompressor:
length -= empty_size
if compute_digest:
digest = base64.b32encode(digester.digest())
else:
digest = None
self.member_info = (curr_offset, length, digest)
#return self.member_info
#return next_line
def _next_record(self, next_line):
""" Use loader to parse the record from the reader stream
Supporting warc and arc records
"""
record = self.loader.parse_record_stream(self.reader,
next_line,
self.known_format,
self.no_record_parse)
self.member_info = None
# Track known format for faster parsing of other records
self.known_format = record.format
return record
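# Illustrative sketch (not part of the original module): minimal use of
# ArchiveIterator over a record-compressed WARC file:
#
#   with open('example.warc.gz', 'rb') as fh:
#       for record in ArchiveIterator(fh).iter_records():
#           print record.rec_type, record.rec_headers.get_header('WARC-Target-Uri')
#
# Each yielded record exposes rec_type, rec_headers, status_headers and a
# readable stream; the iterator consumes the remainder of each record itself
# via read_to_end() before moving on.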
#=================================================================
class ArchiveIndexEntryMixin(object):
MIME_RE = re.compile('[; ]')
def reset_entry(self):
self['urlkey'] = ''
def extract_mime(self, mime, def_mime='unk'):
""" Utility function to extract mimetype only
from a full content type, removing charset settings
"""
self['mime'] = def_mime
if mime:
self['mime'] = self.MIME_RE.split(mime, 1)[0]
def extract_status(self, status_headers):
""" Extract status code only from status line
"""
self['status'] = status_headers.get_statuscode()
if not self['status']:
self['status'] = '-'
elif self['status'] == '204' and 'Error' in status_headers.statusline:
self['status'] = '-'
def set_rec_info(self, offset, length, digest):
if digest:
self['digest'] = digest
self['length'] = str(length)
self['offset'] = str(offset)
def merge_request_data(self, other, options):
surt_ordered = options.get('surt_ordered', True)
if other.record.rec_type != 'request':
return False
# two requests, not correct
if self.record.rec_type == 'request':
return False
# merge POST/PUT body query
post_query = other.get('_post_query')
if post_query:
url = append_post_query(self['url'], post_query)
self['urlkey'] = canonicalize(url, surt_ordered)
other['urlkey'] = self['urlkey']
referer = other.record.status_headers.get_header('referer')
if referer:
self['_referer'] = referer
return True
#=================================================================
class DefaultRecordIter(object):
def __init__(self, **options):
self.options = options
self.entry_cache = {}
def _create_index_entry(self, rec_type):
try:
entry = self.entry_cache[rec_type]
entry.reset_entry()
except:
if self.options.get('cdxj'):
entry = OrderedArchiveIndexEntry()
else:
entry = ArchiveIndexEntry()
self.entry_cache[rec_type] = entry
return entry
def create_record_iter(self, arcv_iter):
append_post = self.options.get('append_post')
include_all = self.options.get('include_all')
block_size = self.options.get('block_size', 16384)
surt_ordered = self.options.get('surt_ordered', True)
minimal = self.options.get('minimal')
append_post = self.options.get('append_post')
if append_post and minimal:
raise Exception('Sorry, minimal index option and ' +
'append POST options can not be used together')
for record in arcv_iter.iter_records(block_size):
entry = None
if not include_all and not minimal and (record.status_headers.get_statuscode() == '-'):
continue
if record.format == 'warc':
if (record.rec_type in ('request', 'warcinfo') and
not include_all and
not append_post):
continue
elif (not include_all and
record.content_type == 'application/warc-fields'):
continue
entry = self.parse_warc_record(record)
elif record.format == 'arc':
entry = self.parse_arc_record(record)
if not entry:
continue
if entry.get('url') and not entry.get('urlkey'):
entry['urlkey'] = canonicalize(entry['url'], surt_ordered)
compute_digest = False
if (entry.get('digest', '-') == '-' and
record.rec_type not in ('revisit', 'request', 'warcinfo')):
compute_digest = True
elif not minimal and record.rec_type == 'request' and append_post:
method = record.status_headers.protocol
len_ = record.status_headers.get_header('Content-Length')
post_query = extract_post_query(method,
entry.get('mime'),
len_,
record.stream)
entry['_post_query'] = post_query
arcv_iter.read_to_end(record, compute_digest)
entry.set_rec_info(*arcv_iter.member_info)
entry.record = record
yield entry
def join_request_records(self, entry_iter):
prev_entry = None
for entry in entry_iter:
if not prev_entry:
prev_entry = entry
continue
# check for url match
if (entry['url'] != prev_entry['url']):
pass
# check for concurrency also
elif (entry.record.rec_headers.get_header('WARC-Concurrent-To') !=
prev_entry.record.rec_headers.get_header('WARC-Record-ID')):
pass
elif (entry.merge_request_data(prev_entry, self.options) or
prev_entry.merge_request_data(entry, self.options)):
yield prev_entry
yield entry
prev_entry = None
continue
yield prev_entry
prev_entry = entry
if prev_entry:
yield prev_entry
#=================================================================
def parse_warc_record(self, record):
""" Parse warc record
"""
entry = self._create_index_entry(record.rec_type)
if record.rec_type == 'warcinfo':
entry['url'] = record.rec_headers.get_header('WARC-Filename')
entry['urlkey'] = entry['url']
entry['_warcinfo'] = record.stream.read(record.length)
return entry
entry['url'] = record.rec_headers.get_header('WARC-Target-Uri')
# timestamp
entry['timestamp'] = iso_date_to_timestamp(record.rec_headers.
get_header('WARC-Date'))
# mime
if record.rec_type == 'revisit':
entry['mime'] = 'warc/revisit'
elif self.options.get('minimal'):
entry['mime'] = '-'
else:
def_mime = '-' if record.rec_type == 'request' else 'unk'
entry.extract_mime(record.status_headers.
get_header('Content-Type'),
def_mime)
# status -- only for response records (by convention):
if record.rec_type == 'response' and not self.options.get('minimal'):
entry.extract_status(record.status_headers)
else:
entry['status'] = '-'
# digest
digest = record.rec_headers.get_header('WARC-Payload-Digest')
entry['digest'] = digest
if digest and digest.startswith('sha1:'):
entry['digest'] = digest[len('sha1:'):]
elif not entry.get('digest'):
entry['digest'] = '-'
# optional json metadata, if present
metadata = record.rec_headers.get_header('WARC-Json-Metadata')
if metadata:
entry['metadata'] = metadata
return entry
#=================================================================
def parse_arc_record(self, record):
""" Parse arc record
"""
if record.rec_type == 'arc_header':
return None
url = record.rec_headers.get_header('uri')
url = url.replace('\r', '%0D')
url = url.replace('\n', '%0A')
# replace formfeed
url = url.replace('\x0c', '%0C')
# replace nulls
url = url.replace('\x00', '%00')
entry = self._create_index_entry(record.rec_type)
entry['url'] = url
# timestamp
entry['timestamp'] = record.rec_headers.get_header('archive-date')
if len(entry['timestamp']) > 14:
entry['timestamp'] = entry['timestamp'][:14]
if not self.options.get('minimal'):
# mime
entry.extract_mime(record.rec_headers.get_header('content-type'))
# status
entry.extract_status(record.status_headers)
# digest
entry['digest'] = '-'
return entry
def __call__(self, fh):
aiter = ArchiveIterator(fh, self.options.get('minimal', False),
self.options.get('verify_http', False))
entry_iter = self.create_record_iter(aiter)
if self.options.get('append_post'):
entry_iter = self.join_request_records(entry_iter)
for entry in entry_iter:
if (entry.record.rec_type in ('request', 'warcinfo') and
not self.options.get('include_all')):
continue
yield entry
class ArchiveIndexEntry(ArchiveIndexEntryMixin, dict):
pass
class OrderedArchiveIndexEntry(ArchiveIndexEntryMixin, OrderedDict):
pass
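# Illustrative sketch (not part of the original module): DefaultRecordIter is
# a callable that turns an open archive file into index entries, e.g.
#
#   indexer = DefaultRecordIter(surt_ordered=True)
#   with open('example.warc.gz', 'rb') as fh:
#       for entry in indexer(fh):
#           print entry['urlkey'], entry['timestamp'], entry['offset'], entry['length']
#
# The keyword arguments mirror the self.options keys referenced above
# (include_all, minimal, append_post, cdxj, verify_http, ...).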
| gpl-3.0 | 1,864,010,479,243,279,400 | 30.395349 | 99 | 0.535825 | false |
matpalm/cartpoleplusplus | dqn_cartpole.py | 1 | 2303 | #!/usr/bin/env python
# copy pasta from https://github.com/matthiasplappert/keras-rl/blob/master/examples/dqn_cartpole.py
# with some extra arg parsing
import numpy as np
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
import bullet_cartpole
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--num-train', type=int, default=100)
parser.add_argument('--num-eval', type=int, default=0)
parser.add_argument('--load-file', type=str, default=None)
parser.add_argument('--save-file', type=str, default=None)
bullet_cartpole.add_opts(parser)
opts = parser.parse_args()
print "OPTS", opts
ENV_NAME = 'BulletCartpole'
# Get the environment and extract the number of actions.
env = bullet_cartpole.BulletCartpole(opts=opts, discrete_actions=True)
nb_actions = env.action_space.n
# Next, we build a very simple model.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(32))
model.add(Activation('tanh'))
#model.add(Dense(16))
#model.add(Activation('relu'))
#model.add(Dense(16))
#model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
memory = SequentialMemory(limit=50000)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
if opts.load_file is not None:
print "loading weights from from [%s]" % opts.load_file
dqn.load_weights(opts.load_file)
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
dqn.fit(env, nb_steps=opts.num_train, visualize=True, verbose=2)
# After training is done, we save the final weights.
if opts.save_file is not None:
print "saving weights to [%s]" % opts.save_file
dqn.save_weights(opts.save_file, overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
dqn.test(env, nb_episodes=opts.num_eval, visualize=True)
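# Illustrative sketch (not part of the original script): a typical invocation,
# using the flags defined above (bullet_cartpole.add_opts adds the env flags):
#
#   ./dqn_cartpole.py --num-train 50000 --num-eval 10 --save-file dqn_weights.h5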
| mit | 8,919,449,101,382,390,000 | 31.9 | 99 | 0.752497 | false |
CTSNE/NodeDefender | NodeDefender/config/database.py | 1 | 3997 | import NodeDefender
import flask_migrate
import sqlalchemy
import os
from flask_sqlalchemy import SQLAlchemy
import alembic
import shutil
import pip
default_config = {'engine' : '',
'username' : '',
'password' : '',
'host' : '',
'port' : '',
'database' : '',
'filepath' : 'nodedefender.sql'}
config = default_config.copy()
def load_config(parser):
config.update(parser['DATABASE'])
NodeDefender.app.config.update(
DATABASE_ENGINE=config['engine'],
DATABASE_USERNAME=config['username'],
DATABASE_PASSWORD=config['password'],
DATABASE_HOST=config['host'],
DATABASE_PORT=config['port'],
DATABASE_DATABASE=config['database'],
DATABASE_FILEPATH=config['filepath'])
if NodeDefender.app.testing:
NodeDefender.app.config.update(
SQLALCHEMY_DATABASE_URI = "sqlite:///:memory:")
else:
NodeDefender.app.config.update(
SQLALCHEMY_DATABASE_URI = get_uri())
return config
def test_database():
app = NodeDefender.app
app.config.update(
SQLALCHEMY_DATABASE_URI = get_uri())
db = NodeDefender.db.sql.load(app)
folder = NodeDefender.config.migrations_folder
migrate = flask_migrate.Migrate(app, db, folder)
try:
init_migrations(app)
except alembic.util.exc.CommandError:
drop_alembic_table(db)
remove_migrations_folder(folder)
init_migrations(app)
try:
migrate_database(app)
upgrade_database(app)
except Exception:
pass
return True
def drop_alembic_table(db):
query = sqlalchemy.text("drop table alembic_version")
try:
db.engine.execute(query)
except Exception:
pass
return True
def remove_migrations_folder(folder):
try:
shutil.rmtree(folder)
except FileNotFoundError:
pass
return True
def init_migrations(app):
with app.app_context():
flask_migrate.init()
def migrate_database(app):
with app.app_context():
flask_migrate.migrate()
def upgrade_database(app):
with app.app_context():
flask_migrate.upgrade()
def install_mysql():
    # Ensure the MySQL driver is importable, installing it via pip on demand.
    try:
        import pymysql
    except ImportError:
        if pip.main(['install', 'pymysql']) != 0:
            return False
    return True
def install_postgresql():
    # Ensure the PostgreSQL driver is importable, installing it via pip on demand.
    try:
        import psycopg2
    except ImportError:
        if pip.main(['install', 'psycopg2']) != 0:
            return False
    return True
def get_uri():
if config['engine'] == 'sqlite':
return 'sqlite:///' + config['filepath']
username = config['username']
password = config['password']
host = config['host']
port = config['port']
database = config['database']
if config['engine'] == 'mysql':
return 'mysql+pymysql://'+username+':'+password+'@'+host+':'+port+\
'/'+database
elif config['engine'] == 'postgresql':
return 'postgresql://'+username+':'+password+'@'+host+':'+port+\
                '/'+database
return "sqlite:///:memory:"
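# Illustrative sketch (not part of the original module): depending on the
# configured engine, get_uri() yields URIs of the form
#   sqlite:///<filepath>                    (e.g. sqlite:///nodedefender.sql)
#   mysql+pymysql://user:password@host:port/database
#   postgresql://user:password@host:port/database
# and falls back to an in-memory SQLite URI when no engine matches.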
def set_default():
for key, value in default_config.items():
NodeDefender.config.parser['DATABASE'][key] = str(value)
return True
def set(**kwargs):
for key, value in kwargs.items():
if key not in config:
continue
if key == "filepath" and value is not None:
value = os.path.join(NodeDefender.config.datafolder, value)
if key == 'engine' and value == 'postgresql':
if not install_postgresql():
raise ImportError("Not able to install PostgreSQL\
Please verify that libpq-dev is installed")
if key == 'engine' and value == 'mysql':
if not install_mysql():
raise ImportError("Not able to install MySQL")
config[key] = str(value)
test_database()
return True
def write():
NodeDefender.config.parser['DATABASE'] = config
NodeDefender.config.write()
| mit | -6,365,526,685,889,680,000 | 27.347518 | 75 | 0.597698 | false |
mufaddalq/cloudstack-datera-driver | tools/marvin/marvin/deployAndRun.py | 1 | 5113 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tcExecuteEngine import TestCaseExecuteEngine
import sys
import os
import traceback
import time
from argparse import ArgumentParser
from marvinInit import MarvinInit
from marvin.codes import (SUCCESS,
FAILED,
EXCEPTION,
UNKNOWN_ERROR
)
parser = None
def printAndExit():
'''
Prints pretty message for parser and exit
'''
global parser
if parser is not None:
parser.print_usage()
exit(1)
def parseAndCheck():
'''
Parses,reads the options and verifies for the config file
'''
global parser
parser = ArgumentParser()
parser.add_argument("-d", "--tcpath", dest="tcpath",
help="the test case directory or file path")
parser.add_argument("-c", "--config", action="store",
default="./datacenterCfg", dest="config",
                        help="the path where the json config file is generated,\
by default is ./datacenterCfg")
parser.add_argument("-l", "--load", dest="load", action="store_true",
help="only load config, do not deploy,\
it will only run testcase")
parser.add_argument("-n", "--num", dest="number",
help="how many times you want to run the tests")
options = parser.parse_args()
cfg_file = options.config
tc_path = options.tcpath
load_flag = options.load
num_iter = 1 if options.number is None else int(options.number)
'''
Check if the config file is None or not and exit accordingly
'''
if cfg_file is None:
printAndExit()
return {"cfg_file": cfg_file,
"load_flag": load_flag,
"tc_path": tc_path,
"num_iter": num_iter}
def startMarvin(cfg_file, load_flag):
'''
Initialize the Marvin
'''
try:
obj_marvininit = MarvinInit(cfg_file, load_flag)
if obj_marvininit.init() == SUCCESS:
testClient = obj_marvininit.getTestClient()
tcRunLogger = obj_marvininit.getLogger()
parsedConfig = obj_marvininit.getParsedConfig()
debugStream = obj_marvininit.getDebugFile()
return {"tc_client": testClient,
"tc_runlogger": tcRunLogger,
"tc_parsedcfg": parsedConfig,
"tc_debugstream": debugStream}
else:
print "\nMarvin Initialization Failed"
exit(1)
except Exception, e:
print "\n Exception occurred while starting Marvin %s" % str(e)
exit(1)
def runTCs(num_iter, inp1, inp2):
'''
Run Test Cases based upon number of iterations
'''
n = 0
while(n < num_iter):
engine = TestCaseExecuteEngine(inp2["tc_client"],
inp2["tc_parsedcfg"],
inp2["tc_runlogger"],
inp2["tc_debugstream"])
if inp1["tc_file"] is not None:
engine.loadTestsFromFile(inp1["tc_file"])
else:
engine.loadTestsFromDir(inp1["tc_dir"])
engine.run()
n = n + 1
def checkTCPath(tc_path):
'''
Verifies if the tc_path is a folder or file and its existence
'''
ret = {"tc_file": None, "tc_dir": None}
check = True
if tc_path is None:
printAndExit()
else:
if os.path.isfile(tc_path):
ret["tc_file"] = tc_path
elif os.path.isdir(tc_path):
ret["tc_dir"] = tc_path
else:
check = False
if check is False:
        print "\nTC Path is Invalid. So Exiting"
exit(1)
return ret
if __name__ == "__main__":
'''
1. Parse and Check
'''
out1 = parseAndCheck()
print "\nStep1 :Parsing Options And Check Went Fine"
'''
2. Start Marvin
'''
out2 = startMarvin(out1["cfg_file"], out1["load_flag"])
print "\nStep2: Marvin Initialization Went Fine"
'''
3. Check TC folder or Module and Path existence
'''
out3 = checkTCPath(out1["tc_path"])
print "\nStep3: TC Path Check Went Fine"
'''
4. Run TCs
'''
runTCs(out1["num_iter"], out3, out2)
print "\nStep4: TC Running Finished"
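# Illustrative sketch (not part of the original script): a typical run points
# at the generated json config and a test case file or directory, e.g.
#
#   python deployAndRun.py -c ./datacenterCfg -d /path/to/testcases -n 2
#
# Passing -l skips deployment and only loads the config before running tests.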
| apache-2.0 | 4,409,944,793,403,968,000 | 29.616766 | 77 | 0.580677 | false |
franciscouzo/crosswordly | app/app/models.py | 1 | 1735 | import string
from django.db import models
from django.contrib import admin
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
class UserProfile(models.Model):
user = models.OneToOneField(User)
score = models.IntegerField(default=0)
stars = models.IntegerField(default=0)
letters = models.CharField(max_length=8, blank=True)
x = models.IntegerField(default=0)
y = models.IntegerField(default=0)
def __str__(self):
return self.user.username
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.get_or_create(user=instance)
class Word(models.Model):
word = models.CharField(max_length=15, unique=True)
used = models.IntegerField(default=0)
def __str__(self):
return self.word
class WordHistory(models.Model):
user = models.ForeignKey(User)
word = models.ForeignKey(Word)
score = models.IntegerField()
x = models.IntegerField()
y = models.IntegerField()
def __str__(self):
return '{} - {} ({}, {})'.format(self.user, self.word, self.x, self.y)
class Cell(models.Model):
user = models.ForeignKey(User)
x = models.IntegerField()
y = models.IntegerField()
letter = models.CharField(
choices=[(c, c) for c in string.ascii_lowercase], max_length=1)
datetime = models.DateTimeField(auto_now=True)
def __str__(self):
return '{} - {} ({}, {})'.format(
self.user, self.letter, self.x, self.y)
class Meta:
unique_together = ('x', 'y')
for cls in [UserProfile, Word, WordHistory, Cell]:
admin.site.register(cls)
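# Illustrative sketch (not part of the original module): Cell's
# unique_together = ('x', 'y') means each board coordinate holds at most one
# letter, so placement code would typically claim a square with something like
#   cell, created = Cell.objects.get_or_create(
#       x=3, y=5, defaults={'user': user, 'letter': 'a'})
# and treat created == False as "square already taken".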
| gpl-3.0 | -3,721,119,490,238,843,400 | 26.539683 | 78 | 0.665706 | false |
AdrianGaudebert/elmo | apps/privacy/migrations/0001_initial.py | 1 | 5309 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Policy'
db.create_table('privacy_policy', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('text', self.gf('django.db.models.fields.TextField')()),
('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('privacy', ['Policy'])
# Adding model 'Comment'
db.create_table('privacy_comment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('text', self.gf('django.db.models.fields.TextField')()),
('policy', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['privacy.Policy'])),
('who', self.gf('django.db.models.fields.related.ForeignKey')(related_name='privacy_comments', to=orm['auth.User'])),
))
db.send_create_signal('privacy', ['Comment'])
def backwards(self, orm):
# Deleting model 'Policy'
db.delete_table('privacy_policy')
# Deleting model 'Comment'
db.delete_table('privacy_comment')
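    # Illustrative note (not part of the generated migration): with South this
    # migration is applied with
    #   python manage.py migrate privacy
    # and reversed (running backwards()) with
    #   python manage.py migrate privacy zero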
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'privacy.comment': {
'Meta': {'object_name': 'Comment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'policy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['privacy.Policy']"}),
'text': ('django.db.models.fields.TextField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'privacy_comments'", 'to': "orm['auth.User']"})
},
'privacy.policy': {
'Meta': {'object_name': 'Policy'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['privacy'] | mpl-2.0 | 566,294,100,273,939,260 | 58.662921 | 182 | 0.557355 | false |
Erotemic/hotspotter | hotspotter/feature_compute2.py | 1 | 7452 | ''' Computes feature representations '''
from __future__ import division, print_function
from hscom import __common__
(print, print_, print_on, print_off,
rrr, profile) = __common__.init(__name__, '[fc2]')
# scientific
import numpy as np
# python
from os.path import join
# hotspotter
from hscom import helpers as util
from hscom import params
from hscom import fileio as io
from hscom.Parallelize import parallel_compute
import extern_feat
def whiten_features(desc_list):
import algos
print('[fc2] * Whitening features')
ax2_desc = np.vstack(desc_list)
ax2_desc_white = algos.scale_to_byte(algos.whiten(ax2_desc))
index = 0
offset = 0
for cx in xrange(len(desc_list)):
old_desc = desc_list[cx]
print ('[fc2] * ' + util.info(old_desc, 'old_desc'))
offset = len(old_desc)
new_desc = ax2_desc_white[index:(index + offset)]
desc_list[cx] = new_desc
index += offset
# =======================================
# Main Script
# =======================================
@profile
def bigcache_feat_save(cache_dir, uid, ext, kpts_list, desc_list):
print('[fc2] Caching desc_list and kpts_list')
io.smart_save(kpts_list, cache_dir, 'kpts_list', uid, ext)
io.smart_save(desc_list, cache_dir, 'desc_list', uid, ext)
@profile
def bigcache_feat_load(cache_dir, uid, ext):
#io.debug_smart_load(cache_dir, fname='*', uid=uid, ext='.*')
kpts_list = io.smart_load(cache_dir, 'kpts_list', uid, ext, can_fail=True)
desc_list = io.smart_load(cache_dir, 'desc_list', uid, ext, can_fail=True)
if desc_list is None or kpts_list is None:
return None
desc_list = desc_list.tolist()
kpts_list = kpts_list.tolist()
print('[fc2] Loaded kpts_list and desc_list from big cache')
return kpts_list, desc_list
@profile
def sequential_feat_load(feat_cfg, feat_fpath_list):
kpts_list = []
desc_list = []
# Debug loading (seems to use lots of memory)
print('\n')
try:
nFeats = len(feat_fpath_list)
prog_label = '[fc2] Loading feature: '
mark_progress, end_progress = util.progress_func(nFeats, prog_label)
for count, feat_path in enumerate(feat_fpath_list):
try:
npz = np.load(feat_path, mmap_mode=None)
except IOError:
print('\n')
util.checkpath(feat_path, verbose=True)
print('IOError on feat_path=%r' % feat_path)
raise
kpts = npz['arr_0']
desc = npz['arr_1']
npz.close()
kpts_list.append(kpts)
desc_list.append(desc)
mark_progress(count)
end_progress()
print('[fc2] Finished load of individual kpts and desc')
except MemoryError:
print('\n------------')
print('[fc2] Out of memory')
print('[fc2] Trying to read: %r' % feat_path)
print('[fc2] len(kpts_list) = %d' % len(kpts_list))
print('[fc2] len(desc_list) = %d' % len(desc_list))
raise
if feat_cfg.whiten:
desc_list = whiten_features(desc_list)
return kpts_list, desc_list
# Maps a preference string into a function
feat_type2_precompute = {
'hesaff+sift': extern_feat.precompute_hesaff,
}
@profile
def _load_features_individualy(hs, cx_list):
use_cache = not params.args.nocache_feats
feat_cfg = hs.prefs.feat_cfg
feat_dir = hs.dirs.feat_dir
feat_uid = feat_cfg.get_uid()
print('[fc2] Loading ' + feat_uid + ' individually')
# Build feature paths
rchip_fpath_list = [hs.cpaths.cx2_rchip_path[cx] for cx in iter(cx_list)]
cid_list = hs.tables.cx2_cid[cx_list]
feat_fname_fmt = ''.join(('cid%d', feat_uid, '.npz'))
feat_fpath_fmt = join(feat_dir, feat_fname_fmt)
feat_fpath_list = [feat_fpath_fmt % cid for cid in cid_list]
#feat_fname_list = [feat_fname_fmt % cid for cid in cid_list]
# Compute features in parallel, saving them to disk
kwargs_list = [feat_cfg.get_dict_args()] * len(rchip_fpath_list)
pfc_kwargs = {
'func': feat_type2_precompute[feat_cfg.feat_type],
'arg_list': [rchip_fpath_list, feat_fpath_list, kwargs_list],
'num_procs': params.args.num_procs,
'lazy': use_cache,
}
parallel_compute(**pfc_kwargs)
# Load precomputed features sequentially
kpts_list, desc_list = sequential_feat_load(feat_cfg, feat_fpath_list)
return kpts_list, desc_list
@profile
def _load_features_bigcache(hs, cx_list):
# args for smart load/save
feat_cfg = hs.prefs.feat_cfg
feat_uid = feat_cfg.get_uid()
cache_dir = hs.dirs.cache_dir
sample_uid = util.hashstr_arr(cx_list, 'cids')
bigcache_uid = '_'.join((feat_uid, sample_uid))
ext = '.npy'
loaded = bigcache_feat_load(cache_dir, bigcache_uid, ext)
if loaded is not None: # Cache Hit
kpts_list, desc_list = loaded
else: # Cache Miss
kpts_list, desc_list = _load_features_individualy(hs, cx_list)
# Cache all the features
bigcache_feat_save(cache_dir, bigcache_uid, ext, kpts_list, desc_list)
return kpts_list, desc_list
@profile
@util.indent_decor('[fc2]')
def load_features(hs, cx_list=None, **kwargs):
# TODO: There needs to be a fast way to ensure that everything is
# already loaded. Same for cc2.
print('=============================')
print('[fc2] Precomputing and loading features: %r' % hs.get_db_name())
#----------------
# COMPUTE SETUP
#----------------
use_cache = not params.args.nocache_feats
use_big_cache = use_cache and cx_list is None
feat_cfg = hs.prefs.feat_cfg
feat_uid = feat_cfg.get_uid()
if hs.feats.feat_uid != '' and hs.feats.feat_uid != feat_uid:
print('[fc2] Disagreement: OLD_feat_uid = %r' % hs.feats.feat_uid)
print('[fc2] Disagreement: NEW_feat_uid = %r' % feat_uid)
print('[fc2] Unloading all chip information')
hs.unload_all()
hs.load_chips(cx_list=cx_list)
print('[fc2] feat_uid = %r' % feat_uid)
# Get the list of chip features to load
cx_list = hs.get_valid_cxs() if cx_list is None else cx_list
if not np.iterable(cx_list):
cx_list = [cx_list]
    print('[fc2] len(cx_list) = %r' % len(cx_list))
if len(cx_list) == 0:
return # HACK
cx_list = np.array(cx_list) # HACK
if use_big_cache: # use only if all descriptors requested
kpts_list, desc_list = _load_features_bigcache(hs, cx_list)
else:
kpts_list, desc_list = _load_features_individualy(hs, cx_list)
# Extend the datastructure if needed
list_size = max(cx_list) + 1
util.ensure_list_size(hs.feats.cx2_kpts, list_size)
util.ensure_list_size(hs.feats.cx2_desc, list_size)
# Copy the values into the ChipPaths object
for lx, cx in enumerate(cx_list):
hs.feats.cx2_kpts[cx] = kpts_list[lx]
for lx, cx in enumerate(cx_list):
hs.feats.cx2_desc[cx] = desc_list[lx]
hs.feats.feat_uid = feat_uid
print('[fc2]=============================')
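# Illustrative sketch (not part of the original module): typical call sites
# load features and then read them back off the HotSpotter object, e.g.
#
#   load_features(hs)                   # all valid chips, uses the big cache
#   load_features(hs, cx_list=[1, 2])   # only the requested chips
#   kpts = hs.feats.cx2_kpts[cx]
#   desc = hs.feats.cx2_desc[cx]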
def clear_feature_cache(hs):
feat_cfg = hs.prefs.feat_cfg
feat_dir = hs.dirs.feat_dir
cache_dir = hs.dirs.cache_dir
feat_uid = feat_cfg.get_uid()
print('[fc2] clearing feature cache: %r' % feat_dir)
util.remove_files_in_dir(feat_dir, '*' + feat_uid + '*', verbose=True, dryrun=False)
util.remove_files_in_dir(cache_dir, '*' + feat_uid + '*', verbose=True, dryrun=False)
pass
| apache-2.0 | 4,390,036,601,219,088,000 | 35.891089 | 89 | 0.608427 | false |
Maselkov/GW2Bot | guildwars2/worldsync.py | 1 | 7774 | import asyncio
import discord
from discord.ext import commands, tasks
from .exceptions import APIError, APIKeyError
class WorldsyncMixin:
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
@commands.group(case_insensitive=True)
async def worldsync(self, ctx):
"""Role management based on in game account world"""
if ctx.invoked_subcommand is None:
await ctx.send_help(ctx.command)
@worldsync.command(name="toggle")
async def worldsync_toggle(self, ctx):
"""Enable automatic world roles"""
def check(m):
return m.author == ctx.author and m.channel == ctx.channel
guild = ctx.guild
doc = await self.bot.database.get(guild, self)
worldsync = doc.get("worldsync", {})
enabled = not worldsync.get("enabled", False)
world_role = guild.get_role(worldsync.get("world_role"))
ally_role = guild.get_role(worldsync.get("ally_role"))
world_id = worldsync.get("world_id")
        if enabled and (not world_role or not ally_role or not world_id):
return await ctx.send(
"You must set the home world, as well as world role and "
"ally role before you can enable worldsync\n```\n"
f"{ctx.prefix}worldsync world\n"
f"{ctx.prefix}worldsync worldrole\n"
f"{ctx.prefix}worldsync allyrole```")
await self.bot.database.set(guild, {"worldsync.enabled": enabled},
self)
if enabled:
await ctx.send("Worldsync is now enabled. Use the same "
"command to disable.")
doc = await self.bot.database.get(guild, self)
return await self.sync_worlds(worldsync, guild)
await ctx.send("Worldsync disabled")
@worldsync.command(name="world")
async def worldsync_world(self, ctx, *, world):
"""Set your home world"""
if not world:
return await ctx.send_help(ctx.command)
wid = await self.get_world_id(world)
if not wid:
return await ctx.send("Invalid world name")
await self.bot.database.set(ctx.guild, {"worldsync.world_id": wid},
self)
await ctx.send(f"World set! Use `{ctx.prefix}worldsync toggle` to "
"enable if you haven't already")
@worldsync.command(name="worldrole")
async def worldsync_worldrole(self, ctx, role: discord.Role):
"""Set the role to be given to those in the home world.
You can use role mention or ID"""
await self.bot.database.set(ctx.guild,
{"worldsync.world_role": role.id}, self)
await ctx.send("Role set. Make sure the bot has enough permissions "
"to grant the role.")
@worldsync.command(name="allyrole")
async def worldsync_allyrole(self, ctx, role: discord.Role):
"""Set the role to be given to those in the linked worlds.
You can use role mention or ID"""
await self.bot.database.set(ctx.guild,
{"worldsync.ally_role": role.id}, self)
await ctx.send("Role set. Make sure the bot has enough permissions "
"to grant the role.")
@worldsync.command(name="now")
async def worldsync_now(self, ctx):
"""Run the worldsync now"""
msg = await ctx.send("Starting worldsync." +
self.get_emoji(ctx, "loading"))
doc = await self.bot.database.get(ctx.guild, self)
worldsync = doc.get("worldsync", {})
enabled = worldsync.get("enabled", False)
if not enabled:
return await ctx.send("Worldsync is not enabled")
async with ctx.typing():
await self.sync_worlds(worldsync, ctx.guild)
await ctx.send("Worldsync complete")
try:
await msg.delete()
except discord.HTTPException:
pass
async def get_linked_worlds(self, world):
endpoint = f"wvw/matches/overview?world={world}"
results = await self.call_api(endpoint)
for worlds in results["all_worlds"].values():
if world in worlds:
worlds.remove(world)
return worlds
return []
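    # Illustrative note (assumption about the GW2 API, not taken from this
    # file): wvw/matches/overview?world=<id> is expected to return an
    # "all_worlds" mapping of the three match sides to world-id lists, e.g.
    #   {"all_worlds": {"red": [1008, 1015], "blue": [...], "green": [...]}}
    # so get_linked_worlds() returns the other worlds on the home world's side.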
async def worldsync_member(self, member, world_role, ally_role, world_id,
linked_worlds):
try:
on_world = False
on_linked = False
try:
results = await self.call_api("account", member)
user_world = results["world"]
if user_world == world_id:
on_world = True
if user_world in linked_worlds:
on_linked = True
except APIKeyError:
pass
except APIError:
return
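            # if the same role is configured for both home and linked worlds,
            # skip the removal below so we don't strip the role just granted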
single_role = world_role == ally_role
if on_world:
if world_role not in member.roles:
await member.add_roles(world_role)
if not single_role and ally_role in member.roles:
await member.remove_roles(ally_role)
return
if on_linked:
if ally_role not in member.roles:
await member.add_roles(ally_role)
if not single_role and world_role in member.roles:
await member.remove_roles(world_role)
return
if world_role in member.roles:
await member.remove_roles(world_role)
if ally_role in member.roles:
await member.remove_roles(ally_role)
except:
pass
async def sync_worlds(self, doc, guild):
world_id = doc.get("world_id")
try:
linked_worlds = await self.get_linked_worlds(world_id)
except APIError as e:
return
world_role = guild.get_role(doc.get("world_role"))
ally_role = guild.get_role(doc.get("ally_role"))
if not world_role or not ally_role:
return
for member in guild.members:
if member.bot:
continue
await self.worldsync_member(member, world_role, ally_role,
world_id, linked_worlds)
await asyncio.sleep(0.25)
@commands.Cog.listener("on_member_join")
async def worldsync_on_member_join(self, member):
if member.bot:
return
guild = member.guild
doc = await self.bot.database.get(guild, self)
worldsync = doc.get("worldsync", {})
enabled = worldsync.get("enabled", False)
if not enabled:
return
world_role = guild.get_role(worldsync.get("world_role"))
ally_role = guild.get_role(worldsync.get("ally_role"))
if not world_role or not ally_role:
return
world_id = worldsync.get("world_id")
try:
linked_worlds = await self.get_linked_worlds(world_id)
except APIError as e:
return
await self.worldsync_member(member, world_role, ally_role, world_id,
linked_worlds)
@tasks.loop(minutes=5)
async def worldsync_task(self):
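        # background loop: periodically re-run the sync for every guild that
        # has worldsync enabled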
cursor = self.bot.database.iter("guilds", {"worldsync.enabled": True},
self,
subdocs=["worldsync"])
async for doc in cursor:
try:
await self.sync_worlds(doc, doc["_obj"])
except asyncio.CancelledError:
return
except Exception as e:
pass
| mit | 8,110,200,218,613,507,000 | 39.489583 | 78 | 0.551839 | false |
stevarino/cmsc495 | mac_app/forms.py | 1 | 1837 | from django import forms
from django.contrib.auth.models import User
from .models import Department
class NewUserTicket(forms.Form):
username = forms.CharField(label='Username', max_length=32)
password = forms.CharField(label='Password', widget=forms.PasswordInput)
firstname = forms.CharField(label='First Name', max_length=32, required=False)
lastname = forms.CharField(label='Last Name', max_length=32, required=False)
address = forms.CharField(max_length=256, required=False)
city = forms.CharField(max_length=128, required=False)
state = forms.CharField(max_length=128, required=False)
postal_code = forms.CharField(max_length=16, required=False)
phone = forms.CharField(max_length=16, required=False)
department = forms.ModelChoiceField(Department.objects.all())
# form validator to ensure unique username
def clean_username(self):
username = self.cleaned_data['username']
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(u'Username "{}" is already in use.'.format(username))
class UserSearchForm(forms.Form):
username = forms.CharField(label='Username', max_length=32, required=False)
first_name = forms.CharField(label='First Name', max_length=32, required=False)
last_name = forms.CharField(label='Last Name', max_length=32, required=False)
def get_users(self):
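        """Return users matching any provided name filters (case-insensitive
        substring match), or an empty list when no criteria were given."""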
users = User.objects
is_filtered = False
for f in ['first_name', 'last_name', 'username']:
if self.cleaned_data[f]:
is_filtered = True
users = users.filter(**{
f+'__icontains': self.cleaned_data[f]
})
if is_filtered:
return users
return [] | mit | -1,642,018,643,992,884,500 | 40.772727 | 89 | 0.663582 | false |
CanalTP/navitia | source/navitiacommon/navitiacommon/parser_args_type.py | 1 | 9951 | # encoding: utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from collections import namedtuple
import ujson
import geojson
import flask
from dateutil import parser
from flask_restful.inputs import boolean
import six
import sys
class TypeSchema(object):
def __init__(self, type=None, metadata=None):
self.type = type
self.metadata = metadata
class CustomSchemaType(object):
def schema(self):
# by default we look for a _schema variable, but it can be overriden
return self._schema
class DepthArgument(CustomSchemaType):
def __call__(self, value, name):
conv_value = int(value)
if conv_value > 3:
raise ValueError("The {} argument has to be <= 3, you gave : {}".format(name, value))
return conv_value
def schema(self):
return TypeSchema(type=int, metadata={'minimum': 0, 'maximum': 3})
class PositiveFloat(CustomSchemaType):
def __call__(self, value, name):
conv_value = float(value)
if conv_value <= 0:
raise ValueError("The {} argument has to be > 0, you gave : {}".format(name, value))
return conv_value
def schema(self):
return TypeSchema(type=float, metadata={'minimum': 0})
class IntRange(CustomSchemaType):
def __init__(self, min, max):
self.min = min
self.max = max
def __call__(self, value, name):
conv_value = int(value)
if not self.min <= conv_value <= self.max:
raise ValueError(
"The {} argument has to be in range [{}, {}], you gave : {}".format(
name, self.min, self.max, value
)
)
return conv_value
    def schema(self):
        return TypeSchema(type=int, metadata={'minimum': self.min, 'maximum': self.max})
class FloatRange(CustomSchemaType):
def __init__(self, min, max):
self.min = min
self.max = max
def __call__(self, value, name):
conv_value = float(value)
if not self.min <= conv_value <= self.max:
raise ValueError(
"The {} argument has to be in range [{}, {}], you gave : {}".format(
name, self.min, self.max, value
)
)
return conv_value
    def schema(self):
        return TypeSchema(type=float, metadata={'minimum': self.min, 'maximum': self.max})
class SpeedRange(CustomSchemaType):
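    # (min, max) bounds accepted for each speed parameter, keyed by the query
    # parameter name; unknown parameters fall back to the 'default' entry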
map_range = {
'bike_speed': (0.01, 15),
'bss_speed': (0.01, 15),
'walking_speed': (0.01, 4),
'car_speed': (0.01, 50),
'taxi_speed': (0.01, 50),
'car_no_park_speed': (0.01, 50),
'ridesharing_speed': (0.01, 50),
'default': (sys.float_info.min, sys.float_info.max),
}
def __call__(self, value, name):
conv_value = float(value)
(range_min, range_max) = (
SpeedRange.map_range[name] if name in SpeedRange.map_range else SpeedRange.map_range['default']
)
if not range_min <= conv_value <= range_max:
raise ValueError(
"The {} argument has to be in range [{}, {}], you gave : {}".format(
name, range_min, range_max, value
)
)
return conv_value
def schema(self):
return TypeSchema(type=float, metadata={'minimum': 0})
class BooleanType(CustomSchemaType):
def __call__(self, value):
if isinstance(value, bool):
return value
return boolean(value)
def schema(self):
return TypeSchema(type=bool)
class OptionValue(CustomSchemaType):
def __init__(self, optional_values):
self.optional_values = optional_values
def __call__(self, value, name):
# if input value is iterable
if hasattr(value, '__iter__') and not isinstance(value, six.text_type):
if not all((v in self.optional_values for v in value)):
error = "The {} argument must be in list {}, you gave {}".format(
name, str(self.optional_values), value
)
raise ValueError(error)
elif not (value in self.optional_values):
error = "The {} argument must be in list {}, you gave {}".format(
name, str(self.optional_values), value
)
raise ValueError(error)
return value
def schema(self):
return TypeSchema(type=str, metadata={'enum': self.optional_values})
class DescribedOptionValue(OptionValue):
def __init__(self, optional_values):
self.description = "Possible values:\n"
self.description += '\n'.join([" * '{}' - {}".format(k, v) for k, v in optional_values.items()])
super(DescribedOptionValue, self).__init__(optional_values.keys())
def schema(self):
ts = super(DescribedOptionValue, self).schema()
ts.metadata['description'] = self.description
return ts
class IntervalValue(CustomSchemaType):
def __init__(self, type=int, min_value=None, max_value=None):
self.type = type
self.min_value = min_value
self.max_value = max_value
def __call__(self, value, name):
v = self.type(value)
if self.min_value:
v = max(v, self.min_value)
if self.max_value:
v = min(v, self.max_value)
return v
def schema(self):
metadata = {}
if self.min_value:
metadata['minimum'] = self.min_value
if self.max_value:
metadata['maximum'] = self.max_value
return TypeSchema(type=self.type, metadata=metadata)
def geojson_argument(value):
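    """Validate that value, when provided, is a dict describing a valid
    GeoJSON object whose geometry type is 'Polygon'; raise ValueError otherwise."""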
def is_geometry_valid(geometry):
geometry_str = ujson.dumps(geometry)
valid = geojson.is_valid(geojson.loads(geometry_str))
return 'valid' in valid and (valid['valid'] == 'yes' or valid['valid'] == '')
if value:
if not isinstance(value, dict):
raise ValueError('invalid json')
if not is_geometry_valid(value):
raise ValueError('invalid geojson')
geometry = value.get('geometry', {}).get('type')
if not geometry or geometry.lower() != 'polygon':
raise ValueError('invalid geometry type')
return value
class CoordFormat(CustomSchemaType):
def __init__(self, nullable=False):
super(CoordFormat, self).__init__()
self.nullable = nullable
def __call__(self, coord):
"""
Validate coordinates format (lon;lat)
"""
if coord == '' and self.nullable:
return coord
lon_lat_splitted = coord.split(";")
if len(lon_lat_splitted) != 2:
raise ValueError('Invalid coordinate parameter. It must be lon;lat where lon and lat are floats.')
lon, lat = lon_lat_splitted
lat = float(lat)
if not (-90.0 <= lat <= 90.0):
raise ValueError("lat should be between -90 and 90")
lon = float(lon)
if not (180.0 >= lon >= -180.0):
raise ValueError("lon should be between -180 and 180")
return coord
def schema(self):
return TypeSchema(type=str, metadata={'pattern': '.*;.*'})
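# e.g. CoordFormat()("2.37;48.85") returns the string unchanged, while
# CoordFormat()("2.37;95.0") raises ValueError (latitude out of range)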
class UnsignedInteger(CustomSchemaType):
def __call__(self, value):
try:
d = int(value)
if d < 0:
raise ValueError('invalid unsigned int')
return d
except ValueError as e:
raise ValueError("Unable to evaluate, {}".format(e))
def schema(self):
return TypeSchema(type=int, metadata={'minimum': 0})
class PositiveInteger(CustomSchemaType):
def __call__(self, value):
try:
d = int(value)
if d <= 0:
raise ValueError('invalid positive int')
return d
except ValueError as e:
raise ValueError("Unable to evaluate, {}".format(e))
def schema(self):
return TypeSchema(type=int, metadata={'minimum': 1})
def _parse_input_date(date):
"""
    datetime's strptime seems broken: '155' with format '%H%M%S' is not
    rejected but parsed as 1h, 5mn, 5s...
    so we use dateutil to parse the input date, even though its 'guess'
    mechanism seems a bit dangerous
"""
return parser.parse(date, dayfirst=False, yearfirst=True)
class DateTimeFormat(CustomSchemaType):
def __call__(self, value):
"""
we want to valid the date format
"""
try:
d = _parse_input_date(value)
if d.year <= 1970:
raise ValueError('date is too early!')
return d
except ValueError as e:
raise ValueError("Unable to parse datetime, {}".format(e))
def schema(self):
return TypeSchema(type=str, metadata={'format': 'date-time'})
| agpl-3.0 | 2,071,259,826,487,371,000 | 30.490506 | 110 | 0.592604 | false |
hohoins/ml | hunkim/ml_lab_10.py | 1 | 4364 | # Reference
# "Machine Learning / Deep Learning for Everyone" lecture series
# Sung Kim, HKUST (Hong Kong University of Science and Technology)
# http://hunkim.github.io/ml
from __future__ import print_function
import tensorflow as tf
import random as ran
import matplotlib.pyplot as plt
import math
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./MNIST_data/", one_hot=True)
def xavier_init(n_inputs, n_outputs, uniform=True):
if uniform:
# 6 was used in the paper.
init_range = math.sqrt(6.0 / (n_inputs + n_outputs))
return tf.random_uniform_initializer(-init_range, init_range)
else:
# 3 gives us approximately the same limits as above since this repicks
# values greater than 2 standard deviations from the mean.
stddev = math.sqrt(3.0 / (n_inputs + n_outputs))
return tf.truncated_normal_initializer(stddev=stddev)
# Parameters
learning_rate = 0.0006
training_epochs = 60
batch_size = 512
display_step = 1
# tf Graph Input
X = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
Y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes
# Set model weights
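# Note: despite its name, this placeholder is fed to tf.nn.dropout as the keep
# probability (0.7 while training, 1.0 when evaluating below)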
dropout_rate = tf.placeholder(tf.float32)
W1 = tf.get_variable("W1", shape=[784, 512], initializer=xavier_init(784, 512))
W2 = tf.get_variable("W2", shape=[512, 512], initializer=xavier_init(512, 512))
W3 = tf.get_variable("W3", shape=[512, 512], initializer=xavier_init(512, 512))
W4 = tf.get_variable("W4", shape=[512, 256], initializer=xavier_init(512, 256))
W5 = tf.get_variable("W5", shape=[256, 256], initializer=xavier_init(256, 256))
W6 = tf.get_variable("W6", shape=[256, 256], initializer=xavier_init(256, 256))
W8 = tf.get_variable("W8", shape=[256, 10], initializer=xavier_init(256, 10))
b1 = tf.Variable(tf.random_normal([512]))
b2 = tf.Variable(tf.random_normal([512]))
b3 = tf.Variable(tf.random_normal([512]))
b4 = tf.Variable(tf.random_normal([256]))
b5 = tf.Variable(tf.random_normal([256]))
b6 = tf.Variable(tf.random_normal([256]))
b8 = tf.Variable(tf.random_normal([10]))
# Construct model
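# Six fully-connected ReLU layers with dropout, followed by a linear output layer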
_L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
L1 = tf.nn.dropout(_L1, dropout_rate)
_L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
L2 = tf.nn.dropout(_L2, dropout_rate)
_L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)
L3 = tf.nn.dropout(_L3, dropout_rate)
_L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
L4 = tf.nn.dropout(_L4, dropout_rate)
_L5 = tf.nn.relu(tf.matmul(L4, W5) + b5)
L5 = tf.nn.dropout(_L5, dropout_rate)
_L6 = tf.nn.relu(tf.matmul(L5, W6) + b6)
L6 = tf.nn.dropout(_L6, dropout_rate)
hypothesis = tf.matmul(L6, W8) + b8
# Minimize error using cross entropy
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=hypothesis))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys, dropout_rate: 0.7})
# Compute average loss
c = sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys, dropout_rate: 0.7})
avg_cost += c / total_batch
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
print("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels, dropout_rate: 1}))
r = ran.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r+1], 1)))
print("Predication: ", sess.run(tf.argmax(hypothesis, 1), {X: mnist.test.images[r:r+1], dropout_rate: 1}))
plt.imshow(mnist.test.images[r:r+1].reshape(28, 28), cmap='Greys', interpolation='nearest')
plt.show()
| apache-2.0 | -5,957,714,743,389,071,000 | 35.542373 | 110 | 0.669991 | false |
lwerdna/alib | py/bytes.py | 1 | 8254 | #!/usr/bin/python
#------------------------------------------------------------------------------
#
# Copyright 2011-2016 Andrew Lamoureux
#
# This file is a part of autils.
#
# autils is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#------------------------------------------------------------------------------
import os
import sys
import re
from struct import pack, unpack
import string
sys.path.append(os.environ['PATH_AUTILS'])
from parsing import *
regex_hex_int = r'^(?:0x)?[a-fA-F0-9]{1,16}$'
# this is useful for parsing output from objdump, which can come
# as a list of bytes, list of words, etc.
#
# bytes example (x86_64):
# 40051c: 55 push %rbp
# 40051d: 48 89 e5 mov %rsp,%rbp
# 400520: bf d4 05 40 00 mov $0x4005d4,%edi
#
# words example (arm thumb):
# 1928: 2a00 cmp r2, #0
# 192a: d031 beq.n 1990 <.text+0x150>
# 192c: f8d8 300c ldr.w r3, [r8, #12]
#
#------------------------------------------------------------------------------
# binary data to various string representations
#------------------------------------------------------------------------------
def getHexDump(data, addr=0, grouping=1, endian='little'):
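    # render data as a hex dump; 'grouping' selects byte/word/dword/qword
    # columns (1, 2, 4 or 8) and 'endian' controls how each group is read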
result = ''
while(data):
ascii = ''
buff16 = data[0:16]
data = data[16:]
result += "%08X: " % addr
i = 0
while i < 16:
if(i < len(buff16)):
f0 = { \
'big': {1:'>B', 2:'>H', 4:'>I', 8:'>Q'}, \
'little': {1:'<B', 2:'<H', 4:'<I', 8:'<Q'} \
}
f1 = { \
1:'%02X ', 2:'%04X ', 4:'%08X ', 8:'%016X ' \
}
temp = unpack(f0[endian][grouping], buff16[i:i+grouping])[0]
result += f1[grouping] % temp
for j in range(grouping):
if(buff16[i+j] >= ' ' and buff16[i+j] <= '~'):
ascii += buff16[i+j]
else:
ascii += '.'
else:
if grouping == 1:
result += ' '*len('DE ')
elif grouping == 2:
result += ' '*len('DEAD ')
elif grouping == 4:
result += ' '*len('DEADBEEF ')
elif grouping == 8:
result += ' '*len('DEADBEEFCAFEBABE ')
i += grouping
result += ' %s\n' % ascii
addr += 16;
return result
def getGdbWrites(addr, data):
result = ''
while(data):
if(len(data) >= 4):
result += 'set *(unsigned int *)0x%X = 0x%X\n' % \
(addr, unpack('I',data[0:4])[0])
data = data[4:]
addr += 4
elif(len(data) >= 2):
result += 'set *(unsigned short *)0x%X = 0x%X\n' % \
(addr, unpack('H',data[0:2])[0])
data = data[2:]
addr += 2
elif(len(data) == 1):
result += 'set *(unsigned char *)0x%X = 0x%X\n' % \
(addr, unpack('B',data[0:1])[0])
data = data[1:]
addr += 1
else:
print 'IMPOSSIBLE!'
return result;
def getIdaPatchIdc(addr, data):
result = ''
while(data):
if(len(data) >= 4):
result += 'PatchDword(0x%X, 0x%X);\n' % \
(addr, unpack('I',data[0:4])[0])
data = data[4:]
addr += 4
elif(len(data) >= 2):
result += 'PatchWord(0x%X, 0x%X);\n' % \
(addr, unpack('H',data[0:2])[0])
data = data[2:]
addr += 2
elif(len(data) == 1):
result += 'PatchByte(0x%X, 0x%X);\n' % \
(addr, unpack('B',data[0:1])[0])
data = data[1:]
addr += 1
else:
result += 'IMPOSSIBLE!'
return result
def getCString(data):
result = ''
count = 0
group16 = ''
while(data):
group16 += "\\x%02X" % unpack('B', data[0])[0]
data = data[1:]
count += 1
if count == 16:
result += '"%s"\n' % group16
group16 = ''
count = 0
if group16:
result += '"%s"' % group16
return result
def getPythonString(data):
temp = getCString(data)
temp = re.sub("\n", " + \\\n", temp)
return temp
def getStrAsHex(s, spaced=False):
raise Exception("use binascii.hexlify() or foo.encode('hex') instead")
#------------------------------------------------------------------------------
# bit access
#------------------------------------------------------------------------------
def getBits(val, hi, lo):
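    # return bits hi..lo (inclusive) of val, e.g. getBits(0b101100, 3, 2) == 0b11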
mask = (2**(hi+1) - 1) - (2**lo-1)
return (val & mask) >> lo
#------------------------------------------------------------------------------
# endian conversions
#------------------------------------------------------------------------------
def bswap32(val):
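    # byte-swap a 32-bit value, e.g. bswap32(0x12345678) == 0x78563412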
return unpack('>I', pack('<I', val))[0]
def bswap16(val):
return unpack('>H', pack('<H', val))[0]
#------------------------------------------------------------------------------
# bit byte calculations
#------------------------------------------------------------------------------
def dataXor(a, b):
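    # xor two equal-length strings byte by byte, returning the combined string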
assert(len(a)==len(b))
length = len(a)
result = ''
for i in range(length):
result += pack('B', ord(a[i]) ^ ord(b[i]))
return result
#------------------------------------------------------------------------------
# tests
#------------------------------------------------------------------------------
if __name__ == '__main__':
# test getFirstHexInt()
text = "" + \
"blah blah blah\n" + \
"blah blah 0xDEADBEEF blah\n" + \
"blah blah\n" + \
"0xCAFEBABEEE\n" + \
"derp werp\n" + \
"ree dee\n"
if(parseHexValue(text) == 0xDEADBEEF):
print "PASS!"
else:
print "FAIL!"
text = "" + \
"[R]ead [M]emory via [S]DIO\n" + \
"parsed address: 0x00000500\n" + \
"parsed len: 0x00000100\n" + \
"len = 0x100\n" + \
"addr = 0x500\n" + \
"partition = 0x0\n" + \
"00000500: A0 60 00 68 08 B1 47 F4 00 27 B8 F1 05 0F 18 BF .`.h..G..'......\n" + \
"00000510: 47 F0 80 77 00 2F 4F F0 01 07 0B D1 28 68 28 B1 G..w./O.....(h(.\n" + \
"00000520: 28 68 38 B1 30 46 1C F0 21 FC 18 B1 17 B1 0A F0 (h8.0F..!.......\n" + \
"00000530: B1 FC 1E E0 01 20 29 F0 59 FB 21 F0 FD FF 16 E0 ..... ).Y.!.....\n"
if(parseBytes(text) == "" + \
"\xA0\x60\x00\x68\x08\xB1\x47\xF4\x00\x27\xB8\xF1\x05\x0F\x18\xBF" + \
"\x47\xF0\x80\x77\x00\x2F\x4F\xF0\x01\x07\x0B\xD1\x28\x68\x28\xB1" + \
"\x28\x68\x38\xB1\x30\x46\x1C\xF0\x21\xFC\x18\xB1\x17\xB1\x0A\xF0" + \
"\xB1\xFC\x1E\xE0\x01\x20\x29\xF0\x59\xFB\x21\xF0\xFD\xFF\x16\xE0"):
print "PASS!"
else:
print "FAIL!"
print parseBytes(text)
data = \
"\x23\x21\x2f\x75\x73\x72\x2f\x62\x69\x6e\x2f\x70\x79\x74\x68\x6f" + \
"\x6e\x0a\x23\x20\x32\x30\x31\x32\x20\x61\x6e\x64\x72\x65\x77\x6c" + \
"\x0a\x0a\x23\x20\x72\x6f\x75\x74\x69\x6e\x65\x73\x20\x66\x6f\x72" + \
"\x20\x70\x61\x72\x73\x69\x6e\x67\x2f\x70\x72\x6f\x63\x65\x73\x73" + \
"\x69\x6e\x67\x20\x62\x69\x74\x73\x2f\x62\x79\x74\x65\x73\x0a\x0a" + \
"\x69\x6d\x70\x6f\x72\x74\x20\x72\x65\x0a\x66\x72\x6f\x6d\x20\x73" + \
"\x74\x72\x75\x63\x74\x20\x69\x6d\x70\x6f\x72\x74\x20\x70\x61\x63" + \
"\x6b\x2c\x20\x75\x6e\x70\x61\x63\x6b\x0a\x69\x6d\x70\x6f\x72\x74" + \
"\x20\x73\x74\x72\x69\x6e\x67\x0a\x0a\x72\x65\x67\x65\x78\x5f\x68" + \
"\x65\x78\x5f\x69\x6e\x74\x20\x3d\x20\x72\x27\x5e\x28\x3f\x3a\x30" + \
"\x78\x29\x3f\x5b\x61\x2d\x66\x41\x2d\x46\x30\x2d\x39\x5d\x7b\x31" + \
"\x2c\x31\x36\x7d\x24\x27\x0a\x0a\x23\x20\x67\x72\x61\x62\x73\x20" + \
"\x66\x69\x72\x73\x74\x20\x70\x61\x72\x73\x65\x61\x62\x6c\x65\x20" + \
"\x68\x65\x78\x61\x64\x65\x63\x69\x6d\x61\x6c\x20\x69\x6e\x74\x65" + \
"\x67\x65\x72\x20\x66\x72\x6f\x6d\x20\x61\x20\x6c\x69\x6e\x65\x0a" + \
"\x23\x0a\x64\x65\x66\x20\x67\x65\x74\x46\x69\x72\x73\x74\x4c\x69"
print getHexDump(data, 0, grouping=1, endian='big')
print getHexDump(data, 0, grouping=2, endian='big')
print getHexDump(data, 0, grouping=4, endian='big')
print getHexDump(data, 0, grouping=8, endian='big')
print getHexDump(data, 0, grouping=1, endian='little')
print getHexDump(data, 0, grouping=2, endian='little')
print getHexDump(data, 0, grouping=4, endian='little')
print getHexDump(data, 0, grouping=8, endian='little')
print getGdbWrites(0, data)
print getIdaPatchIdc(0, data)
print getCString(data)
print getPythonString(data)
| gpl-3.0 | 1,060,310,704,577,615,400 | 28.478571 | 84 | 0.531015 | false |
mancoast/CPythonPyc_test | fail/331_test_inspect.py | 1 | 88168 | import re
import sys
import types
import unittest
import inspect
import linecache
import datetime
import collections
import os
import shutil
from os.path import normcase
from test.support import run_unittest, TESTFN, DirsOnSysPath
from test import inspect_fodder as mod
from test import inspect_fodder2 as mod2
# C module for test_findsource_binary
import unicodedata
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
# isbuiltin, isroutine, isgenerator, isgeneratorfunction, getmembers,
# getdoc, getfile, getmodule, getsourcefile, getcomments, getsource,
# getclasstree, getargspec, getargvalues, formatargspec, formatargvalues,
# currentframe, stack, trace, isdatadescriptor
# NOTE: There are some additional tests relating to interaction with
# zipimport in the test_zipimport_support test module.
modfile = mod.__file__
if modfile.endswith(('c', 'o')):
modfile = modfile[:-1]
# Normalize file names: on Windows, the case of file names of compiled
# modules depends on the path used to start the python executable.
modfile = normcase(modfile)
def revise(filename, *args):
return (normcase(filename),) + args
import builtins
git = mod.StupidGit()
class IsTestBase(unittest.TestCase):
predicates = set([inspect.isbuiltin, inspect.isclass, inspect.iscode,
inspect.isframe, inspect.isfunction, inspect.ismethod,
inspect.ismodule, inspect.istraceback,
inspect.isgenerator, inspect.isgeneratorfunction])
def istest(self, predicate, exp):
obj = eval(exp)
self.assertTrue(predicate(obj), '%s(%s)' % (predicate.__name__, exp))
for other in self.predicates - set([predicate]):
if predicate == inspect.isgeneratorfunction and\
other == inspect.isfunction:
continue
self.assertFalse(other(obj), 'not %s(%s)' % (other.__name__, exp))
def generator_function_example(self):
for i in range(2):
yield i
class TestPredicates(IsTestBase):
def test_sixteen(self):
count = len([x for x in dir(inspect) if x.startswith('is')])
        # This test is here to remind you to update Doc/library/inspect.rst,
        # which claims there are 16 such functions
expected = 16
err_msg = "There are %d (not %d) is* functions" % (count, expected)
self.assertEqual(count, expected, err_msg)
def test_excluding_predicates(self):
global tb
self.istest(inspect.isbuiltin, 'sys.exit')
self.istest(inspect.isbuiltin, '[].append')
self.istest(inspect.iscode, 'mod.spam.__code__')
try:
1/0
except:
tb = sys.exc_info()[2]
self.istest(inspect.isframe, 'tb.tb_frame')
self.istest(inspect.istraceback, 'tb')
if hasattr(types, 'GetSetDescriptorType'):
self.istest(inspect.isgetsetdescriptor,
'type(tb.tb_frame).f_locals')
else:
self.assertFalse(inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals))
finally:
# Clear traceback and all the frames and local variables hanging to it.
tb = None
self.istest(inspect.isfunction, 'mod.spam')
self.istest(inspect.isfunction, 'mod.StupidGit.abuse')
self.istest(inspect.ismethod, 'git.argue')
self.istest(inspect.ismodule, 'mod')
self.istest(inspect.isdatadescriptor, 'collections.defaultdict.default_factory')
self.istest(inspect.isgenerator, '(x for x in range(2))')
self.istest(inspect.isgeneratorfunction, 'generator_function_example')
if hasattr(types, 'MemberDescriptorType'):
self.istest(inspect.ismemberdescriptor, 'datetime.timedelta.days')
else:
self.assertFalse(inspect.ismemberdescriptor(datetime.timedelta.days))
def test_isroutine(self):
self.assertTrue(inspect.isroutine(mod.spam))
self.assertTrue(inspect.isroutine([].count))
def test_isclass(self):
self.istest(inspect.isclass, 'mod.StupidGit')
self.assertTrue(inspect.isclass(list))
class CustomGetattr(object):
def __getattr__(self, attr):
return None
self.assertFalse(inspect.isclass(CustomGetattr()))
def test_get_slot_members(self):
class C(object):
__slots__ = ("a", "b")
x = C()
x.a = 42
members = dict(inspect.getmembers(x))
self.assertIn('a', members)
self.assertNotIn('b', members)
def test_isabstract(self):
from abc import ABCMeta, abstractmethod
class AbstractClassExample(metaclass=ABCMeta):
@abstractmethod
def foo(self):
pass
class ClassExample(AbstractClassExample):
def foo(self):
pass
a = ClassExample()
# Test general behaviour.
self.assertTrue(inspect.isabstract(AbstractClassExample))
self.assertFalse(inspect.isabstract(ClassExample))
self.assertFalse(inspect.isabstract(a))
self.assertFalse(inspect.isabstract(int))
self.assertFalse(inspect.isabstract(5))
class TestInterpreterStack(IsTestBase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
git.abuse(7, 8, 9)
def test_abuse_done(self):
self.istest(inspect.istraceback, 'git.ex[2]')
self.istest(inspect.isframe, 'mod.fr')
def test_stack(self):
self.assertTrue(len(mod.st) >= 5)
self.assertEqual(revise(*mod.st[0][1:]),
(modfile, 16, 'eggs', [' st = inspect.stack()\n'], 0))
self.assertEqual(revise(*mod.st[1][1:]),
(modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
self.assertEqual(revise(*mod.st[2][1:]),
(modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
self.assertEqual(revise(*mod.st[3][1:]),
(modfile, 39, 'abuse', [' self.argue(a, b, c)\n'], 0))
def test_trace(self):
self.assertEqual(len(git.tr), 3)
self.assertEqual(revise(*git.tr[0][1:]),
(modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
self.assertEqual(revise(*git.tr[1][1:]),
(modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
self.assertEqual(revise(*git.tr[2][1:]),
(modfile, 18, 'eggs', [' q = y / 0\n'], 0))
def test_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
self.assertEqual(args, ['x', 'y'])
self.assertEqual(varargs, None)
self.assertEqual(varkw, None)
self.assertEqual(locals, {'x': 11, 'p': 11, 'y': 14})
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(x=11, y=14)')
def test_previous_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
self.assertEqual(args, ['a', 'b', 'c', 'd', 'e', 'f'])
self.assertEqual(varargs, 'g')
self.assertEqual(varkw, 'h')
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(a=7, b=8, c=9, d=3, e=4, f=5, *g=(), **h={})')
class GetSourceBase(unittest.TestCase):
# Subclasses must override.
fodderModule = None
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
with open(inspect.getsourcefile(self.fodderModule)) as fp:
self.source = fp.read()
def sourcerange(self, top, bottom):
lines = self.source.split("\n")
return "\n".join(lines[top-1:bottom]) + "\n"
def assertSourceEqual(self, obj, top, bottom):
self.assertEqual(inspect.getsource(obj),
self.sourcerange(top, bottom))
class TestRetrievingSourceCode(GetSourceBase):
fodderModule = mod
def test_getclasses(self):
classes = inspect.getmembers(mod, inspect.isclass)
self.assertEqual(classes,
[('FesteringGob', mod.FesteringGob),
('MalodorousPervert', mod.MalodorousPervert),
('ParrotDroppings', mod.ParrotDroppings),
('StupidGit', mod.StupidGit)])
tree = inspect.getclasstree([cls[1] for cls in classes], 1)
self.assertEqual(tree,
[(object, ()),
[(mod.ParrotDroppings, (object,)),
(mod.StupidGit, (object,)),
[(mod.MalodorousPervert, (mod.StupidGit,)),
[(mod.FesteringGob, (mod.MalodorousPervert,
mod.ParrotDroppings))
]
]
]
])
def test_getfunctions(self):
functions = inspect.getmembers(mod, inspect.isfunction)
self.assertEqual(functions, [('eggs', mod.eggs),
('spam', mod.spam)])
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_getdoc(self):
self.assertEqual(inspect.getdoc(mod), 'A module docstring.')
self.assertEqual(inspect.getdoc(mod.StupidGit),
'A longer,\n\nindented\n\ndocstring.')
self.assertEqual(inspect.getdoc(git.abuse),
'Another\n\ndocstring\n\ncontaining\n\ntabs')
def test_cleandoc(self):
self.assertEqual(inspect.cleandoc('An\n indented\n docstring.'),
'An\nindented\ndocstring.')
def test_getcomments(self):
self.assertEqual(inspect.getcomments(mod), '# line 1\n')
self.assertEqual(inspect.getcomments(mod.StupidGit), '# line 20\n')
def test_getmodule(self):
# Check actual module
self.assertEqual(inspect.getmodule(mod), mod)
# Check class (uses __module__ attribute)
self.assertEqual(inspect.getmodule(mod.StupidGit), mod)
# Check a method (no __module__ attribute, falls back to filename)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Do it again (check the caching isn't broken)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Check a builtin
self.assertEqual(inspect.getmodule(str), sys.modules["builtins"])
# Check filename override
self.assertEqual(inspect.getmodule(None, modfile), mod)
def test_getsource(self):
self.assertSourceEqual(git.abuse, 29, 39)
self.assertSourceEqual(mod.StupidGit, 21, 46)
def test_getsourcefile(self):
self.assertEqual(normcase(inspect.getsourcefile(mod.spam)), modfile)
self.assertEqual(normcase(inspect.getsourcefile(git.abuse)), modfile)
fn = "_non_existing_filename_used_for_sourcefile_test.py"
co = compile("None", fn, "exec")
self.assertEqual(inspect.getsourcefile(co), None)
linecache.cache[co.co_filename] = (1, None, "None", co.co_filename)
try:
self.assertEqual(normcase(inspect.getsourcefile(co)), fn)
finally:
del linecache.cache[co.co_filename]
def test_getfile(self):
self.assertEqual(inspect.getfile(mod.StupidGit), mod.__file__)
def test_getmodule_recursion(self):
from types import ModuleType
name = '__inspect_dummy'
m = sys.modules[name] = ModuleType(name)
m.__file__ = "<string>" # hopefully not a real filename...
m.__loader__ = "dummy" # pretend the filename is understood by a loader
exec("def x(): pass", m.__dict__)
self.assertEqual(inspect.getsourcefile(m.x.__code__), '<string>')
del sys.modules[name]
inspect.getmodule(compile('a=10','','single'))
def test_proceed_with_fake_filename(self):
'''doctest monkeypatches linecache to enable inspection'''
fn, source = '<test>', 'def x(): pass\n'
getlines = linecache.getlines
def monkey(filename, module_globals=None):
if filename == fn:
return source.splitlines(keepends=True)
else:
return getlines(filename, module_globals)
linecache.getlines = monkey
try:
ns = {}
exec(compile(source, fn, 'single'), ns)
inspect.getsource(ns["x"])
finally:
linecache.getlines = getlines
class TestDecorators(GetSourceBase):
fodderModule = mod2
def test_wrapped_decorator(self):
self.assertSourceEqual(mod2.wrapped, 14, 17)
def test_replacing_decorator(self):
self.assertSourceEqual(mod2.gone, 9, 10)
class TestOneliners(GetSourceBase):
fodderModule = mod2
def test_oneline_lambda(self):
# Test inspect.getsource with a one-line lambda function.
self.assertSourceEqual(mod2.oll, 25, 25)
def test_threeline_lambda(self):
# Test inspect.getsource with a three-line lambda function,
# where the second and third lines are _not_ indented.
self.assertSourceEqual(mod2.tll, 28, 30)
def test_twoline_indented_lambda(self):
# Test inspect.getsource with a two-line lambda function,
# where the second line _is_ indented.
self.assertSourceEqual(mod2.tlli, 33, 34)
def test_onelinefunc(self):
# Test inspect.getsource with a regular one-line function.
self.assertSourceEqual(mod2.onelinefunc, 37, 37)
def test_manyargs(self):
# Test inspect.getsource with a regular function where
# the arguments are on two lines and _not_ indented and
# the body on the second line with the last arguments.
self.assertSourceEqual(mod2.manyargs, 40, 41)
def test_twolinefunc(self):
# Test inspect.getsource with a regular function where
# the body is on two lines, following the argument list and
# continued on the next line by a \\.
self.assertSourceEqual(mod2.twolinefunc, 44, 45)
def test_lambda_in_list(self):
# Test inspect.getsource with a one-line lambda function
# defined in a list, indented.
self.assertSourceEqual(mod2.a[1], 49, 49)
def test_anonymous(self):
# Test inspect.getsource with a lambda function defined
# as argument to another function.
self.assertSourceEqual(mod2.anonymous, 55, 55)
class TestBuggyCases(GetSourceBase):
fodderModule = mod2
def test_with_comment(self):
self.assertSourceEqual(mod2.with_comment, 58, 59)
def test_multiline_sig(self):
self.assertSourceEqual(mod2.multiline_sig[0], 63, 64)
def test_nested_class(self):
self.assertSourceEqual(mod2.func69().func71, 71, 72)
def test_one_liner_followed_by_non_name(self):
self.assertSourceEqual(mod2.func77, 77, 77)
def test_one_liner_dedent_non_name(self):
self.assertSourceEqual(mod2.cls82.func83, 83, 83)
def test_with_comment_instead_of_docstring(self):
self.assertSourceEqual(mod2.func88, 88, 90)
def test_method_in_dynamic_class(self):
self.assertSourceEqual(mod2.method_in_dynamic_class, 95, 97)
@unittest.skipIf(
not hasattr(unicodedata, '__file__') or
unicodedata.__file__[-4:] in (".pyc", ".pyo"),
"unicodedata is not an external binary module")
def test_findsource_binary(self):
self.assertRaises(IOError, inspect.getsource, unicodedata)
self.assertRaises(IOError, inspect.findsource, unicodedata)
def test_findsource_code_in_linecache(self):
lines = ["x=1"]
co = compile(lines[0], "_dynamically_created_file", "exec")
self.assertRaises(IOError, inspect.findsource, co)
self.assertRaises(IOError, inspect.getsource, co)
linecache.cache[co.co_filename] = (1, None, lines, co.co_filename)
try:
self.assertEqual(inspect.findsource(co), (lines,0))
self.assertEqual(inspect.getsource(co), lines[0])
finally:
del linecache.cache[co.co_filename]
class TestNoEOL(GetSourceBase):
def __init__(self, *args, **kwargs):
self.tempdir = TESTFN + '_dir'
os.mkdir(self.tempdir)
with open(os.path.join(self.tempdir,
'inspect_fodder3%spy' % os.extsep), 'w') as f:
f.write("class X:\n pass # No EOL")
with DirsOnSysPath(self.tempdir):
import inspect_fodder3 as mod3
self.fodderModule = mod3
GetSourceBase.__init__(self, *args, **kwargs)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_class(self):
self.assertSourceEqual(self.fodderModule.X, 1, 2)
class _BrokenDataDescriptor(object):
"""
A broken data descriptor. See bug #1785.
"""
def __get__(*args):
raise AssertionError("should not __get__ data descriptors")
def __set__(*args):
raise RuntimeError
def __getattr__(*args):
raise AssertionError("should not __getattr__ data descriptors")
class _BrokenMethodDescriptor(object):
"""
A broken method descriptor. See bug #1785.
"""
def __get__(*args):
raise AssertionError("should not __get__ method descriptors")
def __getattr__(*args):
raise AssertionError("should not __getattr__ method descriptors")
# Helper for testing classify_class_attrs.
def attrs_wo_objs(cls):
return [t[:3] for t in inspect.classify_class_attrs(cls)]
class TestClassesAndFunctions(unittest.TestCase):
def test_newstyle_mro(self):
# The same w/ new-class MRO.
class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, C, A, object)
got = inspect.getmro(D)
self.assertEqual(expected, got)
def assertArgSpecEquals(self, routine, args_e, varargs_e=None,
varkw_e=None, defaults_e=None, formatted=None):
args, varargs, varkw, defaults = inspect.getargspec(routine)
self.assertEqual(args, args_e)
self.assertEqual(varargs, varargs_e)
self.assertEqual(varkw, varkw_e)
self.assertEqual(defaults, defaults_e)
if formatted is not None:
self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults),
formatted)
def assertFullArgSpecEquals(self, routine, args_e, varargs_e=None,
varkw_e=None, defaults_e=None,
kwonlyargs_e=[], kwonlydefaults_e=None,
ann_e={}, formatted=None):
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \
inspect.getfullargspec(routine)
self.assertEqual(args, args_e)
self.assertEqual(varargs, varargs_e)
self.assertEqual(varkw, varkw_e)
self.assertEqual(defaults, defaults_e)
self.assertEqual(kwonlyargs, kwonlyargs_e)
self.assertEqual(kwonlydefaults, kwonlydefaults_e)
self.assertEqual(ann, ann_e)
if formatted is not None:
self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults,
kwonlyargs, kwonlydefaults, ann),
formatted)
def test_getargspec(self):
self.assertArgSpecEquals(mod.eggs, ['x', 'y'], formatted='(x, y)')
self.assertArgSpecEquals(mod.spam,
['a', 'b', 'c', 'd', 'e', 'f'],
'g', 'h', (3, 4, 5),
'(a, b, c, d=3, e=4, f=5, *g, **h)')
self.assertRaises(ValueError, self.assertArgSpecEquals,
mod2.keyworded, [])
self.assertRaises(ValueError, self.assertArgSpecEquals,
mod2.annotated, [])
self.assertRaises(ValueError, self.assertArgSpecEquals,
mod2.keyword_only_arg, [])
def test_getfullargspec(self):
self.assertFullArgSpecEquals(mod2.keyworded, [], varargs_e='arg1',
kwonlyargs_e=['arg2'],
kwonlydefaults_e={'arg2':1},
formatted='(*arg1, arg2=1)')
self.assertFullArgSpecEquals(mod2.annotated, ['arg1'],
ann_e={'arg1' : list},
formatted='(arg1: list)')
self.assertFullArgSpecEquals(mod2.keyword_only_arg, [],
kwonlyargs_e=['arg'],
formatted='(*, arg)')
def test_getargspec_method(self):
class A(object):
def m(self):
pass
self.assertArgSpecEquals(A.m, ['self'])
def test_classify_newstyle(self):
class A(object):
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
dd = _BrokenDataDescriptor()
md = _BrokenMethodDescriptor()
attrs = attrs_wo_objs(A)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'class method', A), attrs, 'missing class method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', A), attrs,
'missing plain method: %r' % attrs)
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'class method', A), attrs, 'missing class method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', B), attrs, 'missing plain method')
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'method', C), attrs, 'missing plain method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', C), attrs, 'missing plain method')
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'method', C), attrs, 'missing plain method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', B), attrs, 'missing plain method')
self.assertIn(('m1', 'method', D), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
def test_classify_builtin_types(self):
# Simple sanity check that all built-in types can have their
# attributes classified.
for name in dir(__builtins__):
builtin = getattr(__builtins__, name)
if isinstance(builtin, type):
inspect.classify_class_attrs(builtin)
def test_getmembers_descriptors(self):
class A(object):
dd = _BrokenDataDescriptor()
md = _BrokenMethodDescriptor()
def pred_wrapper(pred):
# A quick'n'dirty way to discard standard attributes of new-style
# classes.
class Empty(object):
pass
def wrapped(x):
if '__name__' in dir(x) and hasattr(Empty, x.__name__):
return False
return pred(x)
return wrapped
ismethoddescriptor = pred_wrapper(inspect.ismethoddescriptor)
isdatadescriptor = pred_wrapper(inspect.isdatadescriptor)
self.assertEqual(inspect.getmembers(A, ismethoddescriptor),
[('md', A.__dict__['md'])])
self.assertEqual(inspect.getmembers(A, isdatadescriptor),
[('dd', A.__dict__['dd'])])
class B(A):
pass
self.assertEqual(inspect.getmembers(B, ismethoddescriptor),
[('md', A.__dict__['md'])])
self.assertEqual(inspect.getmembers(B, isdatadescriptor),
[('dd', A.__dict__['dd'])])
def test_getmembers_method(self):
class B:
def f(self):
pass
self.assertIn(('f', B.f), inspect.getmembers(B))
self.assertNotIn(('f', B.f), inspect.getmembers(B, inspect.ismethod))
b = B()
self.assertIn(('f', b.f), inspect.getmembers(b))
self.assertIn(('f', b.f), inspect.getmembers(b, inspect.ismethod))
_global_ref = object()
class TestGetClosureVars(unittest.TestCase):
def test_name_resolution(self):
# Basic test of the 4 different resolution mechanisms
def f(nonlocal_ref):
def g(local_ref):
print(local_ref, nonlocal_ref, _global_ref, unbound_ref)
return g
_arg = object()
nonlocal_vars = {"nonlocal_ref": _arg}
global_vars = {"_global_ref": _global_ref}
builtin_vars = {"print": print}
unbound_names = {"unbound_ref"}
expected = inspect.ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
self.assertEqual(inspect.getclosurevars(f(_arg)), expected)
def test_generator_closure(self):
def f(nonlocal_ref):
def g(local_ref):
print(local_ref, nonlocal_ref, _global_ref, unbound_ref)
yield
return g
_arg = object()
nonlocal_vars = {"nonlocal_ref": _arg}
global_vars = {"_global_ref": _global_ref}
builtin_vars = {"print": print}
unbound_names = {"unbound_ref"}
expected = inspect.ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
self.assertEqual(inspect.getclosurevars(f(_arg)), expected)
def test_method_closure(self):
class C:
def f(self, nonlocal_ref):
def g(local_ref):
print(local_ref, nonlocal_ref, _global_ref, unbound_ref)
return g
_arg = object()
nonlocal_vars = {"nonlocal_ref": _arg}
global_vars = {"_global_ref": _global_ref}
builtin_vars = {"print": print}
unbound_names = {"unbound_ref"}
expected = inspect.ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
self.assertEqual(inspect.getclosurevars(C().f(_arg)), expected)
def test_nonlocal_vars(self):
# More complex tests of nonlocal resolution
def _nonlocal_vars(f):
return inspect.getclosurevars(f).nonlocals
def make_adder(x):
def add(y):
return x + y
return add
def curry(func, arg1):
return lambda arg2: func(arg1, arg2)
def less_than(a, b):
return a < b
# The infamous Y combinator.
def Y(le):
def g(f):
return le(lambda x: f(f)(x))
Y.g_ref = g
return g(g)
def check_y_combinator(func):
self.assertEqual(_nonlocal_vars(func), {'f': Y.g_ref})
inc = make_adder(1)
add_two = make_adder(2)
greater_than_five = curry(less_than, 5)
self.assertEqual(_nonlocal_vars(inc), {'x': 1})
self.assertEqual(_nonlocal_vars(add_two), {'x': 2})
self.assertEqual(_nonlocal_vars(greater_than_five),
{'arg1': 5, 'func': less_than})
self.assertEqual(_nonlocal_vars((lambda x: lambda y: x + y)(3)),
{'x': 3})
Y(check_y_combinator)
def test_getclosurevars_empty(self):
def foo(): pass
_empty = inspect.ClosureVars({}, {}, {}, set())
self.assertEqual(inspect.getclosurevars(lambda: True), _empty)
self.assertEqual(inspect.getclosurevars(foo), _empty)
def test_getclosurevars_error(self):
class T: pass
self.assertRaises(TypeError, inspect.getclosurevars, 1)
self.assertRaises(TypeError, inspect.getclosurevars, list)
self.assertRaises(TypeError, inspect.getclosurevars, {})
def _private_globals(self):
code = """def f(): print(path)"""
ns = {}
exec(code, ns)
return ns["f"], ns
def test_builtins_fallback(self):
f, ns = self._private_globals()
ns.pop("__builtins__", None)
expected = inspect.ClosureVars({}, {}, {"print":print}, {"path"})
self.assertEqual(inspect.getclosurevars(f), expected)
def test_builtins_as_dict(self):
f, ns = self._private_globals()
ns["__builtins__"] = {"path":1}
expected = inspect.ClosureVars({}, {}, {"path":1}, {"print"})
self.assertEqual(inspect.getclosurevars(f), expected)
def test_builtins_as_module(self):
f, ns = self._private_globals()
ns["__builtins__"] = os
expected = inspect.ClosureVars({}, {}, {"path":os.path}, {"print"})
self.assertEqual(inspect.getclosurevars(f), expected)
class TestGetcallargsFunctions(unittest.TestCase):
def assertEqualCallArgs(self, func, call_params_string, locs=None):
locs = dict(locs or {}, func=func)
r1 = eval('func(%s)' % call_params_string, None, locs)
r2 = eval('inspect.getcallargs(func, %s)' % call_params_string, None,
locs)
self.assertEqual(r1, r2)
def assertEqualException(self, func, call_param_string, locs=None):
locs = dict(locs or {}, func=func)
try:
eval('func(%s)' % call_param_string, None, locs)
except Exception as e:
ex1 = e
else:
self.fail('Exception not raised')
try:
eval('inspect.getcallargs(func, %s)' % call_param_string, None,
locs)
except Exception as e:
ex2 = e
else:
self.fail('Exception not raised')
self.assertIs(type(ex1), type(ex2))
self.assertEqual(str(ex1), str(ex2))
del ex1, ex2
def makeCallable(self, signature):
"""Create a function that returns its locals()"""
code = "lambda %s: locals()"
return eval(code % signature)
def test_plain(self):
f = self.makeCallable('a, b=1')
self.assertEqualCallArgs(f, '2')
self.assertEqualCallArgs(f, '2, 3')
self.assertEqualCallArgs(f, 'a=2')
self.assertEqualCallArgs(f, 'b=3, a=2')
self.assertEqualCallArgs(f, '2, b=3')
# expand *iterable / **mapping
self.assertEqualCallArgs(f, '*(2,)')
self.assertEqualCallArgs(f, '*[2]')
self.assertEqualCallArgs(f, '*(2, 3)')
self.assertEqualCallArgs(f, '*[2, 3]')
self.assertEqualCallArgs(f, '**{"a":2}')
self.assertEqualCallArgs(f, 'b=3, **{"a":2}')
self.assertEqualCallArgs(f, '2, **{"b":3}')
self.assertEqualCallArgs(f, '**{"b":3, "a":2}')
# expand UserList / UserDict
self.assertEqualCallArgs(f, '*collections.UserList([2])')
self.assertEqualCallArgs(f, '*collections.UserList([2, 3])')
self.assertEqualCallArgs(f, '**collections.UserDict(a=2)')
self.assertEqualCallArgs(f, '2, **collections.UserDict(b=3)')
self.assertEqualCallArgs(f, 'b=2, **collections.UserDict(a=3)')
def test_varargs(self):
f = self.makeCallable('a, b=1, *c')
self.assertEqualCallArgs(f, '2')
self.assertEqualCallArgs(f, '2, 3')
self.assertEqualCallArgs(f, '2, 3, 4')
self.assertEqualCallArgs(f, '*(2,3,4)')
self.assertEqualCallArgs(f, '2, *[3,4]')
self.assertEqualCallArgs(f, '2, 3, *collections.UserList([4])')
def test_varkw(self):
f = self.makeCallable('a, b=1, **c')
self.assertEqualCallArgs(f, 'a=2')
self.assertEqualCallArgs(f, '2, b=3, c=4')
self.assertEqualCallArgs(f, 'b=3, a=2, c=4')
self.assertEqualCallArgs(f, 'c=4, **{"a":2, "b":3}')
self.assertEqualCallArgs(f, '2, c=4, **{"b":3}')
self.assertEqualCallArgs(f, 'b=2, **{"a":3, "c":4}')
self.assertEqualCallArgs(f, '**collections.UserDict(a=2, b=3, c=4)')
self.assertEqualCallArgs(f, '2, c=4, **collections.UserDict(b=3)')
self.assertEqualCallArgs(f, 'b=2, **collections.UserDict(a=3, c=4)')
def test_varkw_only(self):
# issue11256:
f = self.makeCallable('**c')
self.assertEqualCallArgs(f, '')
self.assertEqualCallArgs(f, 'a=1')
self.assertEqualCallArgs(f, 'a=1, b=2')
self.assertEqualCallArgs(f, 'c=3, **{"a": 1, "b": 2}')
self.assertEqualCallArgs(f, '**collections.UserDict(a=1, b=2)')
self.assertEqualCallArgs(f, 'c=3, **collections.UserDict(a=1, b=2)')
def test_keyword_only(self):
f = self.makeCallable('a=3, *, c, d=2')
self.assertEqualCallArgs(f, 'c=3')
self.assertEqualCallArgs(f, 'c=3, a=3')
self.assertEqualCallArgs(f, 'a=2, c=4')
self.assertEqualCallArgs(f, '4, c=4')
self.assertEqualException(f, '')
self.assertEqualException(f, '3')
self.assertEqualException(f, 'a=3')
self.assertEqualException(f, 'd=4')
f = self.makeCallable('*, c, d=2')
self.assertEqualCallArgs(f, 'c=3')
self.assertEqualCallArgs(f, 'c=3, d=4')
self.assertEqualCallArgs(f, 'd=4, c=3')
def test_multiple_features(self):
f = self.makeCallable('a, b=2, *f, **g')
self.assertEqualCallArgs(f, '2, 3, 7')
self.assertEqualCallArgs(f, '2, 3, x=8')
self.assertEqualCallArgs(f, '2, 3, x=8, *[(4,[5,6]), 7]')
self.assertEqualCallArgs(f, '2, x=8, *[3, (4,[5,6]), 7], y=9')
self.assertEqualCallArgs(f, 'x=8, *[2, 3, (4,[5,6])], y=9')
self.assertEqualCallArgs(f, 'x=8, *collections.UserList('
'[2, 3, (4,[5,6])]), **{"y":9, "z":10}')
self.assertEqualCallArgs(f, '2, x=8, *collections.UserList([3, '
'(4,[5,6])]), **collections.UserDict('
'y=9, z=10)')
f = self.makeCallable('a, b=2, *f, x, y=99, **g')
self.assertEqualCallArgs(f, '2, 3, x=8')
self.assertEqualCallArgs(f, '2, 3, x=8, *[(4,[5,6]), 7]')
self.assertEqualCallArgs(f, '2, x=8, *[3, (4,[5,6]), 7], y=9, z=10')
self.assertEqualCallArgs(f, 'x=8, *[2, 3, (4,[5,6])], y=9, z=10')
self.assertEqualCallArgs(f, 'x=8, *collections.UserList('
'[2, 3, (4,[5,6])]), q=0, **{"y":9, "z":10}')
self.assertEqualCallArgs(f, '2, x=8, *collections.UserList([3, '
'(4,[5,6])]), q=0, **collections.UserDict('
'y=9, z=10)')
def test_errors(self):
f0 = self.makeCallable('')
f1 = self.makeCallable('a, b')
f2 = self.makeCallable('a, b=1')
# f0 takes no arguments
self.assertEqualException(f0, '1')
self.assertEqualException(f0, 'x=1')
self.assertEqualException(f0, '1,x=1')
# f1 takes exactly 2 arguments
self.assertEqualException(f1, '')
self.assertEqualException(f1, '1')
self.assertEqualException(f1, 'a=2')
self.assertEqualException(f1, 'b=3')
# f2 takes at least 1 argument
self.assertEqualException(f2, '')
self.assertEqualException(f2, 'b=3')
for f in f1, f2:
# f1/f2 takes exactly/at most 2 arguments
self.assertEqualException(f, '2, 3, 4')
self.assertEqualException(f, '1, 2, 3, a=1')
self.assertEqualException(f, '2, 3, 4, c=5')
# XXX: success of this one depends on dict order
## self.assertEqualException(f, '2, 3, 4, a=1, c=5')
# f got an unexpected keyword argument
self.assertEqualException(f, 'c=2')
self.assertEqualException(f, '2, c=3')
self.assertEqualException(f, '2, 3, c=4')
self.assertEqualException(f, '2, c=4, b=3')
self.assertEqualException(f, '**{u"\u03c0\u03b9": 4}')
# f got multiple values for keyword argument
self.assertEqualException(f, '1, a=2')
self.assertEqualException(f, '1, **{"a":2}')
self.assertEqualException(f, '1, 2, b=3')
# XXX: Python inconsistency
# - for functions and bound methods: unexpected keyword 'c'
# - for unbound methods: multiple values for keyword 'a'
#self.assertEqualException(f, '1, c=3, a=2')
# issue11256:
f3 = self.makeCallable('**c')
self.assertEqualException(f3, '1, 2')
self.assertEqualException(f3, '1, 2, a=1, b=2')
        f4 = self.makeCallable('*, a, b=0')
        self.assertEqualException(f4, '1, 2')
        self.assertEqualException(f4, '1, 2, a=1, b=2')
class TestGetcallargsMethods(TestGetcallargsFunctions):
def setUp(self):
class Foo(object):
pass
self.cls = Foo
self.inst = Foo()
def makeCallable(self, signature):
assert 'self' not in signature
mk = super(TestGetcallargsMethods, self).makeCallable
self.cls.method = mk('self, ' + signature)
return self.inst.method
class TestGetcallargsUnboundMethods(TestGetcallargsMethods):
def makeCallable(self, signature):
super(TestGetcallargsUnboundMethods, self).makeCallable(signature)
return self.cls.method
def assertEqualCallArgs(self, func, call_params_string, locs=None):
return super(TestGetcallargsUnboundMethods, self).assertEqualCallArgs(
*self._getAssertEqualParams(func, call_params_string, locs))
def assertEqualException(self, func, call_params_string, locs=None):
return super(TestGetcallargsUnboundMethods, self).assertEqualException(
*self._getAssertEqualParams(func, call_params_string, locs))
def _getAssertEqualParams(self, func, call_params_string, locs=None):
assert 'inst' not in call_params_string
locs = dict(locs or {}, inst=self.inst)
return (func, 'inst,' + call_params_string, locs)
class TestGetattrStatic(unittest.TestCase):
def test_basic(self):
class Thing(object):
x = object()
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
self.assertEqual(inspect.getattr_static(thing, 'x', None), Thing.x)
with self.assertRaises(AttributeError):
inspect.getattr_static(thing, 'y')
self.assertEqual(inspect.getattr_static(thing, 'y', 3), 3)
def test_inherited(self):
class Thing(object):
x = object()
class OtherThing(Thing):
pass
something = OtherThing()
self.assertEqual(inspect.getattr_static(something, 'x'), Thing.x)
def test_instance_attr(self):
class Thing(object):
x = 2
def __init__(self, x):
self.x = x
thing = Thing(3)
self.assertEqual(inspect.getattr_static(thing, 'x'), 3)
del thing.x
self.assertEqual(inspect.getattr_static(thing, 'x'), 2)
def test_property(self):
class Thing(object):
@property
def x(self):
raise AttributeError("I'm pretending not to exist")
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
def test_descriptor_raises_AttributeError(self):
class descriptor(object):
def __get__(*_):
raise AttributeError("I'm pretending not to exist")
desc = descriptor()
class Thing(object):
x = desc
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), desc)
def test_classAttribute(self):
class Thing(object):
x = object()
self.assertEqual(inspect.getattr_static(Thing, 'x'), Thing.x)
def test_inherited_classattribute(self):
class Thing(object):
x = object()
class OtherThing(Thing):
pass
self.assertEqual(inspect.getattr_static(OtherThing, 'x'), Thing.x)
def test_slots(self):
class Thing(object):
y = 'bar'
__slots__ = ['x']
def __init__(self):
self.x = 'foo'
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
self.assertEqual(inspect.getattr_static(thing, 'y'), 'bar')
del thing.x
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
def test_metaclass(self):
class meta(type):
attr = 'foo'
class Thing(object, metaclass=meta):
pass
self.assertEqual(inspect.getattr_static(Thing, 'attr'), 'foo')
class sub(meta):
pass
class OtherThing(object, metaclass=sub):
x = 3
self.assertEqual(inspect.getattr_static(OtherThing, 'attr'), 'foo')
class OtherOtherThing(OtherThing):
pass
# this test is odd, but it was added as it exposed a bug
self.assertEqual(inspect.getattr_static(OtherOtherThing, 'x'), 3)
def test_no_dict_no_slots(self):
self.assertEqual(inspect.getattr_static(1, 'foo', None), None)
self.assertNotEqual(inspect.getattr_static('foo', 'lower'), None)
def test_no_dict_no_slots_instance_member(self):
# returns descriptor
with open(__file__) as handle:
self.assertEqual(inspect.getattr_static(handle, 'name'), type(handle).name)
def test_inherited_slots(self):
# returns descriptor
class Thing(object):
__slots__ = ['x']
def __init__(self):
self.x = 'foo'
class OtherThing(Thing):
pass
# it would be nice if this worked...
# we get the descriptor instead of the instance attribute
self.assertEqual(inspect.getattr_static(OtherThing(), 'x'), Thing.x)
def test_descriptor(self):
class descriptor(object):
def __get__(self, instance, owner):
return 3
class Foo(object):
d = descriptor()
foo = Foo()
# for a non data descriptor we return the instance attribute
foo.__dict__['d'] = 1
self.assertEqual(inspect.getattr_static(foo, 'd'), 1)
# if the descriptor is a data descriptor we should return the
# descriptor
descriptor.__set__ = lambda s, i, v: None
self.assertEqual(inspect.getattr_static(foo, 'd'), Foo.__dict__['d'])
def test_metaclass_with_descriptor(self):
class descriptor(object):
def __get__(self, instance, owner):
return 3
class meta(type):
d = descriptor()
class Thing(object, metaclass=meta):
pass
self.assertEqual(inspect.getattr_static(Thing, 'd'), meta.__dict__['d'])
def test_class_as_property(self):
class Base(object):
foo = 3
class Something(Base):
executed = False
@property
def __class__(self):
self.executed = True
return object
instance = Something()
self.assertEqual(inspect.getattr_static(instance, 'foo'), 3)
self.assertFalse(instance.executed)
self.assertEqual(inspect.getattr_static(Something, 'foo'), 3)
def test_mro_as_property(self):
class Meta(type):
@property
def __mro__(self):
return (object,)
class Base(object):
foo = 3
class Something(Base, metaclass=Meta):
pass
self.assertEqual(inspect.getattr_static(Something(), 'foo'), 3)
self.assertEqual(inspect.getattr_static(Something, 'foo'), 3)
def test_dict_as_property(self):
test = self
test.called = False
class Foo(dict):
a = 3
@property
def __dict__(self):
test.called = True
return {}
foo = Foo()
foo.a = 4
self.assertEqual(inspect.getattr_static(foo, 'a'), 3)
self.assertFalse(test.called)
def test_custom_object_dict(self):
test = self
test.called = False
class Custom(dict):
def get(self, key, default=None):
test.called = True
super().get(key, default)
class Foo(object):
a = 3
foo = Foo()
foo.__dict__ = Custom()
self.assertEqual(inspect.getattr_static(foo, 'a'), 3)
self.assertFalse(test.called)
def test_metaclass_dict_as_property(self):
class Meta(type):
@property
def __dict__(self):
self.executed = True
class Thing(metaclass=Meta):
executed = False
def __init__(self):
self.spam = 42
instance = Thing()
self.assertEqual(inspect.getattr_static(instance, "spam"), 42)
self.assertFalse(Thing.executed)
def test_module(self):
sentinel = object()
self.assertIsNot(inspect.getattr_static(sys, "version", sentinel),
sentinel)
def test_metaclass_with_metaclass_with_dict_as_property(self):
class MetaMeta(type):
@property
def __dict__(self):
self.executed = True
return dict(spam=42)
class Meta(type, metaclass=MetaMeta):
executed = False
class Thing(metaclass=Meta):
pass
with self.assertRaises(AttributeError):
inspect.getattr_static(Thing, "spam")
self.assertFalse(Thing.executed)
class TestGetGeneratorState(unittest.TestCase):
def setUp(self):
def number_generator():
for number in range(5):
yield number
self.generator = number_generator()
def _generatorstate(self):
return inspect.getgeneratorstate(self.generator)
def test_created(self):
self.assertEqual(self._generatorstate(), inspect.GEN_CREATED)
def test_suspended(self):
next(self.generator)
self.assertEqual(self._generatorstate(), inspect.GEN_SUSPENDED)
def test_closed_after_exhaustion(self):
for i in self.generator:
pass
self.assertEqual(self._generatorstate(), inspect.GEN_CLOSED)
def test_closed_after_immediate_exception(self):
with self.assertRaises(RuntimeError):
self.generator.throw(RuntimeError)
self.assertEqual(self._generatorstate(), inspect.GEN_CLOSED)
def test_running(self):
# As mentioned in issue #10220, checking for the RUNNING state only
# makes sense inside the generator itself.
# The following generator checks for this by using the closure's
# reference to self and the generator state checking helper method
def running_check_generator():
for number in range(5):
self.assertEqual(self._generatorstate(), inspect.GEN_RUNNING)
yield number
self.assertEqual(self._generatorstate(), inspect.GEN_RUNNING)
self.generator = running_check_generator()
# Running up to the first yield
next(self.generator)
# Running after the first yield
next(self.generator)
def test_easy_debugging(self):
# repr() and str() of a generator state should contain the state name
names = 'GEN_CREATED GEN_RUNNING GEN_SUSPENDED GEN_CLOSED'.split()
for name in names:
state = getattr(inspect, name)
self.assertIn(name, repr(state))
self.assertIn(name, str(state))
def test_getgeneratorlocals(self):
def each(lst, a=None):
b=(1, 2, 3)
for v in lst:
if v == 3:
c = 12
yield v
numbers = each([1, 2, 3])
self.assertEqual(inspect.getgeneratorlocals(numbers),
{'a': None, 'lst': [1, 2, 3]})
next(numbers)
self.assertEqual(inspect.getgeneratorlocals(numbers),
{'a': None, 'lst': [1, 2, 3], 'v': 1,
'b': (1, 2, 3)})
next(numbers)
self.assertEqual(inspect.getgeneratorlocals(numbers),
{'a': None, 'lst': [1, 2, 3], 'v': 2,
'b': (1, 2, 3)})
next(numbers)
self.assertEqual(inspect.getgeneratorlocals(numbers),
{'a': None, 'lst': [1, 2, 3], 'v': 3,
'b': (1, 2, 3), 'c': 12})
try:
next(numbers)
except StopIteration:
pass
self.assertEqual(inspect.getgeneratorlocals(numbers), {})
def test_getgeneratorlocals_empty(self):
def yield_one():
yield 1
one = yield_one()
self.assertEqual(inspect.getgeneratorlocals(one), {})
try:
next(one)
except StopIteration:
pass
self.assertEqual(inspect.getgeneratorlocals(one), {})
def test_getgeneratorlocals_error(self):
self.assertRaises(TypeError, inspect.getgeneratorlocals, 1)
self.assertRaises(TypeError, inspect.getgeneratorlocals, lambda x: True)
self.assertRaises(TypeError, inspect.getgeneratorlocals, set)
self.assertRaises(TypeError, inspect.getgeneratorlocals, (2,3))
class TestSignatureObject(unittest.TestCase):
@staticmethod
def signature(func):
sig = inspect.signature(func)
return (tuple((param.name,
(... if param.default is param.empty else param.default),
(... if param.annotation is param.empty
else param.annotation),
str(param.kind).lower())
for param in sig.parameters.values()),
(... if sig.return_annotation is sig.empty
else sig.return_annotation))
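# Illustration of the tuple format produced by the helper above (a sketch
# for readers; `f` is a hypothetical function, not part of the test suite):
# for `def f(a, b=1) -> int: pass`, self.signature(f) returns
#   ((('a', ..., ..., 'positional_or_keyword'),
#     ('b', 1, ..., 'positional_or_keyword')),
#    int)
# i.e. (name, default-or-Ellipsis, annotation-or-Ellipsis, kind) for each
# parameter, followed by the return annotation (or Ellipsis if absent).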
def test_signature_object(self):
S = inspect.Signature
P = inspect.Parameter
self.assertEqual(str(S()), '()')
def test(po, pk, *args, ko, **kwargs):
pass
sig = inspect.signature(test)
po = sig.parameters['po'].replace(kind=P.POSITIONAL_ONLY)
pk = sig.parameters['pk']
args = sig.parameters['args']
ko = sig.parameters['ko']
kwargs = sig.parameters['kwargs']
S((po, pk, args, ko, kwargs))
with self.assertRaisesRegex(ValueError, 'wrong parameter order'):
S((pk, po, args, ko, kwargs))
with self.assertRaisesRegex(ValueError, 'wrong parameter order'):
S((po, args, pk, ko, kwargs))
with self.assertRaisesRegex(ValueError, 'wrong parameter order'):
S((args, po, pk, ko, kwargs))
with self.assertRaisesRegex(ValueError, 'wrong parameter order'):
S((po, pk, args, kwargs, ko))
kwargs2 = kwargs.replace(name='args')
with self.assertRaisesRegex(ValueError, 'duplicate parameter name'):
S((po, pk, args, kwargs2, ko))
def test_signature_immutability(self):
def test(a):
pass
sig = inspect.signature(test)
with self.assertRaises(AttributeError):
sig.foo = 'bar'
with self.assertRaises(TypeError):
sig.parameters['a'] = None
def test_signature_on_noarg(self):
def test():
pass
self.assertEqual(self.signature(test), ((), ...))
def test_signature_on_wargs(self):
def test(a, b:'foo') -> 123:
pass
self.assertEqual(self.signature(test),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., 'foo', "positional_or_keyword")),
123))
def test_signature_on_wkwonly(self):
def test(*, a:float, b:str) -> int:
pass
self.assertEqual(self.signature(test),
((('a', ..., float, "keyword_only"),
('b', ..., str, "keyword_only")),
int))
def test_signature_on_complex_args(self):
def test(a, b:'foo'=10, *args:'bar', spam:'baz', ham=123, **kwargs:int):
pass
self.assertEqual(self.signature(test),
((('a', ..., ..., "positional_or_keyword"),
('b', 10, 'foo', "positional_or_keyword"),
('args', ..., 'bar', "var_positional"),
('spam', ..., 'baz', "keyword_only"),
('ham', 123, ..., "keyword_only"),
('kwargs', ..., int, "var_keyword")),
...))
def test_signature_on_builtin_function(self):
with self.assertRaisesRegex(ValueError, 'not supported by signature'):
inspect.signature(type)
with self.assertRaisesRegex(ValueError, 'not supported by signature'):
# support for 'wrapper_descriptor'
inspect.signature(type.__call__)
with self.assertRaisesRegex(ValueError, 'not supported by signature'):
# support for 'method-wrapper'
inspect.signature(min.__call__)
with self.assertRaisesRegex(ValueError,
'no signature found for builtin function'):
# support for 'method-wrapper'
inspect.signature(min)
def test_signature_on_non_function(self):
with self.assertRaisesRegex(TypeError, 'is not a callable object'):
inspect.signature(42)
with self.assertRaisesRegex(TypeError, 'is not a Python function'):
inspect.Signature.from_function(42)
def test_signature_on_method(self):
class Test:
def foo(self, arg1, arg2=1) -> int:
pass
meth = Test().foo
self.assertEqual(self.signature(meth),
((('arg1', ..., ..., "positional_or_keyword"),
('arg2', 1, ..., "positional_or_keyword")),
int))
def test_signature_on_classmethod(self):
class Test:
@classmethod
def foo(cls, arg1, *, arg2=1):
pass
meth = Test().foo
self.assertEqual(self.signature(meth),
((('arg1', ..., ..., "positional_or_keyword"),
('arg2', 1, ..., "keyword_only")),
...))
meth = Test.foo
self.assertEqual(self.signature(meth),
((('arg1', ..., ..., "positional_or_keyword"),
('arg2', 1, ..., "keyword_only")),
...))
def test_signature_on_staticmethod(self):
class Test:
@staticmethod
def foo(cls, *, arg):
pass
meth = Test().foo
self.assertEqual(self.signature(meth),
((('cls', ..., ..., "positional_or_keyword"),
('arg', ..., ..., "keyword_only")),
...))
meth = Test.foo
self.assertEqual(self.signature(meth),
((('cls', ..., ..., "positional_or_keyword"),
('arg', ..., ..., "keyword_only")),
...))
def test_signature_on_partial(self):
from functools import partial
def test():
pass
self.assertEqual(self.signature(partial(test)), ((), ...))
with self.assertRaisesRegex(ValueError, "has incorrect arguments"):
inspect.signature(partial(test, 1))
with self.assertRaisesRegex(ValueError, "has incorrect arguments"):
inspect.signature(partial(test, a=1))
def test(a, b, *, c, d):
pass
self.assertEqual(self.signature(partial(test)),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword"),
('c', ..., ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, 1)),
((('b', ..., ..., "positional_or_keyword"),
('c', ..., ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, 1, c=2)),
((('b', ..., ..., "positional_or_keyword"),
('c', 2, ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, b=1, c=2)),
((('a', ..., ..., "positional_or_keyword"),
('b', 1, ..., "positional_or_keyword"),
('c', 2, ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, 0, b=1, c=2)),
((('b', 1, ..., "positional_or_keyword"),
('c', 2, ..., "keyword_only"),
('d', ..., ..., "keyword_only"),),
...))
def test(a, *args, b, **kwargs):
pass
self.assertEqual(self.signature(partial(test, 1)),
((('args', ..., ..., "var_positional"),
('b', ..., ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, 1, 2, 3)),
((('args', ..., ..., "var_positional"),
('b', ..., ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, 1, 2, 3, test=True)),
((('args', ..., ..., "var_positional"),
('b', ..., ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, 1, 2, 3, test=1, b=0)),
((('args', ..., ..., "var_positional"),
('b', 0, ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, b=0)),
((('a', ..., ..., "positional_or_keyword"),
('args', ..., ..., "var_positional"),
('b', 0, ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, b=0, test=1)),
((('a', ..., ..., "positional_or_keyword"),
('args', ..., ..., "var_positional"),
('b', 0, ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
def test(a, b, c:int) -> 42:
pass
sig = test.__signature__ = inspect.signature(test)
self.assertEqual(self.signature(partial(partial(test, 1))),
((('b', ..., ..., "positional_or_keyword"),
('c', ..., int, "positional_or_keyword")),
42))
self.assertEqual(self.signature(partial(partial(test, 1), 2)),
((('c', ..., int, "positional_or_keyword"),),
42))
psig = inspect.signature(partial(partial(test, 1), 2))
def foo(a):
return a
_foo = partial(partial(foo, a=10), a=20)
self.assertEqual(self.signature(_foo),
((('a', 20, ..., "positional_or_keyword"),),
...))
# check that we don't have any side-effects in signature(),
# and the partial object is still functioning
self.assertEqual(_foo(), 20)
def foo(a, b, c):
return a, b, c
_foo = partial(partial(foo, 1, b=20), b=30)
self.assertEqual(self.signature(_foo),
((('b', 30, ..., "positional_or_keyword"),
('c', ..., ..., "positional_or_keyword")),
...))
self.assertEqual(_foo(c=10), (1, 30, 10))
_foo = partial(_foo, 2) # now 'b' has two values -
# positional and keyword
with self.assertRaisesRegex(ValueError, "has incorrect arguments"):
inspect.signature(_foo)
def foo(a, b, c, *, d):
return a, b, c, d
_foo = partial(partial(foo, d=20, c=20), b=10, d=30)
self.assertEqual(self.signature(_foo),
((('a', ..., ..., "positional_or_keyword"),
('b', 10, ..., "positional_or_keyword"),
('c', 20, ..., "positional_or_keyword"),
('d', 30, ..., "keyword_only")),
...))
ba = inspect.signature(_foo).bind(a=200, b=11)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (200, 11, 20, 30))
def foo(a=1, b=2, c=3):
return a, b, c
_foo = partial(foo, a=10, c=13)
ba = inspect.signature(_foo).bind(11)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (11, 2, 13))
ba = inspect.signature(_foo).bind(11, 12)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (11, 12, 13))
ba = inspect.signature(_foo).bind(11, b=12)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (11, 12, 13))
ba = inspect.signature(_foo).bind(b=12)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (10, 12, 13))
_foo = partial(_foo, b=10)
ba = inspect.signature(_foo).bind(12, 14)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (12, 14, 13))
def test_signature_on_decorated(self):
import functools
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs) -> int:
return func(*args, **kwargs)
return wrapper
class Foo:
@decorator
def bar(self, a, b):
pass
self.assertEqual(self.signature(Foo.bar),
((('self', ..., ..., "positional_or_keyword"),
('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword")),
...))
self.assertEqual(self.signature(Foo().bar),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword")),
...))
# Test that we handle method wrappers correctly
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs) -> int:
return func(42, *args, **kwargs)
sig = inspect.signature(func)
new_params = tuple(sig.parameters.values())[1:]
wrapper.__signature__ = sig.replace(parameters=new_params)
return wrapper
class Foo:
@decorator
def __call__(self, a, b):
pass
self.assertEqual(self.signature(Foo.__call__),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword")),
...))
self.assertEqual(self.signature(Foo().__call__),
((('b', ..., ..., "positional_or_keyword"),),
...))
def test_signature_on_class(self):
class C:
def __init__(self, a):
pass
self.assertEqual(self.signature(C),
((('a', ..., ..., "positional_or_keyword"),),
...))
class CM(type):
def __call__(cls, a):
pass
class C(metaclass=CM):
def __init__(self, b):
pass
self.assertEqual(self.signature(C),
((('a', ..., ..., "positional_or_keyword"),),
...))
class CM(type):
def __new__(mcls, name, bases, dct, *, foo=1):
return super().__new__(mcls, name, bases, dct)
class C(metaclass=CM):
def __init__(self, b):
pass
self.assertEqual(self.signature(C),
((('b', ..., ..., "positional_or_keyword"),),
...))
self.assertEqual(self.signature(CM),
((('name', ..., ..., "positional_or_keyword"),
('bases', ..., ..., "positional_or_keyword"),
('dct', ..., ..., "positional_or_keyword"),
('foo', 1, ..., "keyword_only")),
...))
class CMM(type):
def __new__(mcls, name, bases, dct, *, foo=1):
return super().__new__(mcls, name, bases, dct)
def __call__(cls, nm, bs, dt):
return type(nm, bs, dt)
class CM(type, metaclass=CMM):
def __new__(mcls, name, bases, dct, *, bar=2):
return super().__new__(mcls, name, bases, dct)
class C(metaclass=CM):
def __init__(self, b):
pass
self.assertEqual(self.signature(CMM),
((('name', ..., ..., "positional_or_keyword"),
('bases', ..., ..., "positional_or_keyword"),
('dct', ..., ..., "positional_or_keyword"),
('foo', 1, ..., "keyword_only")),
...))
self.assertEqual(self.signature(CM),
((('nm', ..., ..., "positional_or_keyword"),
('bs', ..., ..., "positional_or_keyword"),
('dt', ..., ..., "positional_or_keyword")),
...))
self.assertEqual(self.signature(C),
((('b', ..., ..., "positional_or_keyword"),),
...))
class CM(type):
def __init__(cls, name, bases, dct, *, bar=2):
return super().__init__(name, bases, dct)
class C(metaclass=CM):
def __init__(self, b):
pass
self.assertEqual(self.signature(CM),
((('name', ..., ..., "positional_or_keyword"),
('bases', ..., ..., "positional_or_keyword"),
('dct', ..., ..., "positional_or_keyword"),
('bar', 2, ..., "keyword_only")),
...))
def test_signature_on_callable_objects(self):
class Foo:
def __call__(self, a):
pass
self.assertEqual(self.signature(Foo()),
((('a', ..., ..., "positional_or_keyword"),),
...))
class Spam:
pass
with self.assertRaisesRegex(TypeError, "is not a callable object"):
inspect.signature(Spam())
class Bar(Spam, Foo):
pass
self.assertEqual(self.signature(Bar()),
((('a', ..., ..., "positional_or_keyword"),),
...))
class ToFail:
__call__ = type
with self.assertRaisesRegex(ValueError, "not supported by signature"):
inspect.signature(ToFail())
class Wrapped:
pass
Wrapped.__wrapped__ = lambda a: None
self.assertEqual(self.signature(Wrapped),
((('a', ..., ..., "positional_or_keyword"),),
...))
def test_signature_on_lambdas(self):
self.assertEqual(self.signature((lambda a=10: a)),
((('a', 10, ..., "positional_or_keyword"),),
...))
def test_signature_equality(self):
def foo(a, *, b:int) -> float: pass
self.assertNotEqual(inspect.signature(foo), 42)
def bar(a, *, b:int) -> float: pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, *, b:int) -> int: pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, *, b:int): pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, *, b:int=42) -> float: pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, *, c) -> float: pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, b:int) -> float: pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def spam(b:int, a) -> float: pass
self.assertNotEqual(inspect.signature(spam), inspect.signature(bar))
def foo(*, a, b, c): pass
def bar(*, c, b, a): pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def foo(*, a=1, b, c): pass
def bar(*, c, b, a=1): pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def foo(pos, *, a=1, b, c): pass
def bar(pos, *, c, b, a=1): pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def foo(pos, *, a, b, c): pass
def bar(pos, *, c, b, a=1): pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def foo(pos, *args, a=42, b, c, **kwargs:int): pass
def bar(pos, *args, c, b, a=42, **kwargs:int): pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def test_signature_unhashable(self):
def foo(a): pass
sig = inspect.signature(foo)
with self.assertRaisesRegex(TypeError, 'unhashable type'):
hash(sig)
def test_signature_str(self):
def foo(a:int=1, *, b, c=None, **kwargs) -> 42:
pass
self.assertEqual(str(inspect.signature(foo)),
'(a:int=1, *, b, c=None, **kwargs) -> 42')
def foo(a:int=1, *args, b, c=None, **kwargs) -> 42:
pass
self.assertEqual(str(inspect.signature(foo)),
'(a:int=1, *args, b, c=None, **kwargs) -> 42')
def foo():
pass
self.assertEqual(str(inspect.signature(foo)), '()')
def test_signature_str_positional_only(self):
P = inspect.Parameter
def test(a_po, *, b, **kwargs):
return a_po, kwargs
sig = inspect.signature(test)
new_params = list(sig.parameters.values())
new_params[0] = new_params[0].replace(kind=P.POSITIONAL_ONLY)
test.__signature__ = sig.replace(parameters=new_params)
self.assertEqual(str(inspect.signature(test)),
'(<a_po>, *, b, **kwargs)')
sig = inspect.signature(test)
new_params = list(sig.parameters.values())
new_params[0] = new_params[0].replace(name=None)
test.__signature__ = sig.replace(parameters=new_params)
self.assertEqual(str(inspect.signature(test)),
'(<0>, *, b, **kwargs)')
def test_signature_replace_anno(self):
def test() -> 42:
pass
sig = inspect.signature(test)
sig = sig.replace(return_annotation=None)
self.assertIs(sig.return_annotation, None)
sig = sig.replace(return_annotation=sig.empty)
self.assertIs(sig.return_annotation, sig.empty)
sig = sig.replace(return_annotation=42)
self.assertEqual(sig.return_annotation, 42)
self.assertEqual(sig, inspect.signature(test))
class TestParameterObject(unittest.TestCase):
def test_signature_parameter_kinds(self):
P = inspect.Parameter
self.assertTrue(P.POSITIONAL_ONLY < P.POSITIONAL_OR_KEYWORD < \
P.VAR_POSITIONAL < P.KEYWORD_ONLY < P.VAR_KEYWORD)
self.assertEqual(str(P.POSITIONAL_ONLY), 'POSITIONAL_ONLY')
self.assertTrue('POSITIONAL_ONLY' in repr(P.POSITIONAL_ONLY))
def test_signature_parameter_object(self):
p = inspect.Parameter('foo', default=10,
kind=inspect.Parameter.POSITIONAL_ONLY)
self.assertEqual(p.name, 'foo')
self.assertEqual(p.default, 10)
self.assertIs(p.annotation, p.empty)
self.assertEqual(p.kind, inspect.Parameter.POSITIONAL_ONLY)
with self.assertRaisesRegex(ValueError, 'invalid value'):
inspect.Parameter('foo', default=10, kind='123')
with self.assertRaisesRegex(ValueError, 'not a valid parameter name'):
inspect.Parameter('1', kind=inspect.Parameter.VAR_KEYWORD)
with self.assertRaisesRegex(ValueError,
'non-positional-only parameter'):
inspect.Parameter(None, kind=inspect.Parameter.VAR_KEYWORD)
with self.assertRaisesRegex(ValueError, 'cannot have default values'):
inspect.Parameter('a', default=42,
kind=inspect.Parameter.VAR_KEYWORD)
with self.assertRaisesRegex(ValueError, 'cannot have default values'):
inspect.Parameter('a', default=42,
kind=inspect.Parameter.VAR_POSITIONAL)
p = inspect.Parameter('a', default=42,
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD)
with self.assertRaisesRegex(ValueError, 'cannot have default values'):
p.replace(kind=inspect.Parameter.VAR_POSITIONAL)
self.assertTrue(repr(p).startswith('<Parameter'))
def test_signature_parameter_equality(self):
P = inspect.Parameter
p = P('foo', default=42, kind=inspect.Parameter.KEYWORD_ONLY)
self.assertEqual(p, p)
self.assertNotEqual(p, 42)
self.assertEqual(p, P('foo', default=42,
kind=inspect.Parameter.KEYWORD_ONLY))
def test_signature_parameter_unhashable(self):
p = inspect.Parameter('foo', default=42,
kind=inspect.Parameter.KEYWORD_ONLY)
with self.assertRaisesRegex(TypeError, 'unhashable type'):
hash(p)
def test_signature_parameter_replace(self):
p = inspect.Parameter('foo', default=42,
kind=inspect.Parameter.KEYWORD_ONLY)
self.assertIsNot(p, p.replace())
self.assertEqual(p, p.replace())
p2 = p.replace(annotation=1)
self.assertEqual(p2.annotation, 1)
p2 = p2.replace(annotation=p2.empty)
self.assertEqual(p, p2)
p2 = p2.replace(name='bar')
self.assertEqual(p2.name, 'bar')
self.assertNotEqual(p2, p)
with self.assertRaisesRegex(ValueError, 'not a valid parameter name'):
p2 = p2.replace(name=p2.empty)
p2 = p2.replace(name='foo', default=None)
self.assertIs(p2.default, None)
self.assertNotEqual(p2, p)
p2 = p2.replace(name='foo', default=p2.empty)
self.assertIs(p2.default, p2.empty)
p2 = p2.replace(default=42, kind=p2.POSITIONAL_OR_KEYWORD)
self.assertEqual(p2.kind, p2.POSITIONAL_OR_KEYWORD)
self.assertNotEqual(p2, p)
with self.assertRaisesRegex(ValueError, 'invalid value for'):
p2 = p2.replace(kind=p2.empty)
p2 = p2.replace(kind=p2.KEYWORD_ONLY)
self.assertEqual(p2, p)
def test_signature_parameter_positional_only(self):
p = inspect.Parameter(None, kind=inspect.Parameter.POSITIONAL_ONLY)
self.assertEqual(str(p), '<>')
p = p.replace(name='1')
self.assertEqual(str(p), '<1>')
def test_signature_parameter_immutability(self):
p = inspect.Parameter(None, kind=inspect.Parameter.POSITIONAL_ONLY)
with self.assertRaises(AttributeError):
p.foo = 'bar'
with self.assertRaises(AttributeError):
p.kind = 123
class TestSignatureBind(unittest.TestCase):
@staticmethod
def call(func, *args, **kwargs):
sig = inspect.signature(func)
ba = sig.bind(*args, **kwargs)
return func(*ba.args, **ba.kwargs)
def test_signature_bind_empty(self):
def test():
return 42
self.assertEqual(self.call(test), 42)
with self.assertRaisesRegex(TypeError, 'too many positional arguments'):
self.call(test, 1)
with self.assertRaisesRegex(TypeError, 'too many positional arguments'):
self.call(test, 1, spam=10)
with self.assertRaisesRegex(TypeError, 'too many keyword arguments'):
self.call(test, spam=1)
def test_signature_bind_var(self):
def test(*args, **kwargs):
return args, kwargs
self.assertEqual(self.call(test), ((), {}))
self.assertEqual(self.call(test, 1), ((1,), {}))
self.assertEqual(self.call(test, 1, 2), ((1, 2), {}))
self.assertEqual(self.call(test, foo='bar'), ((), {'foo': 'bar'}))
self.assertEqual(self.call(test, 1, foo='bar'), ((1,), {'foo': 'bar'}))
self.assertEqual(self.call(test, args=10), ((), {'args': 10}))
self.assertEqual(self.call(test, 1, 2, foo='bar'),
((1, 2), {'foo': 'bar'}))
def test_signature_bind_just_args(self):
def test(a, b, c):
return a, b, c
self.assertEqual(self.call(test, 1, 2, 3), (1, 2, 3))
with self.assertRaisesRegex(TypeError, 'too many positional arguments'):
self.call(test, 1, 2, 3, 4)
with self.assertRaisesRegex(TypeError, "'b' parameter lacking default"):
self.call(test, 1)
with self.assertRaisesRegex(TypeError, "'a' parameter lacking default"):
self.call(test)
def test(a, b, c=10):
return a, b, c
self.assertEqual(self.call(test, 1, 2, 3), (1, 2, 3))
self.assertEqual(self.call(test, 1, 2), (1, 2, 10))
def test(a=1, b=2, c=3):
return a, b, c
self.assertEqual(self.call(test, a=10, c=13), (10, 2, 13))
self.assertEqual(self.call(test, a=10), (10, 2, 3))
self.assertEqual(self.call(test, b=10), (1, 10, 3))
def test_signature_bind_varargs_order(self):
def test(*args):
return args
self.assertEqual(self.call(test), ())
self.assertEqual(self.call(test, 1, 2, 3), (1, 2, 3))
def test_signature_bind_args_and_varargs(self):
def test(a, b, c=3, *args):
return a, b, c, args
self.assertEqual(self.call(test, 1, 2, 3, 4, 5), (1, 2, 3, (4, 5)))
self.assertEqual(self.call(test, 1, 2), (1, 2, 3, ()))
self.assertEqual(self.call(test, b=1, a=2), (2, 1, 3, ()))
self.assertEqual(self.call(test, 1, b=2), (1, 2, 3, ()))
with self.assertRaisesRegex(TypeError,
"multiple values for argument 'c'"):
self.call(test, 1, 2, 3, c=4)
def test_signature_bind_just_kwargs(self):
def test(**kwargs):
return kwargs
self.assertEqual(self.call(test), {})
self.assertEqual(self.call(test, foo='bar', spam='ham'),
{'foo': 'bar', 'spam': 'ham'})
def test_signature_bind_args_and_kwargs(self):
def test(a, b, c=3, **kwargs):
return a, b, c, kwargs
self.assertEqual(self.call(test, 1, 2), (1, 2, 3, {}))
self.assertEqual(self.call(test, 1, 2, foo='bar', spam='ham'),
(1, 2, 3, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, b=2, a=1, foo='bar', spam='ham'),
(1, 2, 3, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, a=1, b=2, foo='bar', spam='ham'),
(1, 2, 3, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, 1, b=2, foo='bar', spam='ham'),
(1, 2, 3, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, 1, b=2, c=4, foo='bar', spam='ham'),
(1, 2, 4, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, 1, 2, 4, foo='bar'),
(1, 2, 4, {'foo': 'bar'}))
self.assertEqual(self.call(test, c=5, a=4, b=3),
(4, 3, 5, {}))
def test_signature_bind_kwonly(self):
def test(*, foo):
return foo
with self.assertRaisesRegex(TypeError,
'too many positional arguments'):
self.call(test, 1)
self.assertEqual(self.call(test, foo=1), 1)
def test(a, *, foo=1, bar):
return foo
with self.assertRaisesRegex(TypeError,
"'bar' parameter lacking default value"):
self.call(test, 1)
def test(foo, *, bar):
return foo, bar
self.assertEqual(self.call(test, 1, bar=2), (1, 2))
self.assertEqual(self.call(test, bar=2, foo=1), (1, 2))
with self.assertRaisesRegex(TypeError,
'too many keyword arguments'):
self.call(test, bar=2, foo=1, spam=10)
with self.assertRaisesRegex(TypeError,
'too many positional arguments'):
self.call(test, 1, 2)
with self.assertRaisesRegex(TypeError,
'too many positional arguments'):
self.call(test, 1, 2, bar=2)
with self.assertRaisesRegex(TypeError,
'too many keyword arguments'):
self.call(test, 1, bar=2, spam='ham')
with self.assertRaisesRegex(TypeError,
"'bar' parameter lacking default value"):
self.call(test, 1)
def test(foo, *, bar, **bin):
return foo, bar, bin
self.assertEqual(self.call(test, 1, bar=2), (1, 2, {}))
self.assertEqual(self.call(test, foo=1, bar=2), (1, 2, {}))
self.assertEqual(self.call(test, 1, bar=2, spam='ham'),
(1, 2, {'spam': 'ham'}))
self.assertEqual(self.call(test, spam='ham', foo=1, bar=2),
(1, 2, {'spam': 'ham'}))
with self.assertRaisesRegex(TypeError,
"'foo' parameter lacking default value"):
self.call(test, spam='ham', bar=2)
self.assertEqual(self.call(test, 1, bar=2, bin=1, spam=10),
(1, 2, {'bin': 1, 'spam': 10}))
def test_signature_bind_arguments(self):
def test(a, *args, b, z=100, **kwargs):
pass
sig = inspect.signature(test)
ba = sig.bind(10, 20, b=30, c=40, args=50, kwargs=60)
# we won't have a 'z' argument in the bound arguments object, as we
# didn't pass it to 'bind'
self.assertEqual(tuple(ba.arguments.items()),
(('a', 10), ('args', (20,)), ('b', 30),
('kwargs', {'c': 40, 'args': 50, 'kwargs': 60})))
self.assertEqual(ba.kwargs,
{'b': 30, 'c': 40, 'args': 50, 'kwargs': 60})
self.assertEqual(ba.args, (10, 20))
def test_signature_bind_positional_only(self):
P = inspect.Parameter
def test(a_po, b_po, c_po=3, foo=42, *, bar=50, **kwargs):
return a_po, b_po, c_po, foo, bar, kwargs
sig = inspect.signature(test)
new_params = collections.OrderedDict(tuple(sig.parameters.items()))
for name in ('a_po', 'b_po', 'c_po'):
new_params[name] = new_params[name].replace(kind=P.POSITIONAL_ONLY)
new_sig = sig.replace(parameters=new_params.values())
test.__signature__ = new_sig
self.assertEqual(self.call(test, 1, 2, 4, 5, bar=6),
(1, 2, 4, 5, 6, {}))
with self.assertRaisesRegex(TypeError, "parameter is positional only"):
self.call(test, 1, 2, c_po=4)
with self.assertRaisesRegex(TypeError, "parameter is positional only"):
self.call(test, a_po=1, b_po=2)
def test_signature_bind_with_self_arg(self):
# Issue #17071: one of the parameters is named "self
def test(a, self, b):
pass
sig = inspect.signature(test)
ba = sig.bind(1, 2, 3)
self.assertEqual(ba.args, (1, 2, 3))
ba = sig.bind(1, self=2, b=3)
self.assertEqual(ba.args, (1, 2, 3))
class TestBoundArguments(unittest.TestCase):
def test_signature_bound_arguments_unhashable(self):
def foo(a): pass
ba = inspect.signature(foo).bind(1)
with self.assertRaisesRegex(TypeError, 'unhashable type'):
hash(ba)
def test_signature_bound_arguments_equality(self):
def foo(a): pass
ba = inspect.signature(foo).bind(1)
self.assertEqual(ba, ba)
ba2 = inspect.signature(foo).bind(1)
self.assertEqual(ba, ba2)
ba3 = inspect.signature(foo).bind(2)
self.assertNotEqual(ba, ba3)
ba3.arguments['a'] = 1
self.assertEqual(ba, ba3)
def bar(b): pass
ba4 = inspect.signature(bar).bind(1)
self.assertNotEqual(ba, ba4)
def test_main():
run_unittest(
TestDecorators, TestRetrievingSourceCode, TestOneliners, TestBuggyCases,
TestInterpreterStack, TestClassesAndFunctions, TestPredicates,
TestGetcallargsFunctions, TestGetcallargsMethods,
TestGetcallargsUnboundMethods, TestGetattrStatic, TestGetGeneratorState,
TestNoEOL, TestSignatureObject, TestSignatureBind, TestParameterObject,
TestBoundArguments, TestGetClosureVars
)
if __name__ == "__main__":
test_main()
| gpl-3.0 | -9,168,784,811,708,351,000 | 37.467714 | 88 | 0.540309 | false |
d7415/merlin | Hooks/user/edituser.py | 1 | 4554 | # This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from Core.config import Config
from Core.db import session
from Core.maps import User
from Core.chanusertracker import CUT
from Core.loadable import loadable, route, require_user
class edituser(loadable):
"""Used to change a user's access or (de)activate them"""
usage = " <user> (<access>|true|false)"
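# Example invocations (illustrative only; the exact access names depend on
# this bot's [Access] config section and command prefix):
#   edituser somenick member   -> access looked up in the "Access" config
#   edituser somenick 100      -> explicit numeric access level
#   edituser somenick false    -> deactivate the user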
@route(r"(.+)\s+(\S+)", access = "admin")
@require_user
def execute(self, message, user, params):
usernames = params.group(1)
access = params.group(2).lower()
if access.isdigit():
access = int(access)
elif access in self.true:
access = True
elif access in self.false:
access = False
else:
try:
access = Config.getint("Access",access)
except Exception:
message.reply("Invalid access level '%s'" % (access,))
return
addnicks = []
remnicks = []
changed = []
mbraxx = Config.getint("Access","member")
home = Config.get("Channels","home")
for username in usernames.split():
member = User.load(name=username, active=False)
if member is None:
message.alert("No such user '%s'" % (username,))
return
if type(access) is int and not member.active:
message.reply("You should first re-activate user %s" %(member.name,))
return
if access > user.access or member.access > user.access:
message.reply("You may not change access higher than your own")
return
changed.append(username)
if type(access) == int:
if member.active == True and member.access < mbraxx and access >= mbraxx:
addnicks.append(member.name)
if member.active == True and member.access >= mbraxx and access < mbraxx:
message.privmsg("remuser %s %s"%(home, member.name,), Config.get("Services", "nick"))
remnicks.append(member.name)
# message.privmsg("ban %s *!*@%s.%s GTFO, EAAD"%(home, member.name, Config.get("Services", "usermask"),), Config.get("Services", "nick"))
member.access = access
else:
if member.active != access and access == True and member.access >= mbraxx:
addnicks.append(member.name)
if member.active != access and access == False and member.access >= mbraxx:
message.privmsg("remuser %s %s"%(home, member.name,), Config.get("Services", "nick"))
remnicks.append(member.name)
# message.privmsg("ban %s *!*@%s.%s GTFO, EAAD"%(home, member.name, Config.get("Services", "usermask"),), Config.get("Services", "nick"))
member.active = access
if not member.active:
CUT.untrack_user(member.name)
session.commit()
if addnicks:
message.privmsg("adduser %s %s 24" %(home, ",".join(addnicks),), Config.get("Services", "nick"))
message.reply("%s ha%s been added to %s"%(", ".join(addnicks), "ve" if len(addnicks) > 1 else "s", home,))
if remnicks:
message.reply("%s ha%s been removed from %s"%(", ".join(remnicks), "ve" if len(remnicks) > 1 else "s", home,))
if changed:
message.reply("Editted user%s %s access to %s" % ("s" if len(changed) > 1 else "", ", ".join(changed), access,))
| gpl-2.0 | -7,891,953,885,707,710,000 | 45.469388 | 156 | 0.593105 | false |
wkentaro/chainer | chainer/gradient_check.py | 1 | 42059 | from __future__ import absolute_import
import math
import warnings
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer import configuration
from chainer import testing
from chainer import utils
from chainer import variable
import chainerx
class NondifferentiableError(Exception):
pass
def _copy_arrays(xs):
xp = backend.get_array_module(*xs)
if xp is chainerx:
return [
None if x is None
else xp.array(x, dtype=numpy.float64, copy=True, device=x.device)
for x in xs]
else:
return [xp.array(x, dtype=numpy.float64, copy=True) for x in xs]
def _ones_like(arr):
device = backend.get_device_from_array(arr)
with chainer.using_device(device):
return device.xp.ones_like(arr)
def _make_outputs_props_in_error_message(outputs, grad_outputs):
return (
'Output shapes and dtypes : {}\n'
'Output gradient shapes and dtypes: {}'.format(
utils._format_array_props(outputs),
utils._format_array_props(grad_outputs)))
def _check_outputs_and_grad_outputs(outputs, grad_outputs):
if len(outputs) != len(grad_outputs):
raise ValueError(
'Output gradients must contain equally as many elements as '
'the number of output elements.\n'
'{}'.format(
_make_outputs_props_in_error_message(outputs, grad_outputs)))
shapes_match = True
dtypes_match = True
for y, gy in zip(outputs, grad_outputs):
if gy is None:
continue
if y is None and (gy == 0).all():
continue
if y.shape != gy.shape:
shapes_match = False
if y.dtype != gy.dtype:
dtypes_match = False
if not (shapes_match and dtypes_match):
raise ValueError(
'Shapes and/or dtypes of outputs and output gradients do not '
'match.\n'
'{}'.format(
_make_outputs_props_in_error_message(outputs, grad_outputs)))
def numerical_grad(
f, inputs, grad_outputs, eps=1e-3,
detect_nondifferentiable=False, diff_atol=0, diff_rtol=1e-2,
center_outputs=None):
"""Computes numerical gradient by finite differences.
This function is used to implement gradient check. For usage example, see
unit tests of :mod:`chainer.functions`.
By default, ``numerical_grad`` computes the gradient to the first order of
``eps``.
Args:
f (callable): Python function with no arguments that runs forward
computation and returns the result.
inputs (tuple of arrays): Tuple of arrays that should be treated as
inputs. Each element of them is slightly modified to realize
numerical gradient by finite differences.
grad_outputs (tuple of arrays or scalars): Tuple of arrays or scalars
that are treated as output gradients.
eps (float): Epsilon value of finite differences.
detect_nondifferentiable (bool):
``False`` by default.
If ``True``, ``numerical_grad`` checks whether ``f`` is
differentiable at ``inputs``.
It requires evaluation of ``f`` at 5 points instead of 2.
As a side effect, the accuracy of numerical gradient will be
increased to the third order of ``eps``.
If it turns out that ``f`` is non-differentiable at ``input``,
``numerical_grad`` raises
:class:`~chainer.gradient_check.NondifferentiableError`.
diff_atol (float):
Absolute tolerance of fitting error of non-differentiable point
detection.
diff_rtol (float):
Tolerance of fitting error of non-differentiable point detection
relative to the output values of ``f``.
center_outputs (tuple of arrays or None):
Only used if ``detect_nondifferentiable`` is ``True``.
If specified, these arrays are used as the outputs of ``f`` at
``inputs``.
Otherwise, it is calculated.
It can be used to reduce the computation if these arrays are
already calculated before calling ``numerical_grad``.
Returns:
tuple: Numerical gradient arrays corresponding to ``inputs``.
"""
# TODO(niboshi): Deprecate `center_outputs` argument.
# If dtype of this argument is not float64, often the resolution is
# insufficient for numerical gradient calculation. We might use it only
# when its dtype is float64, but it would be better to simply remove it.
center_outputs = None
assert eps > 0
assert isinstance(inputs, (tuple, list))
for x in inputs:
if x.dtype.kind != 'f':
raise RuntimeError(
'The dtype of input arrays must be kind of float')
inputs = tuple(inputs)
# Cast grad_outputs to float64
grad_outputs = tuple([
None if g is None
else numpy.float64(g) if numpy.isscalar(g)
else g.astype(numpy.float64)
for g in grad_outputs])
if not chainer.is_arrays_compatible(
[a for a in inputs + grad_outputs if not numpy.isscalar(a)]):
raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')
device = backend.get_device_from_array(*(inputs + grad_outputs))
xp = device.xp
if xp is cuda.cupy:
numerical_grad_kernel_1 = cuda.reduce(
'T y1, T y2, U gy, T eps', 'V gxi',
'(y1 - y2) * gy', 'a + b', 'gxi += a / (eps * 2)', '0',
'numerical_grad_kernel_1'
)
numerical_grad_kernel_3 = cuda.reduce(
'T y1, T y2, T y3, T y4, U gy, T eps', 'V gxi',
'(-y1 + 8 * y2 - 8 * y3 + y4) * gy',
'a + b', 'gxi += a / (eps * 6)', '0',
'numerical_grad_kernel_3'
)
if xp is chainerx:
grads = [
xp.zeros(x.shape, numpy.float64, device=x.device) for x in inputs]
else:
grads = [xp.zeros(x.shape, numpy.float64) for x in inputs]
if detect_nondifferentiable:
if center_outputs is None:
ys0 = _copy_arrays(f())
else:
ys0 = center_outputs
nout = len(ys0)
shapes = [y0.shape for y0 in ys0]
sizes = numpy.array([y0.size for y0 in ys0])
cumsizes = numpy.cumsum(sizes)
# Evaluate func at a single input
def eval_func(x, x_ind, delta, orig):
x[x_ind] = orig + delta
ys = _copy_arrays(f())
assert len(ys) == len(grad_outputs)
assert all([
gy is None
for y, gy in zip(ys, grad_outputs)
if y is None])
assert all([
gy is None or numpy.isscalar(gy) or y.shape == gy.shape
for y, gy in zip(ys, grad_outputs)])
x[x_ind] = orig
return ys
# An iteration on a single input displacement
def iterate_single_input(i_in, x, orig_x, x_ind):
orig = orig_x[x_ind]
# `yss` holds a list of output arrays for each of 2 or 5 sampling
# points.
if detect_nondifferentiable:
yss = [
eval_func(x, x_ind, -eps * 1., orig),
eval_func(x, x_ind, -eps * .5, orig),
ys0,
eval_func(x, x_ind, +eps * .5, orig),
eval_func(x, x_ind, +eps * 1., orig),
]
else:
yss = [
eval_func(x, x_ind, -eps * 1, orig),
eval_func(x, x_ind, +eps * 1, orig),
]
assert all([
y is None
or (y.shape == yss[0][i].shape and y.dtype == yss[0][i].dtype)
for ys in yss
for i, y in enumerate(ys)])
# If all the outputs are 0-size, skip non-differentiable check.
if all([y is None or y.size == 0 for y in yss[0]]):
detect_nondifferentiable_ = False
else:
detect_nondifferentiable_ = detect_nondifferentiable
if detect_nondifferentiable_:
# Detect non-differentiable point by quadratic fitting
# Check for non-finite output.
# If any single element in the output arrays has different
# finiteness among sampled points, that means this is a
# non-differentiable point.
# If the function consistently generates non-finite values
# around the point, we do not treat the point as
# non-differentiable.
# (Example: x<0 region for the logarithm function)
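# (Sketch of the fitting check performed further below: the five samples
#  f(x-eps), f(x-eps/2), f(x), f(x+eps/2), f(x+eps) are fit with a
#  degree-2 polynomial, and if the RMS residual exceeds
#  diff_atol + diff_rtol * (ymax - ymin) the point is flagged as
#  non-differentiable -- e.g. a kink such as abs() around 0 fits a
#  parabola poorly, while a smooth function does not.)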
any_nonfinite = False
for i_out in range(nout):
isfinites = [xp.isfinite(ys[i_out]) for ys in yss]
if any((isfinites[0] != isfinites[i]).any()
for i in range(1, len(yss))):
s = six.StringIO()
s.write(
'Tried to compute the numeric gradient on a '
'non-differentiable point.\n\n')
s.write('i_in: {}\n'.format(i_in))
s.write('i_out: {}\n'.format(i_out))
s.write('x: {}\n'.format(inputs[i_in]))
s.write('index on x: {}\n'.format(x_ind))
s.write('eps: {}\n'.format(eps))
s.write('y[x-eps ]: {}\n'.format(yss[0][i_out]))
s.write('y[x-eps/2]: {}\n'.format(yss[1][i_out]))
s.write('y[x ]: {}\n'.format(yss[2][i_out]))
s.write('y[x+eps/2]: {}\n'.format(yss[3][i_out]))
s.write('y[x+eps ]: {}\n'.format(yss[4][i_out]))
raise NondifferentiableError(s.getvalue())
any_nonfinite |= not all((_).all() for _ in isfinites)
if not any_nonfinite:
# Stack flattened outputs to make (5, *)-shaped 2D array
ystack = xp.vstack(
[xp.hstack([y.ravel() for y in ys]) for ys in yss])
assert ystack.ndim == 2 and ystack.shape[0] == len(yss)
# Fit to quadratic
if xp is not numpy:
ystack = _cpu._to_cpu(ystack)
polyfit = numpy.polynomial.polynomial.polyfit
_, (residuals, _, _, _) = polyfit(
range(len(yss)), ystack, deg=2, full=True)
if xp is not numpy:
residuals = device.send(residuals)
residuals = xp.sqrt(residuals / len(yss))
# Check for error for each output array
for i_out in range(nout):
size = sizes[i_out]
cumsize = cumsizes[i_out]
shape = shapes[i_out]
# TODO(niboshi): The following two lines could be
# rewritten using xp.stack, which is supported in
# NumPy>=1.10
ymax = xp.concatenate(
[ys[i_out][None] for ys in yss]).max(axis=0)
ymin = xp.concatenate(
[ys[i_out][None] for ys in yss]).min(axis=0)
# Restore the shape of flattened residual
res = residuals[cumsize - size:cumsize]
res = res.reshape(shape)
det = utils.force_array(
diff_atol + diff_rtol * (ymax - ymin) < res)
# Constant output = not nondifferentiable
det[ymax == ymin] = False
if det.any():
s = six.StringIO()
s.write(
'Tried to compute the numeric gradient on a '
'non-differentiable point.\n\n')
s.write('i_in: {}\n'.format(i_in))
s.write('i_out: {}\n'.format(i_out))
s.write('x: {}\n'.format(inputs[i_in]))
s.write('index on x: {}\n'.format(x_ind))
s.write('eps: {}\n'.format(eps))
s.write('diff_rtol: {}\n'.format(diff_rtol))
s.write('diff_atol: {}\n'.format(diff_atol))
s.write('ymax: {}\n'.format(ymax))
s.write('ymin: {}\n'.format(ymin))
s.write(
'diff_atol + diff_rtol * (ymax-ymin): {}\n'.format(
diff_atol + diff_rtol * (ymax - ymin)))
s.write('fitting errors: {}\n'.format(res))
s.write('y[x-eps ]: {}\n'.format(yss[0][i_out]))
s.write('y[x-eps/2]: {}\n'.format(yss[1][i_out]))
s.write('y[x ]: {}\n'.format(yss[2][i_out]))
s.write('y[x+eps/2]: {}\n'.format(yss[3][i_out]))
s.write('y[x+eps ]: {}\n'.format(yss[4][i_out]))
raise NondifferentiableError(s.getvalue())
# Calculate numerical gradient
for i_out, gy in enumerate(grad_outputs):
if gy is None:
continue
if not numpy.isscalar(gy):
gy = gy.astype(numpy.float64, copy=False)
gpu_ = (xp is cuda.cupy and
all(isinstance(ys[i_out], cuda.ndarray)
for ys in yss))
# If any output sample is None, all others must be.
assert all([
(yss[0][i_out] is None) == (yss[j][i_out] is None)
for j in range(len(yss))])
# If outputs samples are None, the part of numeric gradient for
# this output is considered as zero: skip the accumulation.
if yss[0][i_out] is None:
continue
if len(yss) == 2: # 1st order
y0 = yss[0][i_out]
y1 = yss[1][i_out]
if gpu_:
numerical_grad_kernel_1(
y1, y0, xp.asarray(gy), eps, gx[x_ind])
else:
dot = ((y1 - y0) * gy).sum()
gx[x_ind] = gx[x_ind] + dot / (2 * eps)
elif len(yss) == 5: # 3rd order
y0 = yss[0][i_out]
y1 = yss[1][i_out]
y2 = yss[3][i_out]
y3 = yss[4][i_out]
if gpu_:
numerical_grad_kernel_3(
y3, y2, y1, y0, gy, eps, gx[x_ind])
else:
num = -y3 + 8 * y2 - 8 * y1 + y0
dot = (num * gy).sum()
gx[x_ind] = gx[x_ind] + dot / (6 * eps)
else:
assert False
# Calculate numeric gradient
with configuration.using_config('type_check', False):
for i_in, (x, gx) in enumerate(six.moves.zip(inputs, grads)):
orig_x = x.copy() # hold original value
for x_ind in numpy.ndindex(x.shape):
iterate_single_input(i_in, x, orig_x, x_ind)
return [g.astype(x.dtype, copy=False)
for g, x in six.moves.zip(grads, inputs)]
def assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):
"""Asserts if some corresponding element of x and y differs too much.
This function can handle both CPU and GPU arrays simultaneously.
Args:
x: Left-hand-side array.
y: Right-hand-side array.
atol (float): Absolute tolerance.
rtol (float): Relative tolerance.
verbose (bool): If ``True``, it outputs verbose messages on error.
"""
warnings.warn(
'chainer.gradient_check.assert_allclose is deprecated. '
'Use chainer.testing.assert_allclose instead.',
DeprecationWarning)
testing.assert_allclose(x, y, atol, rtol, verbose)
def _as_tuple(xs):
if isinstance(xs, tuple):
return xs
elif isinstance(xs, list):
return tuple(xs)
else:
return xs,
class _CheckBackward(object):
def __init__(
self, func, xs, gys, params, eps, atol, rtol, no_gxs,
dtype, detect_nondifferentiable, is_immutable_params):
# If `is_immutable_params` is `False`, `params` are expected to be of
# type `chainer.Parameter` and are updated in-place.
# To run `_CheckBackward` with ChainerX ndarrays however which cannot
# be updated in-place when wrapped in `chainer.Parameter`s, this flag
# should be `True` and parameters should be given as ndarrays.
# `func` in the former case must take inputs as arguments only. In the
# latter, it must take the parameters in addition.
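# (Illustrative call shapes only, mirroring the two branches used in
#  _forward_for_backward_gradients below:
#    is_immutable_params=False -> ys = func(*xs)
#    is_immutable_params=True  -> ys = func(xs, params)
#  where xs are Variables and params are wrapped into chainer.Parameter.)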
if dtype is not None and numpy.dtype(dtype).kind != 'f':
raise ValueError('`dtype` is allowed only float type')
if is_immutable_params:
if not all(
isinstance(p, chainer.get_array_types()) for p in params):
raise ValueError(
'All parameters in `params` must be ndarrays if '
'`is_immutable_params` is `True`. Actual: {}.'.format(
', '.join(str(type(p)) for p in params)))
xs = _as_tuple(xs)
if gys is not None:
gys = _as_tuple(gys)
params = _as_tuple(params)
if no_gxs is None:
no_gxs = [None if x is None else x.dtype.kind != 'f' for x in xs]
else:
if len(no_gxs) != len(xs):
raise ValueError(
'Length of no_grads param and xs should be same.\n'
'Actual: {0} != {1}'.format(len(no_gxs), len(xs)))
device = backend.get_device_from_array(*xs)
if device.xp is chainerx:
if params and not is_immutable_params:
raise NotImplementedError(
'gradient_check does not support params argument for '
'ChainerX arrays')
self.device = device
self.func = func
self.xs = xs
self.gys = gys
self.params = params
self.no_gxs = no_gxs
self.atol = atol
self.rtol = rtol
self.is_immutable_params = is_immutable_params
# options for numeric gradients
self.eps = eps
self.dtype = dtype
self.detect_nondifferentiable = detect_nondifferentiable
def run(self):
with chainer.using_device(self.device):
self._run()
def _run(self):
# Run a forward pass for backward gradients.
# Uninitialized parameters may be initialized.
# If self.gys is None, it is also updated with 1s.
# This must be done before sampling a direction vector, because
# otherwise the shapes of uninitialized parameters wouldn't be
# determined.
xs_backward, ys, params_backward = (
self._forward_for_backward_gradients())
# Keep output arrays to save computation in numerical gradients
ys0 = tuple([None if y is None else y.array for y in ys])
# If gys is not given, generate the all-1 gradients.
if self.gys is None:
if not (len(ys) == 1 and ys[0].shape == ()):
raise ValueError(
'y_grad argument cannot be omitted if the target function '
'is not a loss function, which has a single output with '
'shape ().\n'
'Actual output shapes: {}'.format(
', '.join([str(y.shape) for y in ys])))
self.gys = tuple([_ones_like(y.array) for y in ys])
else:
_check_outputs_and_grad_outputs(ys, self.gys)
# Strike out gys corresponding to None y
self.gys = tuple([
None if y is None else gy for gy, y in zip(self.gys, ys0)])
# Sample a direction vector.
directions = self._sample_directions()
# Compute backward gradients by running a backward pass.
gx_backward = self._directional_backward_gradients(
xs_backward, ys, params_backward, directions)
# Compute numeric gradients
gx_numeric = self._directional_numeric_gradients(directions, ys0)
# Compare the resulted gradients
self._compare_gradients(gx_numeric, gx_backward, directions)
def _compare_gradients(self, gx_numeric, gx_backward, directions):
atol = self.atol
rtol = self.rtol
# Compare the gradients
try:
testing.assert_allclose(
gx_numeric, gx_backward, atol=atol, rtol=rtol)
except AssertionError as e:
eps = self.eps
xs = self.xs
gys = self.gys
f = six.StringIO()
f.write('check_backward failed (eps={} atol={} rtol={})\n'.format(
eps, atol, rtol))
for i, x in enumerate(xs):
f.write('inputs[{}]:\n'.format(i))
f.write('{}\n'.format(x))
for i, gy in enumerate(gys):
f.write('grad_outputs[{}]:\n'.format(i))
f.write('{}\n'.format(gy))
for i, d in enumerate(directions):
f.write('directions[{}]:\n'.format(i))
f.write('{}\n'.format(d))
f.write('gradients (numeric): {}\n'.format(gx_numeric))
f.write('gradients (backward): {}\n'.format(gx_backward))
f.write('\n')
f.write('x: numeric gradient, y: backward gradient')
f.write(str(e))
raise AssertionError(f.getvalue())
def _sample_directions(self):
# Samples a direction vector (list of arrays with the same shapes as
# input arrays and parameters)
device = self.device
xs = self.xs
params = self.params
no_gxs = self.no_gxs
xp = device.xp
direction_xs_shapes = [
None if x is None
else x.shape for x, no_gx in six.moves.zip(xs, no_gxs)
if not no_gx]
direction_param_shapes = [p.shape for p in params]
direction_shapes = direction_xs_shapes + direction_param_shapes
directions = [
None if shape is None
else xp.random.normal(size=shape) for shape in direction_shapes]
# The direction vector is normalized in order to keep the scale of
# differentiation error invariant with respect to the number of input
# dimensions. Ideally, the scale of the curvature with respect to each
# input dimension should be taken into account, but we ignore the
# differences and assume that the curvature is uniform with respect to
# all the input dimensions.
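# (Rationale sketch: check_backward compares a single directional
#  derivative. With a direction d = (d_1, ..., d_n), the backprop side
#  accumulates sum_i <g_i, d_i> from the analytic gradients g_i (seeded
#  with gys), while the numeric side differentiates t -> <gys, f(x + t*d)>
#  at t = 0; normalizing d keeps this comparison at a scale independent of
#  the number of input elements.)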
norm = math.sqrt(
sum([0 if d is None else xp.square(d).sum() for d in directions]))
if norm != 0:
# norm could be zero if input arrays are 0-sized.
scale = 1. / norm
directions = [None if d is None else d * scale for d in directions]
return directions
def _clear_grads(self, xs):
for x in xs:
if x is None:
continue
x.grad_var = None
def _forward_for_backward_gradients(self):
func = self.func
xs = self.xs
params = self.params
xs = [
None if x is None
else variable.Variable(x, requires_grad=x.dtype.kind == 'f')
for x in xs]
if self.is_immutable_params:
params = tuple([chainer.Parameter(p) for p in params])
ys = func(xs, params)
else:
ys = func(*xs)
ys = _as_tuple(ys)
# Clear gradients which may exist if func calls backward inside of
# itself.
self._clear_grads(xs)
self._clear_grads(params)
return xs, ys, params
def _directional_backward_gradients(self, xs, ys, params, directions):
no_gxs = self.no_gxs
gys = (
[None if gy is None
# Copy is needed to avoid being updated during backprop, which
# would affect the numerical gradient.
# TODO(niboshi): Preserve strides, for testing purpose.
else chainer.Variable(gy.copy(), requires_grad=False)
for gy in self.gys])
# Backward
chainer.backward(ys, gys)
for no_gx, x in six.moves.zip(no_gxs, xs):
if no_gx and x.grad is not None:
raise RuntimeError(
'gradient of int variable must be None')
grads = (
[None if x is None
else x.grad for x, no_gx in six.moves.zip(xs, no_gxs)
if not no_gx]
+ [p.grad for p in params])
gx_accum = 0
assert len(grads) == len(directions)
for g, direction in six.moves.zip(grads, directions):
if g is not None:
assert direction is not None
gx_accum += (g.astype(numpy.float64) * direction).sum()
return gx_accum
def _directional_numeric_gradients(self, directions, y0_data):
device = self.device
func = self.func
xs = self.xs
gys = self.gys
params = self.params
eps = self.eps
no_gxs = self.no_gxs
dtype = self.dtype
detect_nondifferentiable = self.detect_nondifferentiable
params_data = [
p if self.is_immutable_params else p.array for p in params]
xp = device.xp
x_vars = [variable.Variable(x, requires_grad=False) for x in xs]
xs_filtered = [
x.array for x, no_gx in six.moves.zip(x_vars, no_gxs) if not no_gx]
if dtype is None:
casted_data = [x for x in xs_filtered + params_data]
else:
if numpy.dtype(dtype).kind != 'f':
raise ValueError('`dtype` is allowed only float type')
# Even skipped variable must have the same dtype.
for x, no_gx in six.moves.zip(x_vars, no_gxs):
if no_gx and x.array.dtype.kind == 'f':
x.array = x.array.astype(dtype, copy=False)
casted_data = [
None if x is None else x.astype(dtype, copy=False)
for x in xs_filtered + params_data]
delta = xp.array(0., numpy.float64)
def g():
            # This function is called twice in `numerical_grad`.
# `delta` is `epsilon` or `-epsilon` in these calls.
# See the document of `numerical_grad`.
def perturb(data, direction):
if data is None:
assert direction is None
return data
data = (data.astype(numpy.float64)
+ delta * direction).astype(data.dtype)
if numpy.isscalar(data):
data = xp.array(data)
return data
# Input arrays
g_x_vars = []
j = 0
for x_var, no_gx in six.moves.zip(x_vars, no_gxs):
if no_gx:
g_x_vars.append(x_var)
else:
data = perturb(casted_data[j], directions[j])
g_x_vars.append(
None if data is None else variable.Variable(data))
j += 1
# Parameters
for i in range(len(params)):
data = perturb(casted_data[j + i], directions[j + i])
if self.is_immutable_params:
# Update the parameter array since it is converted into
# a Parameter just before calling the func.
params_data[i] = data
else:
# Update the given Parameter in-place since the object is
# held by the caller.
params[i].array = data
# Clear gradients to support func that calls backward inside of
# itself.
self._clear_grads(g_x_vars)
if not self.is_immutable_params:
self._clear_grads(params)
if self.is_immutable_params:
ps = tuple([chainer.Parameter(p) for p in params_data])
ys = func(g_x_vars, ps)
else:
ys = func(*g_x_vars)
ys = _as_tuple(ys)
ys_data = tuple([None if y is None else y.array for y in ys])
if xp is chainerx:
ys_data = tuple([
None if y is None else y.as_grad_stopped()
for y in ys_data])
if not self.is_immutable_params:
for i, param in enumerate(params):
param.array = casted_data[j + i]
return ys_data
gx, = numerical_grad(
g, (delta,), gys, eps=eps,
detect_nondifferentiable=detect_nondifferentiable,
center_outputs=y0_data, diff_atol=0, diff_rtol=self.rtol)
return gx
def check_backward(
func, x_data, y_grad, params=(),
eps=1e-3, atol=1e-5, rtol=1e-4, no_grads=None, dtype=None,
detect_nondifferentiable=False):
"""Test backward procedure of a given function.
This function automatically checks the backward-process of a given function
to ensure that the computed gradients are approximately correct.
For example, assuming you've defined a :class:`~chainer.FunctionNode` class
    ``MyFunc`` that takes two arguments and returns one value, you can wrap
    it in an ordinary function and check its gradient computations as follows:
.. code-block:: python
def func(xs):
y, = MyFunc().apply(xs)
return y
x1_data = xp.array(...)
x2_data = xp.array(...)
gy_data = xp.array(...)
check_backward(func, (x1_data, x2_data), gy_data)
This function creates :class:`~chainer.Variable` objects with ``x_data``
and calls ``func`` with the :class:`~chainer.Variable`\\ s to get its
result as :class:`~chainer.Variable`.
Then, it sets ``y_grad`` array to ``grad`` attribute of the result and
calls ``backward`` method to get gradients of the inputs.
    To check the correctness of the gradients, the function calls
    :func:`numerical_grad` to compute the gradients numerically and compares
    them with :func:`chainer.testing.assert_allclose`.
To reduce computational time, it uses directional derivative along a
random vector. A function
:math:`g: \\mathbb{R} \\rightarrow \\mathbb{R}^n` is defined as
:math:`g(\\delta) = f(x + \\delta r)`, where
:math:`\\delta \\in \\mathbb{R}`, :math:`r \\in \\mathbb{R}^n`
is a random vector
and :math:`f` is a function which you want to test.
Its gradient is
.. math::
g'(\\delta) = f'(x + \\delta r) \\cdot r.
Therefore, :math:`g'(0) = f'(x) \\cdot r`.
    So we can check the correctness of the backpropagation of :math:`f`
    indirectly by comparing the numerically computed gradient of :math:`g` at
    zero with :math:`f'(x) \\cdot r`, where :math:`f'(x)` is computed by
    backprop. If :math:`r` is chosen from a uniform distribution, we can
    conclude with high probability that the gradient of :math:`f` itself is
    correct.
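    As an illustration only (this snippet is not part of the API and uses
    plain NumPy with arbitrary names), the identity
    :math:`g'(0) = f'(x) \\cdot r` can be verified directly for the scalar
    function :math:`f(x) = \\sum_i x_i^2`:
    .. code-block:: python
        import numpy
        x = numpy.random.rand(5)
        r = numpy.random.rand(5)
        eps = 1e-6
        g = lambda delta: ((x + delta * r) ** 2).sum()  # g(delta) = f(x + delta*r)
        numeric = (g(eps) - g(-eps)) / (2 * eps)        # numerical g'(0)
        analytic = (2 * x * r).sum()                    # f'(x) . r
        assert numpy.isclose(numeric, analytic, atol=1e-4)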
If the function is non-differentiable with respect to some input objects,
we can check its backprop to such objects by ``no_grads`` argument.
    ``gradient_check`` computes numerical gradients only for inputs that
    correspond to ``False`` in ``no_grads``. It also asserts that the backprop
    leaves gradients ``None`` for inputs that correspond to ``True`` in
    ``no_grads``.
    The default of the ``no_grads`` argument is a tuple of truth values that
    indicate whether the input objects (``x1_data`` and/or ``x2_data`` in this
    example) represent integer variables.
You can simplify a test when ``MyFunc`` gets only one argument:
.. code-block:: python
check_backward(func, x1_data, gy_data)
If ``MyFunc`` is a loss function which returns a zero-dimensional
array, pass ``None`` to ``gy_data``. In this case, it sets ``1`` to
``grad`` attribute of the result:
.. code-block:: python
check_backward(my_loss_func,
(x1_data, x2_data), None)
If ``MyFunc`` returns multiple outputs, pass all gradients for outputs
as a tuple:
.. code-block:: python
gy1_data = xp.array(...)
gy2_data = xp.array(...)
check_backward(func, x1_data, (gy1_data, gy2_data))
You can also test a :class:`~chainer.Link`.
    To check gradients of parameters of the link, set a tuple of the parameters
    to the ``params`` argument:
.. code-block:: python
check_backward(my_link, (x1_data, x2_data), gy_data,
(my_link.W, my_link.b))
Note that ``params`` are not ``ndarray``\\ s,
    but :class:`~chainer.Variable`\\ s.
Function objects are acceptable as ``func`` argument:
.. code-block:: python
check_backward(lambda x1, x2: f(x1, x2),
(x1_data, x2_data), gy_data)
.. note::
``func`` is called many times to get numerical gradients for all inputs.
    This function does not work correctly when ``func`` behaves randomly,
    because the numerical gradients would then differ from call to call.
Args:
func (callable): A function which gets :class:`~chainer.Variable`\\ s
        and returns :class:`~chainer.Variable`\\ s. ``func`` must return
a tuple of :class:`~chainer.Variable`\\ s or one
:class:`~chainer.Variable`. You can use a
:class:`~chainer.Function`, :class:`~chainer.FunctionNode` or a
:class:`~chainer.Link` object or any other function satisfying the
condition.
x_data (ndarray or tuple of ndarrays): A set of ``ndarray``\\ s to be
passed to ``func``. If ``x_data`` is one ``ndarray`` object, it is
treated as ``(x_data,)``.
y_grad (ndarray or tuple of ndarrays or None):
A set of ``ndarray``\\ s representing gradients of return-values of
``func``. If ``y_grad`` is one ``ndarray`` object, it is
treated as ``(y_grad,)``. If ``func`` is a loss-function,
``y_grad`` should be set to ``None``.
        params (~chainer.Variable or tuple of ~chainer.Variable):
A set of :class:`~chainer.Variable`\\ s whose gradients are
checked. When ``func`` is a :class:`~chainer.Link` object,
set its parameters as ``params``.
If ``params`` is one :class:`~chainer.Variable` object,
it is treated as ``(params,)``.
eps (float): Epsilon value to be passed to :func:`numerical_grad`.
atol (float): Absolute tolerance to be passed to
:func:`chainer.testing.assert_allclose`.
rtol (float): Relative tolerance to be passed to
:func:`chainer.testing.assert_allclose`.
        no_grads (list of bool): Flags to skip variables for gradient assertion.
            It should be the same length as ``x_data``.
        dtype (~numpy.dtype): ``x_data``, ``y_grad`` and ``params`` are cast
to this dtype when calculating numerical gradients. Only float
types and ``None`` are allowed.
detect_nondifferentiable (bool):
            If ``True``, the check for non-differentiable inputs is enabled.
If ``func`` is non-differentiable at ``x_data``, ``check_backward``
raises :class:`~chainer.gradient_check.NondifferentiableError`.
.. seealso::
:func:`numerical_grad`
"""
_CheckBackward(
func, x_data, y_grad, params, eps, atol, rtol, no_grads, dtype,
detect_nondifferentiable, is_immutable_params=False
).run()
def _check_backward_with_params(
# This function was introduced along with the `is_immutable_params`
# argument to `_CheckBackward`.
# It allows passing `params` as ndarrays instead of `Parameter`s and thus
# depends less on the state of the parameter held by the caller.
# It is required by the `LinkTestCase` to check ChainerX parameter
    # gradients, since those parameters cannot be perturbed in-place for the
    # numerical gradients if passed as `Parameter`s, as those requiring
# gradients cannot be updated in-place.
func, x_data, y_grad, params=(),
eps=1e-3, atol=1e-5, rtol=1e-4, no_grads=None, dtype=None,
detect_nondifferentiable=False):
assert all(isinstance(p, chainer.get_array_types()) for p in params)
_CheckBackward(
func, x_data, y_grad, params, eps, atol, rtol, no_grads, dtype,
detect_nondifferentiable, is_immutable_params=True
).run()
def check_double_backward(func, x_data, y_grad, x_grad_grad, params=(),
params_grad_grad=(), eps=1e-3, atol=1e-4, rtol=1e-3,
no_grads=None, dtype=None,
detect_nondifferentiable=False):
"""Test twice differentiation of a given procedure.
This function automatically checks if the backward procedure of ``func``
is correctly implemented for further differentiation. It first computes the
gradient of ``func`` w.r.t. its inputs in the same way as
:func:`~chainer.gradient_check.check_backward`. This function then further
invokes the backward procedure against the gradient variables, starting
from the initial gradient given by ``x_grad_grad``. It also computes the
second gradient using :func:`~chainer.gradient_check.numerical_grad`. The
resulting gradients are compared to confirm if the second-order gradients
are approximately correct.
Note that this function **DOES NOT** check if the first-order
differentiation is correct; the numerical gradient assumes that the
first-order gradient given by the usual :meth:`chainer.Variable.backward`
is correct. The implementation of each differentiable function should be
tested by :func:`~chainer.gradient_check.check_backward` first, and then
    should be tested by this function if necessary.
For the details of the arguments, see
:func:`~chainer.gradient_check.check_backward`. The additional arguments
``x_grad_grad`` and ``params_grad_grad`` are (tuples of)
:class:`~chainer.Variable` (s) that include the initial gradient
corresponding to the first-order gradient of each input and parameter. Note
that the default error tolerance ``atol`` and ``rtol`` are slightly larger
than those of :func:`~chainer.gradient_check.check_backward` because the
numerical gradients of the second order differentiation are less accurate
than those of the first order gradients.
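    For example, assuming input, gradient and double-gradient arrays prepared
    as for :func:`~chainer.gradient_check.check_backward` (the names below are
    placeholders), a typical call might look like:
    .. code-block:: python
        x_data = xp.array(...)
        gy_data = xp.array(...)
        ggx_data = xp.array(...)
        check_double_backward(func, x_data, gy_data, ggx_data)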
"""
# Rename variables
xs = x_data
gys = y_grad
ggxs = x_grad_grad
ggparams = params_grad_grad
no_gxs = no_grads
del x_data
del y_grad
del x_grad_grad
del params_grad_grad
del no_grads
xs = _as_tuple(xs)
params = _as_tuple(params)
gys = _as_tuple(gys)
ggxs = _as_tuple(ggxs)
ggparams = _as_tuple(ggparams)
n_x = len(xs)
first_order_no_gxs = [x.dtype.kind != 'f' for x in xs]
def first_order_grad(*inputs):
xs = inputs[:n_x]
gys = inputs[n_x:]
ys = _as_tuple(func(*xs))
# `gys` (inputs to `first_order_grad` forward function) may have been
# casted to float64 by `numerical_grad`. For certain functions demoting
# the dtypes (e.g. `F.cast` that casts to float16), the dtypes of `ys`
# (e.g. outputs of `F.cast`) and `gys` (e.g. given by `numerical_grad`)
# may mismatch and we need to align those dtypes here.
gys = [
None if gy is None
else chainer.functions.cast(gy, y.dtype) for y, gy in zip(ys, gys)]
_check_outputs_and_grad_outputs(ys, gys)
chainer.backward(ys, gys, enable_double_backprop=True)
gxs = []
errors = []
for i, (no_gx, x) in enumerate(six.moves.zip(first_order_no_gxs, xs)):
if no_gx:
if x.grad is not None:
errors.append(
                    '[{}]: Gradient was calculated although it was expected not to be.'
.format(i))
else:
if x.grad is None:
gxs.append(None)
else:
gxs.append(x.grad_var)
if len(errors) > 0:
f = six.StringIO()
f.write('There are errors retrieving first-order gradients:\n')
f.write('Inputs: {}\n'.format(utils._format_array_props(xs)))
f.write('Skip: {}\n'.format(
', '.join(str(no_gx) for no_gx in first_order_no_gxs)))
f.write('Errors:\n')
for error in errors:
f.write('{}\n'.format(error))
raise RuntimeError(f.getvalue())
return tuple(gxs + [p.grad_var for p in params])
inputs = xs + gys
grad_grad = ggxs + ggparams
try:
check_backward(first_order_grad, inputs, grad_grad, params=params,
eps=eps, atol=atol, rtol=rtol, no_grads=no_gxs,
dtype=dtype,
detect_nondifferentiable=detect_nondifferentiable)
except AssertionError as e:
f = six.StringIO()
f.write('check_double_backward failed '
'(eps={} atol={} rtol={})\n'.format(eps, atol, rtol))
for i, x in enumerate(xs):
f.write('input[{}]:\n'.format(i))
f.write('{}\n'.format(x))
for i, gy in enumerate(gys):
f.write('grad_output[{}]:\n'.format(i))
f.write('{}\n'.format(gy))
for i, ggx in enumerate(ggxs):
f.write('grad_grad_input[{}]:\n'.format(i))
f.write('{}\n'.format(ggx))
for i, ggp in enumerate(ggparams):
f.write('grad_grad_param[{}]:\n'.format(i))
f.write('{}\n'.format(ggp))
f.write('\n')
f.write(str(e))
utils._raise_from(AssertionError, f.getvalue(), e)
| mit | 3,402,096,022,491,628,000 | 39.094376 | 79 | 0.555315 | false |
code-for-india/sahana_shelter_worldbank | private/templates/Sandy/controllers.py | 1 | 8864 | # -*- coding: utf-8 -*-
from os import path
from gluon import *
from gluon.storage import Storage
from s3 import *
# =============================================================================
class index():
""" Custom Home Page """
def __call__(self):
auth = current.auth
if auth.is_logged_in():
# Redirect to Map
redirect(URL(c="hms", f="hospital", args=["map"]))
request = current.request
response = current.response
response.title = current.deployment_settings.get_system_name()
T = current.T
db = current.db
s3db = current.s3db
s3 = response.s3
appname = request.application
settings = current.deployment_settings
# Check logged in and permissions
roles = current.session.s3.roles
system_roles = auth.get_system_roles()
AUTHENTICATED = system_roles.AUTHENTICATED
if AUTHENTICATED in roles and \
auth.s3_has_permission("read", s3db.hms_hospital):
hospital_items = self.hospital()
datatable_ajax_source = "/%s/default/hospital.aadata" % \
appname
s3.actions = None
hospital_box = DIV(H3(T("Hospitals")),
A(T("Create Hospital"),
_href = URL(c="hms", f="hospital",
args=["create"]),
_id = "add-btn",
_class = "action-btn",
_style = "margin-right:10px;"),
hospital_items,
_id = "org_box",
_class = "menu_box fleft"
)
else:
hospital_box = ""
datatable_ajax_source = ""
item = ""
if settings.has_module("cms"):
table = s3db.cms_post
item = db(table.module == "default").select(table.body,
limitby=(0, 1)).first()
if item:
item = DIV(XML(item.body))
else:
item = ""
# Login/Registration forms
self_registration = settings.get_security_self_registration()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
if AUTHENTICATED not in roles:
# This user isn't yet logged-in
if request.cookies.has_key("registered"):
# This browser has logged-in before
registered = True
if self_registration:
# Provide a Registration box on front page
register_form = auth.s3_registration_form()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
if request.env.request_method == "POST":
post_script = \
'''$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')'''
else:
post_script = ""
register_script = \
'''$('#register-btn').attr('href','#register')
$('#login-btn').attr('href','#login')
%s
$('#register-btn').click(function(){
$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')
})
$('#login-btn').click(function(){
$('#register_form').addClass('hide')
$('#login_form').removeClass('hide')
})''' % post_script
s3.jquery_ready.append(register_script)
# Provide a login box on front page
request.args = ["login"]
auth.messages.submit_button = T("Login")
login_form = auth()
login_div = DIV(H3(T("Login")),
P(XML(T("Registered users can %(login)s to access the system") % \
dict(login=B(T("login"))))))
if settings.frontpage.rss:
s3.external_stylesheets.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css")
s3.scripts.append("http://www.google.com/jsapi?key=notsupplied-wizard")
s3.scripts.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js")
counter = 0
feeds = ""
for feed in settings.frontpage.rss:
counter += 1
feeds = "".join((feeds,
"{title:'%s',\n" % feed["title"],
"url:'%s'}" % feed["url"]))
# Don't add a trailing comma for old IEs
if counter != len(settings.frontpage.rss):
feeds += ",\n"
# feedCycleTime: milliseconds before feed is reloaded (5 minutes)
feed_control = "".join(('''
function LoadDynamicFeedControl(){
var feeds=[
''', feeds, '''
]
var options={
feedCycleTime:300000,
numResults:5,
stacked:true,
horizontal:false,
title:"''', str(T("News")), '''"
}
new GFdynamicFeedControl(feeds,'feed-control',options)
}
google.load('feeds','1')
google.setOnLoadCallback(LoadDynamicFeedControl)'''))
s3.js_global.append(feed_control)
view = path.join(request.folder, "private", "templates",
"Sandy", "views", "index.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP(404, "Unable to open Custom View: %s" % view)
return dict(title = response.title,
item = item,
hospital_box = hospital_box,
r = None, # Required for dataTable to work
datatable_ajax_source = datatable_ajax_source,
self_registration=self_registration,
registered=registered,
login_form=login_form,
login_div=login_div,
register_form=register_form,
register_div=register_div
)
# -------------------------------------------------------------------------
@staticmethod
def hospital():
"""
Function to handle pagination for the hospitals list
on the homepage
"""
request = current.request
get_vars = request.get_vars
resource = current.s3db.resource("hms_hospital")
totalrows = resource.count()
if "iDisplayLength" in get_vars:
display_length = int(request.get_vars["iDisplayLength"])
else:
display_length = 10
limit = 4 * display_length
list_fields = ["id", "name"]
filter, orderby, left = resource.datatable_filter(list_fields,
get_vars)
resource.add_filter(filter)
data = resource.select(list_fields,
start=0,
limit=limit,
orderby=orderby,
left=left,
count=True,
represent=True)
filteredrows = data["numrows"]
rfields = data["rfields"]
rows = data["rows"]
dt = S3DataTable(rfields, rows)
dt.defaultActionButtons(resource)
current.response.s3.no_formats = True
if request.extension == "html":
items = dt.html(totalrows,
totalrows,
"hospital_list_1",
dt_displayLength=display_length,
dt_ajax_url=URL(c="default",
f="hospital",
extension="aadata",
vars={"id": "hospital_list_1"},
),
dt_pagination="true",
)
elif request.extension == "aadata":
if "sEcho" in request.vars:
echo = int(request.vars.sEcho)
else:
echo = None
items = dt.json(totalrows,
filteredrows,
"hospital_list_1",
echo)
else:
from gluon.http import HTTP
raise HTTP(501, current.ERROR.BAD_FORMAT)
return items
# END =========================================================================
| mit | 2,980,182,400,315,720,700 | 36.880342 | 118 | 0.463109 | false |
M0n0xy2/mpsi_python_cours | python/charles/mini-projet 3/PPCLS.py | 1 | 2272 | # -*- coding: utf-8 -*-
# ***************************
# PCSIB DM3 2015
# Nom: Dizier Charles
# ****************************
import random
equi_num_object = {
1: "pierre",
2: "papier",
3: "ciseaux",
4: "lezard",
5: "spock"
}
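# Maps "player_move, ia_move" to True when the player's move wins the duel
# (semantics inferred from combat() below; e.g. "pierre, ciseaux" is True
# because rock crushes scissors).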
winner_dict = {
"papier, ciseaux": False,
"ciseaux, papier": True,
"papier, pierre": True,
"pierre, papier": False,
"pierre, lezard": True,
"lezard, pierre": False,
"lezard, spock": True,
"spock, lezard": False,
"spock, ciseaux": True,
"ciseaux, spock": False,
"lezard, ciseaux": False,
"ciseaux, lezard": True,
"papier, lezard": False,
"lezard, papier": True,
"papier, spock": True,
"spock, papier": False,
"spock, pierre": True,
"pierre, spock": False,
"pierre, ciseaux": True,
"ciseaux, pierre": True
}
def combat(player, ia):
global ia_score, player_score
combat_str = "{}, {}".format(equi_num_object[player], equi_num_object[ia])
player_winner = winner_dict[combat_str]
if player_winner:
player_score += 1
elif not player_winner:
ia_score += 1
def enregistrevainqueur():
    # Use a context manager so the results file is closed after writing.
    with open("resultatsPPCLS.txt", "a") as out_file:
        if ia_score > player_score:
            winner = '"IA"'
            winner_score, looser_score = ia_score, player_score
        else:
            winner = '"Joueur"'
            winner_score, looser_score = player_score, ia_score
        print("Vainqueur : {} sur un score de {} contre {} points".format(winner, winner_score, looser_score), file=out_file)
if __name__ == "__main__":
ia_score = 0
player_score = 0
while ia_score < 5 and player_score < 5:
print("Choix de jeu :")
for num, obj in equi_num_object.items():
print("{} pour {}".format(num, obj))
player_play = int(input('Entrez 1,2,3,4 ou 5 selon votre choix de jeu: '))
ia_play = random.randint(1, 5)
if player_play == ia_play:
print("Egalité ! On recommence")
continue
combat(player_play, ia_play)
print("===================")
print("Scores actuels : ")
print("Joueur: {} point(s)".format(player_score))
print("IA: {} point(s)".format(ia_score))
print("===================")
enregistrevainqueur()
| mit | 5,174,479,759,390,140,000 | 27.3875 | 121 | 0.547336 | false |
franziz/artagger | artagger/Utility/Eval.py | 1 | 2652 | # -*- coding: utf-8 -*-
import os
import sys
os.chdir("../")
sys.setrecursionlimit(100000)
sys.path.append(os.path.abspath(""))
os.chdir("./Utility")
from Utility.Utils import getWordTag, readDictionary
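# Both corpora are read as whitespace-separated word/TAG tokens and compared
# position by position, so the two files must contain the same number of tokens.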
def computeAccuracy(goldStandardCorpus, taggedCorpus):
tagged = open(taggedCorpus, "r").read().split()
goldStandard = open(goldStandardCorpus, "r").read().split()
if len(tagged) != len(goldStandard):
print("The numbers of word tokens in %s and %s are not equal!" % (goldStandardCorpus, taggedCorpus))
return 0
numwords = 0
count = 0
for i in range(len(tagged)):
numwords += 1
word1, tag1 = getWordTag(tagged[i])
word2, tag2 = getWordTag(goldStandard[i])
if word1 != word2 and word1 != "''" and word2 != "''":
print("Words are not the same in gold standard and tagged corpora, at the index", i)
return 0
if tag1.lower() == tag2.lower():
count += 1
return count * 100.0 / numwords
def computeAccuracies(fullDictFile, goldStandardCorpus, taggedCorpus):
"""
Return known-word accuracy, unknown-word accuracy and the overall accuracy
"""
tagged = open(taggedCorpus, "r").read().split()
goldStandard = open(goldStandardCorpus, "r").read().split()
if len(tagged) != len(goldStandard):
print("The numbers of word tokens in %s and %s are not equal!" % (goldStandardCorpus, taggedCorpus))
return 0
fullDICT = readDictionary(fullDictFile)
numwords = count = 0
countKN = countUNKN = 0
countCorrectKN = countCorrectUNKN = 0
for i in range(len(tagged)):
numwords += 1
word1, tag1 = getWordTag(tagged[i])
word2, tag2 = getWordTag(goldStandard[i])
if word1 != word2 and word1 != "''" and word2 != "''":
print("Words are not the same in gold standard and tagged corpora, at the index", i)
return 0
if tag1.lower() == tag2.lower():
count += 1
if word1 in fullDICT:
countKN += 1
if tag1.lower() == tag2.lower():
countCorrectKN += 1
else:
countUNKN += 1
if tag1.lower() == tag2.lower():
countCorrectUNKN += 1
if countUNKN == 0:
return countCorrectKN * 100.0 / countKN, 0.0, count * 100.0 / numwords
else:
return countCorrectKN * 100.0 / countKN, countCorrectUNKN * 100.0 / countUNKN, count * 100.0 / numwords
if __name__ == "__main__":
print(computeAccuracy(sys.argv[1], sys.argv[2]), "%")
pass
| apache-2.0 | -2,303,381,570,509,057,000 | 33.441558 | 111 | 0.584087 | false |
tsbischof/photon_correlation | scripts/plot_intensity.py | 1 | 2283 | #!/usr/bin/env python3
import csv
import sys
import argparse
import matplotlib.pyplot as plt
import photon_correlation as pc
def intensity_from_stream(stream):
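    # Each CSV row is assumed to hold the two bin edges (in the file's native
    # time units) followed by one count column per detection channel.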
for line in csv.reader(stream):
time_left = int(line[0])
time_right = int(line[1])
counts = map(int, line[2:])
yield(((time_left, time_right), counts))
def plot_intensity(intensity, mode="t2"):
plt.clf()
if mode == "t2":
times = list(map(lambda x: float(x[0][0])/1e12, intensity))
counts = list(map(
lambda x: list(map(
lambda y: float(y)/(x[0][1]-x[0][0])*10**12,
x[1])),
intensity))
for i in range(len(counts[0])):
plt.plot(times,
list(map(lambda x: x[i], counts)),
label=str(i))
plt.xlabel("Time/s")
plt.ylabel("PL intensity/(counts/second)")
elif mode == "t3":
times = list(map(lambda x: float(x[0][0]), intensity))
counts = list(map(
lambda x: list(map(
lambda y: float(y)/(x[0][1]-x[0][0]),
x[1])),
intensity))
for i in range(len(counts[0])):
plt.plot(times,
list(map(lambda x: x[i], counts)),
label=str(i))
plt.xlabel("Pulse number")
plt.ylabel("PL intensity/(counts/pulse)")
else:
raise(ValueError("Unknown mode: {0}".format(mode)))
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Plot an intensity.")
parser.add_argument("--mode", default="t2", type=str,
help="Mode of the photons, either t2 or t3.")
parser.add_argument("files", type=str, nargs="*",
help="Filenames containing g2 data to plot.")
args = parser.parse_args()
for filename in args.files:
intensity = pc.Intensity(filename=filename)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
intensity.normalized().add_to_axes(ax)
plt.show(fig)
# with open(filename) as stream_in:
# intensity = list(intensity_from_stream(stream_in))
#
# plot_intensity(intensity, mode=args.mode)
| bsd-3-clause | 7,184,816,256,500,360,000 | 28.269231 | 70 | 0.526938 | false |
rlindner81/pyload | module/plugins/hoster/ZbigzCom.py | 1 | 4492 | # -*- coding: utf-8 -*-
import random
import re
import time
import urlparse
from module.plugins.internal.Hoster import Hoster
from module.plugins.internal.misc import json
class ZbigzCom(Hoster):
__name__ = "ZbigzCom"
__type__ = "hoster"
__version__ = "0.02"
__status__ = "testing"
__pattern__ = r'https?://.+\.torrent|magnet:\?.+'
__config__ = [("activated", "bool", "Activated", False)]
__description__ = """Zbigz.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT}yahoo[DOT]com")]
def jquery_call(self, url, file_id, call_id, **kwargs):
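        # Emulate the site's jQuery JSONP requests: build a "jQuery<id>_<millis>"
        # callback name, send it with the query parameters, then extract the
        # JSON payload passed to that callback in the response.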
current_millis = int(time.time() * 1000)
json_callback = "jQuery" + call_id + "_" + str(current_millis)
urlp = urlparse.urlparse(url)
get_params = kwargs.copy()
get_params.update(urlparse.parse_qs(urlp.query))
get_params['hash'] = file_id
get_params['jsoncallback'] = json_callback
get_params['_'] = current_millis
jquery_data = self.load(
urlp.scheme +
"://" +
urlp.netloc +
urlp.path,
get=get_params)
m = re.search("%s\((.+?)\);" % json_callback, jquery_data)
return json.loads(m.group(1)) if m else None
def sleep(self, sec):
for _i in range(sec):
if self.pyfile.abort:
break
time.sleep(1)
def process(self, pyfile):
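        # Overall flow: submit the torrent/magnet URL, poll info.php until the
        # remote download completes, then fetch either the single file directly
        # or a zip of all files (polling zipstate.php until the zip is ready).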
self.data = self.load("http://m.zbigz.com/myfiles",
post={'url': pyfile.url})
if "Error. Only premium members are able to download" in self.data:
self.fail(_("File can be downloaded by premium users only"))
m = re.search(r'&hash=(\w+)"', self.data)
if m is None:
self.fail("Hash not found")
file_id = m.group(1)
call_id = "".join([random.choice("0123456789") for _x in range(20)])
self.pyfile.setCustomStatus("torrent")
self.pyfile.setProgress(0)
json_data = self.jquery_call(
"http://m.zbigz.com/core/info.php", file_id, call_id)
if json_data is None:
self.fail("Unexpected jQuery response")
if 'faultString' in json_data:
self.fail(json_data['faultString'])
pyfile.name = json_data['info']['name'] + \
(".zip" if len(json_data['files']) > 1 else "")
pyfile.size = json_data['info']['size']
while True:
json_data = self.jquery_call(
"http://m.zbigz.com/core/info.php", file_id, call_id)
if json_data is None:
self.fail("Unexpected jQuery response")
if 'faultString' in json_data:
self.fail(json_data['faultString'])
progress = int(json_data['info']['progress'])
pyfile.setProgress(progress)
if json_data['info']['state'] != "downloading" or progress == 100:
break
self.sleep(5)
pyfile.setProgress(100)
if len(json_data['files']) == 1:
download_url = "http://m.zbigz.com/file/%s/0" % file_id
else:
self.data = self.load("http://m.zbigz.com/file/%s/-1" % file_id)
m = re.search(
r'\'(http://\w+.zbigz.com/core/zipstate.php\?hash=%s&did=(\w+)).+?\'' %
file_id, self.data)
if m is None:
self.fail("Zip state URL not found")
zip_status_url = m.group(1)
download_id = m.group(2)
m = re.search(
r'\'(http://\w+.zbigz.com/z/%s/.+?)\'' %
download_id, self.data)
if m is None:
self.fail("Zip download URL not found")
download_url = m.group(1)
self.pyfile.setCustomStatus("zip")
self.pyfile.setProgress(0)
while True:
json_data = self.jquery_call(zip_status_url, file_id, call_id)
if json_data is None:
self.fail("Unexpected jQuery response")
if 'faultString' in json_data:
self.fail(json_data['faultString'])
progress = int(json_data['proc'])
self.pyfile.setProgress(progress)
if progress == 100:
break
self.sleep(5)
self.download(download_url)
self.load("http://m.zbigz.com/delete.php?hash=%s" % file_id)
| gpl-3.0 | 4,467,117,906,312,538,000 | 29.557823 | 87 | 0.518923 | false |
jptomo/rpython-lang-scheme | rpython/rtyper/rclass.py | 1 | 47390 | import sys
import types
from rpython.flowspace.model import Constant
from rpython.flowspace.operation import op
from rpython.annotator import description, model as annmodel
from rpython.rlib.objectmodel import UnboxedValue
from rpython.tool.pairtype import pairtype, pair
from rpython.tool.identity_dict import identity_dict
from rpython.tool.flattenrec import FlattenRecursion
from rpython.rtyper.extregistry import ExtRegistryEntry
from rpython.rtyper.error import TyperError
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lltype import (
Ptr, Struct, GcStruct, malloc, cast_pointer, castable, nullptr,
RuntimeTypeInfo, getRuntimeTypeInfo, typeOf, Void, FuncType, Bool, Signed,
functionptr)
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.lltypesystem import rstr
from rpython.rtyper.rmodel import (
Repr, getgcflavor, inputconst, warning, mangle)
class FieldListAccessor(object):
def initialize(self, TYPE, fields):
assert type(fields) is dict
self.TYPE = TYPE
self.fields = fields
for x in fields.itervalues():
assert isinstance(x, ImmutableRanking)
def all_immutable_fields(self):
result = set()
for key, value in self.fields.iteritems():
if value in (IR_IMMUTABLE, IR_IMMUTABLE_ARRAY):
result.add(key)
return result
def __repr__(self):
return '<FieldListAccessor for %s>' % getattr(self, 'TYPE', '?')
class ImmutableRanking(object):
def __init__(self, name, is_immutable):
self.name = name
self.is_immutable = is_immutable
def __nonzero__(self):
return self.is_immutable
def __repr__(self):
return '<%s>' % self.name
IR_MUTABLE = ImmutableRanking('mutable', False)
IR_IMMUTABLE = ImmutableRanking('immutable', True)
IR_IMMUTABLE_ARRAY = ImmutableRanking('immutable_array', True)
IR_QUASIIMMUTABLE = ImmutableRanking('quasiimmutable', False)
IR_QUASIIMMUTABLE_ARRAY = ImmutableRanking('quasiimmutable_array', False)
class ImmutableConflictError(Exception):
"""Raised when the _immutable_ or _immutable_fields_ hints are
not consistent across a class hierarchy."""
def getclassrepr(rtyper, classdef):
if classdef is None:
return rtyper.rootclass_repr
result = classdef.repr
if result is None:
result = classdef.repr = ClassRepr(rtyper, classdef)
rtyper.add_pendingsetup(result)
return result
def getinstancerepr(rtyper, classdef, default_flavor='gc'):
if classdef is None:
flavor = default_flavor
else:
flavor = getgcflavor(classdef)
try:
result = rtyper.instance_reprs[classdef, flavor]
except KeyError:
result = buildinstancerepr(rtyper, classdef, gcflavor=flavor)
rtyper.instance_reprs[classdef, flavor] = result
rtyper.add_pendingsetup(result)
return result
def buildinstancerepr(rtyper, classdef, gcflavor='gc'):
from rpython.rtyper.rvirtualizable import VirtualizableInstanceRepr
if classdef is None:
unboxed = []
virtualizable = False
else:
unboxed = [subdef for subdef in classdef.getallsubdefs() if
subdef.classdesc.pyobj is not None and
issubclass(subdef.classdesc.pyobj, UnboxedValue)]
virtualizable = classdef.classdesc.read_attribute(
'_virtualizable_', Constant(False)).value
config = rtyper.annotator.translator.config
usetagging = len(unboxed) != 0 and config.translation.taggedpointers
if virtualizable:
assert len(unboxed) == 0
assert gcflavor == 'gc'
return VirtualizableInstanceRepr(rtyper, classdef)
elif usetagging:
# the UnboxedValue class and its parent classes need a
# special repr for their instances
if len(unboxed) != 1:
raise TyperError("%r has several UnboxedValue subclasses" % (
classdef,))
assert gcflavor == 'gc'
from rpython.rtyper.lltypesystem import rtagged
return rtagged.TaggedInstanceRepr(rtyper, classdef, unboxed[0])
else:
return InstanceRepr(rtyper, classdef, gcflavor)
class MissingRTypeAttribute(TyperError):
pass
# ____________________________________________________________
#
# There is one "vtable" per user class, with the following structure:
# A root class "object" has:
#
# struct object_vtable {
# // struct object_vtable* parenttypeptr; not used any more
# RuntimeTypeInfo * rtti;
# Signed subclassrange_min; //this is also the id of the class itself
# Signed subclassrange_max;
# RPyString * name;
# struct object * instantiate();
# }
#
# Every other class X, with parent Y, has the structure:
#
# struct vtable_X {
# struct vtable_Y super; // inlined
# ... // extra class attributes
# }
# The type of the instances is:
#
# struct object { // for the root class
# struct object_vtable* typeptr;
# }
#
# struct X {
# struct Y super; // inlined
# ... // extra instance attributes
# }
#
# there's also a nongcobject
OBJECT_VTABLE = lltype.ForwardReference()
CLASSTYPE = Ptr(OBJECT_VTABLE)
OBJECT = GcStruct('object', ('typeptr', CLASSTYPE),
hints={'immutable': True, 'shouldntbenull': True,
'typeptr': True},
rtti=True)
OBJECTPTR = Ptr(OBJECT)
OBJECT_VTABLE.become(Struct('object_vtable',
#('parenttypeptr', CLASSTYPE),
('subclassrange_min', Signed),
('subclassrange_max', Signed),
('rtti', Ptr(RuntimeTypeInfo)),
('name', Ptr(rstr.STR)),
('hash', Signed),
('instantiate', Ptr(FuncType([], OBJECTPTR))),
hints={'immutable': True}))
# non-gc case
NONGCOBJECT = Struct('nongcobject', ('typeptr', CLASSTYPE))
NONGCOBJECTPTR = Ptr(NONGCOBJECT)
OBJECT_BY_FLAVOR = {'gc': OBJECT, 'raw': NONGCOBJECT}
LLFLAVOR = {'gc': 'gc', 'raw': 'raw', 'stack': 'raw'}
def cast_vtable_to_typeptr(vtable):
while typeOf(vtable).TO != OBJECT_VTABLE:
vtable = vtable.super
return vtable
def alloc_array_name(name):
return rstr.string_repr.convert_const(name)
class ClassRepr(Repr):
def __init__(self, rtyper, classdef):
self.rtyper = rtyper
self.classdef = classdef
self.vtable_type = lltype.ForwardReference()
self.lowleveltype = Ptr(self.vtable_type)
def __repr__(self):
if self.classdef is None:
clsname = 'object'
else:
clsname = self.classdef.name
return '<ClassRepr for %s>' % (clsname,)
def compact_repr(self):
if self.classdef is None:
clsname = 'object'
else:
clsname = self.classdef.name
return 'ClassR %s' % (clsname,)
def convert_desc(self, desc):
subclassdef = desc.getuniqueclassdef()
if self.classdef is not None:
if self.classdef.commonbase(subclassdef) != self.classdef:
raise TyperError("not a subclass of %r: %r" % (
self.classdef.name, desc))
r_subclass = getclassrepr(self.rtyper, subclassdef)
return r_subclass.getruntime(self.lowleveltype)
def convert_const(self, value):
if not isinstance(value, (type, types.ClassType)):
raise TyperError("not a class: %r" % (value,))
bk = self.rtyper.annotator.bookkeeper
return self.convert_desc(bk.getdesc(value))
def prepare_method(self, s_value):
# special-casing for methods:
# if s_value is SomePBC([MethodDescs...])
# return a PBC representing the underlying functions
if (isinstance(s_value, annmodel.SomePBC) and
s_value.getKind() == description.MethodDesc):
s_value = self.classdef.lookup_filter(s_value)
funcdescs = [mdesc.funcdesc for mdesc in s_value.descriptions]
return annmodel.SomePBC(funcdescs)
return None # not a method
def get_ll_eq_function(self):
return None
def _setup_repr(self):
# NOTE: don't store mutable objects like the dicts below on 'self'
# before they are fully built, to avoid strange bugs in case
# of recursion where other code would uses these
# partially-initialized dicts.
clsfields = {}
pbcfields = {}
allmethods = {}
# class attributes
llfields = []
for name, attrdef in self.classdef.attrs.items():
if attrdef.readonly:
s_value = attrdef.s_value
s_unboundmethod = self.prepare_method(s_value)
if s_unboundmethod is not None:
allmethods[name] = True
s_value = s_unboundmethod
r = self.rtyper.getrepr(s_value)
mangled_name = 'cls_' + name
clsfields[name] = mangled_name, r
llfields.append((mangled_name, r.lowleveltype))
# attributes showing up in getattrs done on the class as a PBC
extra_access_sets = self.classdef.extra_access_sets
for access_set, (attr, counter) in extra_access_sets.items():
r = self.rtyper.getrepr(access_set.s_value)
mangled_name = mangle('pbc%d' % counter, attr)
pbcfields[access_set, attr] = mangled_name, r
llfields.append((mangled_name, r.lowleveltype))
llfields.sort()
llfields.sort(key=attr_reverse_size)
#
self.rbase = getclassrepr(self.rtyper, self.classdef.basedef)
self.rbase.setup()
kwds = {'hints': {'immutable': True}}
vtable_type = Struct('%s_vtable' % self.classdef.name,
('super', self.rbase.vtable_type),
*llfields, **kwds)
self.vtable_type.become(vtable_type)
allmethods.update(self.rbase.allmethods)
self.clsfields = clsfields
self.pbcfields = pbcfields
self.allmethods = allmethods
self.vtable = None
def getvtable(self):
"""Return a ptr to the vtable of this type."""
if self.vtable is None:
self.init_vtable()
return cast_vtable_to_typeptr(self.vtable)
def getruntime(self, expected_type):
assert expected_type == CLASSTYPE
return self.getvtable()
def init_vtable(self):
"""Create the actual vtable"""
self.vtable = malloc(self.vtable_type, immortal=True)
vtable_part = self.vtable
r_parentcls = self
while r_parentcls.classdef is not None:
self.setup_vtable(vtable_part, r_parentcls)
vtable_part = vtable_part.super
r_parentcls = r_parentcls.rbase
self.fill_vtable_root(vtable_part)
def setup_vtable(self, vtable, r_parentcls):
"""Initialize the vtable portion corresponding to 'r_parentcls'."""
# setup class attributes: for each attribute name at the level
# of 'r_parentcls', look up its value in the class
def assign(mangled_name, value):
if (isinstance(value, Constant) and
isinstance(value.value, staticmethod)):
value = Constant(value.value.__get__(42)) # staticmethod => bare function
llvalue = r.convert_desc_or_const(value)
setattr(vtable, mangled_name, llvalue)
for fldname in r_parentcls.clsfields:
mangled_name, r = r_parentcls.clsfields[fldname]
if r.lowleveltype is Void:
continue
value = self.classdef.classdesc.read_attribute(fldname, None)
if value is not None:
assign(mangled_name, value)
# extra PBC attributes
for (access_set, attr), (mangled_name, r) in r_parentcls.pbcfields.items():
if self.classdef.classdesc not in access_set.descs:
continue # only for the classes in the same pbc access set
if r.lowleveltype is Void:
continue
attrvalue = self.classdef.classdesc.read_attribute(attr, None)
if attrvalue is not None:
assign(mangled_name, attrvalue)
def fill_vtable_root(self, vtable):
"""Initialize the head of the vtable."""
vtable.hash = hash(self)
# initialize the 'subclassrange_*' and 'name' fields
if self.classdef is not None:
#vtable.parenttypeptr = self.rbase.getvtable()
vtable.subclassrange_min = self.classdef.minid
vtable.subclassrange_max = self.classdef.maxid
else: # for the root class
vtable.subclassrange_min = 0
vtable.subclassrange_max = sys.maxint
rinstance = getinstancerepr(self.rtyper, self.classdef)
rinstance.setup()
if rinstance.gcflavor == 'gc':
vtable.rtti = getRuntimeTypeInfo(rinstance.object_type)
if self.classdef is None:
name = 'object'
else:
name = self.classdef.shortname
vtable.name = alloc_array_name(name)
if hasattr(self.classdef, 'my_instantiate_graph'):
graph = self.classdef.my_instantiate_graph
vtable.instantiate = self.rtyper.getcallable(graph)
#else: the classdef was created recently, so no instantiate()
# could reach it
def fromtypeptr(self, vcls, llops):
"""Return the type pointer cast to self's vtable type."""
self.setup()
castable(self.lowleveltype, vcls.concretetype) # sanity check
return llops.genop('cast_pointer', [vcls],
resulttype=self.lowleveltype)
fromclasstype = fromtypeptr
def getclsfield(self, vcls, attr, llops):
"""Read the given attribute of 'vcls'."""
if attr in self.clsfields:
mangled_name, r = self.clsfields[attr]
v_vtable = self.fromtypeptr(vcls, llops)
cname = inputconst(Void, mangled_name)
return llops.genop('getfield', [v_vtable, cname], resulttype=r)
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
return self.rbase.getclsfield(vcls, attr, llops)
def setclsfield(self, vcls, attr, vvalue, llops):
"""Write the given attribute of 'vcls'."""
if attr in self.clsfields:
mangled_name, r = self.clsfields[attr]
v_vtable = self.fromtypeptr(vcls, llops)
cname = inputconst(Void, mangled_name)
llops.genop('setfield', [v_vtable, cname, vvalue])
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
self.rbase.setclsfield(vcls, attr, vvalue, llops)
def getpbcfield(self, vcls, access_set, attr, llops):
if (access_set, attr) not in self.pbcfields:
raise TyperError("internal error: missing PBC field")
mangled_name, r = self.pbcfields[access_set, attr]
v_vtable = self.fromtypeptr(vcls, llops)
cname = inputconst(Void, mangled_name)
return llops.genop('getfield', [v_vtable, cname], resulttype=r)
def rtype_issubtype(self, hop):
class_repr = get_type_repr(self.rtyper)
v_cls1, v_cls2 = hop.inputargs(class_repr, class_repr)
if isinstance(v_cls2, Constant):
cls2 = v_cls2.value
minid = hop.inputconst(Signed, cls2.subclassrange_min)
maxid = hop.inputconst(Signed, cls2.subclassrange_max)
return hop.gendirectcall(ll_issubclass_const, v_cls1, minid,
maxid)
else:
v_cls1, v_cls2 = hop.inputargs(class_repr, class_repr)
return hop.gendirectcall(ll_issubclass, v_cls1, v_cls2)
class RootClassRepr(ClassRepr):
"""ClassRepr for the root of the class hierarchy"""
classdef = None
def __init__(self, rtyper):
self.rtyper = rtyper
self.vtable_type = OBJECT_VTABLE
self.lowleveltype = Ptr(self.vtable_type)
def _setup_repr(self):
self.clsfields = {}
self.pbcfields = {}
self.allmethods = {}
self.vtable = None
def init_vtable(self):
self.vtable = malloc(self.vtable_type, immortal=True)
self.fill_vtable_root(self.vtable)
def get_type_repr(rtyper):
return rtyper.rootclass_repr
# ____________________________________________________________
class __extend__(annmodel.SomeInstance):
def rtyper_makerepr(self, rtyper):
return getinstancerepr(rtyper, self.classdef)
def rtyper_makekey(self):
return self.__class__, self.classdef
class __extend__(annmodel.SomeType):
def rtyper_makerepr(self, rtyper):
return get_type_repr(rtyper)
def rtyper_makekey(self):
return self.__class__,
class InstanceRepr(Repr):
def __init__(self, rtyper, classdef, gcflavor='gc'):
self.rtyper = rtyper
self.classdef = classdef
if classdef is None:
self.object_type = OBJECT_BY_FLAVOR[LLFLAVOR[gcflavor]]
else:
ForwardRef = lltype.FORWARDREF_BY_FLAVOR[LLFLAVOR[gcflavor]]
self.object_type = ForwardRef()
self.iprebuiltinstances = identity_dict()
self.lowleveltype = Ptr(self.object_type)
self.gcflavor = gcflavor
def _setup_repr(self, llfields=None, hints=None, adtmeths=None):
# NOTE: don't store mutable objects like the dicts below on 'self'
# before they are fully built, to avoid strange bugs in case
# of recursion where other code would uses these
# partially-initialized dicts.
if self.classdef is None:
self.immutable_field_set = set()
self.rclass = getclassrepr(self.rtyper, self.classdef)
fields = {}
allinstancefields = {}
if self.classdef is None:
fields['__class__'] = 'typeptr', get_type_repr(self.rtyper)
else:
# instance attributes
attrs = self.classdef.attrs.items()
attrs.sort()
myllfields = []
for name, attrdef in attrs:
if not attrdef.readonly:
r = self.rtyper.getrepr(attrdef.s_value)
mangled_name = 'inst_' + name
fields[name] = mangled_name, r
myllfields.append((mangled_name, r.lowleveltype))
myllfields.sort(key=attr_reverse_size)
if llfields is None:
llfields = myllfields
else:
llfields = llfields + myllfields
self.rbase = getinstancerepr(self.rtyper, self.classdef.basedef,
self.gcflavor)
self.rbase.setup()
MkStruct = lltype.STRUCT_BY_FLAVOR[LLFLAVOR[self.gcflavor]]
if adtmeths is None:
adtmeths = {}
if hints is None:
hints = {}
hints = self._check_for_immutable_hints(hints)
kwds = {}
if self.gcflavor == 'gc':
kwds['rtti'] = True
for name, attrdef in attrs:
if not attrdef.readonly and self.is_quasi_immutable(name):
llfields.append(('mutate_' + name, OBJECTPTR))
object_type = MkStruct(self.classdef.name,
('super', self.rbase.object_type),
hints=hints,
adtmeths=adtmeths,
*llfields,
**kwds)
self.object_type.become(object_type)
allinstancefields.update(self.rbase.allinstancefields)
allinstancefields.update(fields)
self.fields = fields
self.allinstancefields = allinstancefields
def _check_for_immutable_hints(self, hints):
loc = self.classdef.classdesc.lookup('_immutable_')
if loc is not None:
if loc is not self.classdef.classdesc:
raise ImmutableConflictError(
"class %r inherits from its parent _immutable_=True, "
"so it should also declare _immutable_=True" % (
self.classdef,))
if loc.classdict.get('_immutable_').value is not True:
raise TyperError(
"class %r: _immutable_ = something else than True" % (
self.classdef,))
hints = hints.copy()
hints['immutable'] = True
self.immutable_field_set = set() # unless overwritten below
if self.classdef.classdesc.lookup('_immutable_fields_') is not None:
hints = hints.copy()
immutable_fields = self.classdef.classdesc.classdict.get(
'_immutable_fields_')
if immutable_fields is not None:
self.immutable_field_set = set(immutable_fields.value)
accessor = FieldListAccessor()
hints['immutable_fields'] = accessor
return hints
def __repr__(self):
if self.classdef is None:
clsname = 'object'
else:
clsname = self.classdef.name
return '<InstanceRepr for %s>' % (clsname,)
def compact_repr(self):
if self.classdef is None:
clsname = 'object'
else:
clsname = self.classdef.name
return 'InstanceR %s' % (clsname,)
def _setup_repr_final(self):
self._setup_immutable_field_list()
self._check_for_immutable_conflicts()
if self.gcflavor == 'gc':
if (self.classdef is not None and
self.classdef.classdesc.lookup('__del__') is not None):
s_func = self.classdef.classdesc.s_read_attribute('__del__')
source_desc = self.classdef.classdesc.lookup('__del__')
source_classdef = source_desc.getclassdef(None)
source_repr = getinstancerepr(self.rtyper, source_classdef)
assert len(s_func.descriptions) == 1
funcdesc, = s_func.descriptions
graph = funcdesc.getuniquegraph()
self.check_graph_of_del_does_not_call_too_much(graph)
FUNCTYPE = FuncType([Ptr(source_repr.object_type)], Void)
destrptr = functionptr(FUNCTYPE, graph.name,
graph=graph,
_callable=graph.func)
else:
destrptr = None
OBJECT = OBJECT_BY_FLAVOR[LLFLAVOR[self.gcflavor]]
self.rtyper.attachRuntimeTypeInfoFunc(self.object_type,
ll_runtime_type_info,
OBJECT, destrptr)
vtable = self.rclass.getvtable()
self.rtyper.set_type_for_typeptr(vtable, self.lowleveltype.TO)
def _setup_immutable_field_list(self):
hints = self.object_type._hints
if "immutable_fields" in hints:
accessor = hints["immutable_fields"]
if not hasattr(accessor, 'fields'):
immutable_fields = set()
rbase = self
while rbase.classdef is not None:
immutable_fields.update(rbase.immutable_field_set)
rbase = rbase.rbase
self._parse_field_list(immutable_fields, accessor, hints)
def _parse_field_list(self, fields, accessor, hints):
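        # Suffix conventions handled below: "x" -> immutable field,
        # "x[*]" -> immutable array field, "x?" -> quasi-immutable field,
        # "x?[*]" -> quasi-immutable field holding an immutable array.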
ranking = {}
for name in fields:
quasi = False
if name.endswith('?[*]'): # a quasi-immutable field pointing to
name = name[:-4] # an immutable array
rank = IR_QUASIIMMUTABLE_ARRAY
quasi = True
elif name.endswith('[*]'): # for virtualizables' lists
name = name[:-3]
rank = IR_IMMUTABLE_ARRAY
elif name.endswith('?'): # a quasi-immutable field
name = name[:-1]
rank = IR_QUASIIMMUTABLE
quasi = True
else: # a regular immutable/green field
rank = IR_IMMUTABLE
try:
mangled_name, r = self._get_field(name)
except KeyError:
continue
if quasi and hints.get("immutable"):
raise TyperError(
"can't have _immutable_ = True and a quasi-immutable field "
"%s in class %s" % (name, self.classdef))
ranking[mangled_name] = rank
accessor.initialize(self.object_type, ranking)
return ranking
def _check_for_immutable_conflicts(self):
# check for conflicts, i.e. a field that is defined normally as
# mutable in some parent class but that is now declared immutable
is_self_immutable = "immutable" in self.object_type._hints
base = self
while base.classdef is not None:
base = base.rbase
for fieldname in base.fields:
try:
mangled, r = base._get_field(fieldname)
except KeyError:
continue
if r.lowleveltype == Void:
continue
base._setup_immutable_field_list()
if base.object_type._immutable_field(mangled):
continue
# 'fieldname' is a mutable, non-Void field in the parent
if is_self_immutable:
raise ImmutableConflictError(
"class %r has _immutable_=True, but parent class %r "
"defines (at least) the mutable field %r" %
(self, base, fieldname))
if (fieldname in self.immutable_field_set or
(fieldname + '?') in self.immutable_field_set):
raise ImmutableConflictError(
"field %r is defined mutable in class %r, but "
"listed in _immutable_fields_ in subclass %r" %
(fieldname, base, self))
def hook_access_field(self, vinst, cname, llops, flags):
pass # for virtualizables; see rvirtualizable.py
def hook_setfield(self, vinst, fieldname, llops):
if self.is_quasi_immutable(fieldname):
c_fieldname = inputconst(Void, 'mutate_' + fieldname)
llops.genop('jit_force_quasi_immutable', [vinst, c_fieldname])
def is_quasi_immutable(self, fieldname):
search1 = fieldname + '?'
search2 = fieldname + '?[*]'
rbase = self
while rbase.classdef is not None:
if (search1 in rbase.immutable_field_set or
search2 in rbase.immutable_field_set):
return True
rbase = rbase.rbase
return False
def new_instance(self, llops, classcallhop=None, nonmovable=False):
"""Build a new instance, without calling __init__."""
flavor = self.gcflavor
flags = {'flavor': flavor}
if nonmovable:
flags['nonmovable'] = True
ctype = inputconst(Void, self.object_type)
cflags = inputconst(Void, flags)
vlist = [ctype, cflags]
vptr = llops.genop('malloc', vlist,
resulttype=Ptr(self.object_type))
ctypeptr = inputconst(CLASSTYPE, self.rclass.getvtable())
self.setfield(vptr, '__class__', ctypeptr, llops)
# initialize instance attributes from their defaults from the class
if self.classdef is not None:
flds = self.allinstancefields.keys()
flds.sort()
for fldname in flds:
if fldname == '__class__':
continue
mangled_name, r = self.allinstancefields[fldname]
if r.lowleveltype is Void:
continue
value = self.classdef.classdesc.read_attribute(fldname, None)
if value is not None:
ll_value = r.convert_desc_or_const(value)
# don't write NULL GC pointers: we know that the malloc
# done above initialized at least the GC Ptr fields to
# NULL already, and that's true for all our GCs
if (isinstance(r.lowleveltype, Ptr) and
r.lowleveltype.TO._gckind == 'gc' and
not ll_value):
continue
cvalue = inputconst(r.lowleveltype, ll_value)
self.setfield(vptr, fldname, cvalue, llops,
flags={'access_directly': True})
return vptr
def convert_const(self, value):
if value is None:
return self.null_instance()
if isinstance(value, types.MethodType):
value = value.im_self # bound method -> instance
bk = self.rtyper.annotator.bookkeeper
try:
classdef = bk.getuniqueclassdef(value.__class__)
except KeyError:
raise TyperError("no classdef: %r" % (value.__class__,))
if classdef != self.classdef:
# if the class does not match exactly, check that 'value' is an
# instance of a subclass and delegate to that InstanceRepr
if classdef.commonbase(self.classdef) != self.classdef:
raise TyperError("not an instance of %r: %r" % (
self.classdef.name, value))
rinstance = getinstancerepr(self.rtyper, classdef)
result = rinstance.convert_const(value)
return self.upcast(result)
# common case
return self.convert_const_exact(value)
def convert_const_exact(self, value):
try:
return self.iprebuiltinstances[value]
except KeyError:
self.setup()
result = self.create_instance()
self.iprebuiltinstances[value] = result
self.initialize_prebuilt_instance(value, self.classdef, result)
return result
def get_reusable_prebuilt_instance(self):
"Get a dummy prebuilt instance. Multiple calls reuse the same one."
try:
return self._reusable_prebuilt_instance
except AttributeError:
self.setup()
result = self.create_instance()
self._reusable_prebuilt_instance = result
self.initialize_prebuilt_data(Ellipsis, self.classdef, result)
return result
_initialize_data_flattenrec = FlattenRecursion()
def initialize_prebuilt_instance(self, value, classdef, result):
# must fill in the hash cache before the other ones
# (see test_circular_hash_initialization)
self.initialize_prebuilt_hash(value, result)
self._initialize_data_flattenrec(self.initialize_prebuilt_data,
value, classdef, result)
def get_ll_hash_function(self):
return ll_inst_hash
get_ll_fasthash_function = get_ll_hash_function
def rtype_type(self, hop):
if hop.s_result.is_constant():
return hop.inputconst(hop.r_result, hop.s_result.const)
instance_repr = self.common_repr()
vinst, = hop.inputargs(instance_repr)
if hop.args_s[0].can_be_none():
return hop.gendirectcall(ll_inst_type, vinst)
else:
return instance_repr.getfield(vinst, '__class__', hop.llops)
def rtype_getattr(self, hop):
if hop.s_result.is_constant():
return hop.inputconst(hop.r_result, hop.s_result.const)
attr = hop.args_s[1].const
vinst, vattr = hop.inputargs(self, Void)
if attr == '__class__' and hop.r_result.lowleveltype is Void:
# special case for when the result of '.__class__' is a constant
[desc] = hop.s_result.descriptions
return hop.inputconst(Void, desc.pyobj)
if attr in self.allinstancefields:
return self.getfield(vinst, attr, hop.llops,
flags=hop.args_s[0].flags)
elif attr in self.rclass.allmethods:
# special case for methods: represented as their 'self' only
# (see MethodsPBCRepr)
return hop.r_result.get_method_from_instance(self, vinst,
hop.llops)
else:
vcls = self.getfield(vinst, '__class__', hop.llops)
return self.rclass.getclsfield(vcls, attr, hop.llops)
def rtype_setattr(self, hop):
attr = hop.args_s[1].const
r_value = self.getfieldrepr(attr)
vinst, vattr, vvalue = hop.inputargs(self, Void, r_value)
self.setfield(vinst, attr, vvalue, hop.llops,
flags=hop.args_s[0].flags)
def rtype_bool(self, hop):
vinst, = hop.inputargs(self)
return hop.genop('ptr_nonzero', [vinst], resulttype=Bool)
def ll_str(self, i): # doesn't work for non-gc classes!
from rpython.rtyper.lltypesystem.ll_str import ll_int2hex
from rpython.rlib.rarithmetic import r_uint
if not i:
return rstr.null_str
instance = cast_pointer(OBJECTPTR, i)
# Two choices: the first gives a fast answer but it can change
# (typically only once) during the life of the object.
#uid = r_uint(cast_ptr_to_int(i))
uid = r_uint(llop.gc_id(lltype.Signed, i))
#
res = rstr.instance_str_prefix
res = rstr.ll_strconcat(res, instance.typeptr.name)
res = rstr.ll_strconcat(res, rstr.instance_str_infix)
res = rstr.ll_strconcat(res, ll_int2hex(uid, False))
res = rstr.ll_strconcat(res, rstr.instance_str_suffix)
return res
def get_ll_eq_function(self):
return None # defaults to compare by identity ('==' on pointers)
def can_ll_be_null(self, s_value):
return s_value.can_be_none()
def check_graph_of_del_does_not_call_too_much(self, graph):
# RPython-level __del__() methods should not do "too much".
# In the PyPy Python interpreter, they usually do simple things
# like file.__del__() closing the file descriptor; or if they
# want to do more like call an app-level __del__() method, they
# enqueue the object instead, and the actual call is done later.
#
# Here, as a quick way to check "not doing too much", we check
# that from no RPython-level __del__() method we can reach a
# JitDriver.
#
# XXX wrong complexity, but good enough because the set of
# reachable graphs should be small
callgraph = self.rtyper.annotator.translator.callgraph.values()
seen = {graph: None}
while True:
oldlength = len(seen)
for caller, callee in callgraph:
if caller in seen and callee not in seen:
func = getattr(callee, 'func', None)
if getattr(func, '_dont_reach_me_in_del_', False):
lst = [str(callee)]
g = caller
while g:
lst.append(str(g))
g = seen.get(g)
lst.append('')
raise TyperError("the RPython-level __del__() method "
"in %r calls:%s" %
(graph, '\n\t'.join(lst[::-1])))
if getattr(func, '_cannot_really_call_random_things_',
False):
continue
seen[callee] = caller
if len(seen) == oldlength:
break
def common_repr(self): # -> object or nongcobject reprs
return getinstancerepr(self.rtyper, None, self.gcflavor)
def _get_field(self, attr):
return self.fields[attr]
def null_instance(self):
return nullptr(self.object_type)
def upcast(self, result):
return cast_pointer(self.lowleveltype, result)
def create_instance(self):
return malloc(self.object_type, flavor=self.gcflavor, immortal=True)
def initialize_prebuilt_data(self, value, classdef, result):
if self.classdef is not None:
# recursively build the parent part of the instance
self.rbase.initialize_prebuilt_data(value, classdef, result.super)
# then add instance attributes from this level
for name, (mangled_name, r) in self.fields.items():
if r.lowleveltype is Void:
llattrvalue = None
else:
try:
attrvalue = getattr(value, name)
except AttributeError:
attrvalue = self.classdef.classdesc.read_attribute(
name, None)
if attrvalue is None:
# Ellipsis from get_reusable_prebuilt_instance()
#if value is not Ellipsis:
#warning("prebuilt instance %r has no "
# "attribute %r" % (value, name))
llattrvalue = r.lowleveltype._defl()
else:
llattrvalue = r.convert_desc_or_const(attrvalue)
else:
llattrvalue = r.convert_const(attrvalue)
setattr(result, mangled_name, llattrvalue)
else:
# OBJECT part
rclass = getclassrepr(self.rtyper, classdef)
result.typeptr = rclass.getvtable()
def initialize_prebuilt_hash(self, value, result):
llattrvalue = getattr(value, '__precomputed_identity_hash', None)
if llattrvalue is not None:
lltype.init_identity_hash(result, llattrvalue)
def getfieldrepr(self, attr):
"""Return the repr used for the given attribute."""
if attr in self.fields:
mangled_name, r = self.fields[attr]
return r
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
return self.rbase.getfieldrepr(attr)
def getfield(self, vinst, attr, llops, force_cast=False, flags={}):
"""Read the given attribute (or __class__ for the type) of 'vinst'."""
if attr in self.fields:
mangled_name, r = self.fields[attr]
cname = inputconst(Void, mangled_name)
if force_cast:
vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
self.hook_access_field(vinst, cname, llops, flags)
return llops.genop('getfield', [vinst, cname], resulttype=r)
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
return self.rbase.getfield(vinst, attr, llops, force_cast=True,
flags=flags)
def setfield(self, vinst, attr, vvalue, llops, force_cast=False,
flags={}):
"""Write the given attribute (or __class__ for the type) of 'vinst'."""
if attr in self.fields:
mangled_name, r = self.fields[attr]
cname = inputconst(Void, mangled_name)
if force_cast:
vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
self.hook_access_field(vinst, cname, llops, flags)
self.hook_setfield(vinst, attr, llops)
llops.genop('setfield', [vinst, cname, vvalue])
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
self.rbase.setfield(vinst, attr, vvalue, llops, force_cast=True,
flags=flags)
def rtype_isinstance(self, hop):
class_repr = get_type_repr(hop.rtyper)
instance_repr = self.common_repr()
v_obj, v_cls = hop.inputargs(instance_repr, class_repr)
if isinstance(v_cls, Constant):
cls = v_cls.value
llf, llf_nonnull = make_ll_isinstance(self.rtyper, cls)
if hop.args_s[0].can_be_None:
return hop.gendirectcall(llf, v_obj)
else:
return hop.gendirectcall(llf_nonnull, v_obj)
else:
return hop.gendirectcall(ll_isinstance, v_obj, v_cls)
class __extend__(pairtype(InstanceRepr, InstanceRepr)):
def convert_from_to((r_ins1, r_ins2), v, llops):
# which is a subclass of which?
if r_ins1.classdef is None or r_ins2.classdef is None:
basedef = None
else:
basedef = r_ins1.classdef.commonbase(r_ins2.classdef)
if basedef == r_ins2.classdef:
# r_ins1 is an instance of the subclass: converting to parent
v = llops.genop('cast_pointer', [v],
resulttype=r_ins2.lowleveltype)
return v
elif basedef == r_ins1.classdef:
# r_ins2 is an instance of the subclass: potentially unsafe
# casting, but we do it anyway (e.g. the annotator produces
# such casts after a successful isinstance() check)
v = llops.genop('cast_pointer', [v],
resulttype=r_ins2.lowleveltype)
return v
else:
return NotImplemented
def rtype_is_((r_ins1, r_ins2), hop):
if r_ins1.gcflavor != r_ins2.gcflavor:
# obscure logic, the is can be true only if both are None
v_ins1, v_ins2 = hop.inputargs(
r_ins1.common_repr(), r_ins2.common_repr())
return hop.gendirectcall(ll_both_none, v_ins1, v_ins2)
if r_ins1.classdef is None or r_ins2.classdef is None:
basedef = None
else:
basedef = r_ins1.classdef.commonbase(r_ins2.classdef)
r_ins = getinstancerepr(r_ins1.rtyper, basedef, r_ins1.gcflavor)
return pairtype(Repr, Repr).rtype_is_(pair(r_ins, r_ins), hop)
rtype_eq = rtype_is_
def rtype_ne(rpair, hop):
v = rpair.rtype_eq(hop)
return hop.genop("bool_not", [v], resulttype=Bool)
# ____________________________________________________________
def rtype_new_instance(rtyper, classdef, llops, classcallhop=None,
nonmovable=False):
rinstance = getinstancerepr(rtyper, classdef)
return rinstance.new_instance(llops, classcallhop, nonmovable=nonmovable)
def ll_inst_hash(ins):
if not ins:
return 0 # for None
else:
return lltype.identityhash(ins)
_missing = object()
def fishllattr(inst, name, default=_missing):
p = widest = lltype.normalizeptr(inst)
while True:
try:
return getattr(p, 'inst_' + name)
except AttributeError:
pass
try:
p = p.super
except AttributeError:
break
if default is _missing:
raise AttributeError("%s has no field %s" %
(lltype.typeOf(widest), name))
return default
def attr_reverse_size((_, T)):
# This is used to sort the instance or class attributes by decreasing
# "likely size", as reported by rffi.sizeof(), to minimize padding
# holes in C. Fields should first be sorted by name, just to minimize
# randomness, and then (stably) sorted by 'attr_reverse_size'.
if T is lltype.Void:
return None
from rpython.rtyper.lltypesystem.rffi import sizeof
try:
return -sizeof(T)
except StandardError:
return None
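# Illustrative sketch (not part of the original source): 'attr_reverse_size' is meant
# to be used as a sort key over the (name, TYPE) pairs mentioned above, roughly:
#
#     entries.sort()                         # by name first, for determinism
#     entries.sort(key=attr_reverse_size)    # then, stably, by decreasing size
#
# so that the largest fields come first and padding holes in the generated C
# structs are kept to a minimum.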
# ____________________________________________________________
#
# Low-level implementation of operations on classes and instances
# doesn't work for non-gc stuff!
def ll_cast_to_object(obj):
return cast_pointer(OBJECTPTR, obj)
# doesn't work for non-gc stuff!
def ll_type(obj):
return cast_pointer(OBJECTPTR, obj).typeptr
def ll_issubclass(subcls, cls):
return llop.int_between(Bool,
cls.subclassrange_min,
subcls.subclassrange_min,
cls.subclassrange_max)
def ll_issubclass_const(subcls, minid, maxid):
return llop.int_between(Bool, minid, subcls.subclassrange_min, maxid)
def ll_isinstance(obj, cls): # obj should be cast to OBJECT or NONGCOBJECT
if not obj:
return False
obj_cls = obj.typeptr
return ll_issubclass(obj_cls, cls)
def make_ll_isinstance(rtyper, cls):
try:
return rtyper.isinstance_helpers[cls._obj]
except KeyError:
minid = cls.subclassrange_min
maxid = cls.subclassrange_max
if minid.number_with_subclasses():
def ll_isinstance_const_nonnull(obj):
objid = obj.typeptr.subclassrange_min
return llop.int_between(Bool, minid, objid, maxid)
else:
def ll_isinstance_const_nonnull(obj):
return obj.typeptr == cls
def ll_isinstance_const(obj):
if not obj:
return False
return ll_isinstance_const_nonnull(obj)
result = (ll_isinstance_const, ll_isinstance_const_nonnull)
rtyper.isinstance_helpers[cls._obj] = result
return result
def ll_runtime_type_info(obj):
return obj.typeptr.rtti
def ll_inst_type(obj):
if obj:
return obj.typeptr
else:
# type(None) -> NULL (for now)
return nullptr(typeOf(obj).TO.typeptr.TO)
def ll_both_none(ins1, ins2):
return not ins1 and not ins2
# ____________________________________________________________
def feedllattr(inst, name, llvalue):
p = widest = lltype.normalizeptr(inst)
while True:
try:
return setattr(p, 'inst_' + name, llvalue)
except AttributeError:
pass
try:
p = p.super
except AttributeError:
break
raise AttributeError("%s has no field %s" % (lltype.typeOf(widest),
name))
def declare_type_for_typeptr(vtable, TYPE):
"""Hack for custom low-level-only 'subclasses' of OBJECT:
call this somewhere annotated, in order to declare that it is
of the given TYPE and has got the corresponding vtable."""
class Entry(ExtRegistryEntry):
_about_ = declare_type_for_typeptr
def compute_result_annotation(self, s_vtable, s_TYPE):
assert s_vtable.is_constant()
assert s_TYPE.is_constant()
return annmodel.s_None
def specialize_call(self, hop):
vtable = hop.args_v[0].value
TYPE = hop.args_v[1].value
assert lltype.typeOf(vtable) == CLASSTYPE
assert isinstance(TYPE, GcStruct)
assert lltype._castdepth(TYPE, OBJECT) > 0
hop.rtyper.set_type_for_typeptr(vtable, TYPE)
hop.exception_cannot_occur()
return hop.inputconst(lltype.Void, None)
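# A hedged usage sketch (not from the original file): RPython code that builds its own
# low-level-only 'subclass' of OBJECT would call the hack above from annotated code,
# for example:
#
#     def setup_my_type(my_vtable):
#         declare_type_for_typeptr(my_vtable, MY_GCSTRUCT)
#
# where 'my_vtable' and 'MY_GCSTRUCT' are placeholder names for a CLASSTYPE vtable and
# a GcStruct derived from OBJECT; the call records their correspondence so the rtyper
# can later cast instances through that vtable.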
| mit | -5,830,441,345,531,532,000 | 39.195081 | 91 | 0.57375 | false |
simplegeo/rtree | tests/data.py | 1 | 1251 | import os.path
boxes15 = []
f = file(os.path.join(os.path.dirname(__file__), 'boxes_15x15.data'), 'r')
for line in f.readlines():
if not line:
break
[left, bottom, right, top] = [float(x) for x in line.split()]
boxes15.append((left, bottom, right, top))
boxes3 = []
f = file(os.path.join(os.path.dirname(__file__), 'boxes_3x3.data'), 'r')
for line in f.readlines():
if not line:
break
[left, bottom, right, top] = [float(x) for x in line.split()]
boxes3.append((left, bottom, right, top))
points = []
f = file(os.path.join(os.path.dirname(__file__), 'point_clusters.data'), 'r')
for line in f.readlines():
if not line:
break
[left, bottom] = [float(x) for x in line.split()]
points.append((left, bottom))
def draw_data(filename):
from PIL import Image, ImageDraw
im = Image.new('RGB', (1440, 720))
d = ImageDraw.Draw(im)
for box in boxes15:
coords = [4.0*(box[0]+180), 4.0*(box[1]+90), 4.0*(box[2]+180), 4.0*(box[3]+90)]
d.rectangle(coords, outline='red')
for box in boxes3:
coords = [4.0*(box[0]+180), 4.0*(box[1]+90), 4.0*(box[2]+180), 4.0*(box[3]+90)]
d.rectangle(coords, outline='blue')
im.save(filename)
| lgpl-2.1 | 514,539,122,646,075,300 | 31.076923 | 87 | 0.577138 | false |
kvaps/vdsm | vdsm/network/configurators/dhclient.py | 1 | 4498 | # Copyright (C) 2013, IBM Corporation
# Copyright (C) 2013-2014, Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import errno
import logging
import os
import signal
import threading
from vdsm import cmdutils
from vdsm import ipwrapper
from vdsm import netinfo
from vdsm.utils import CommandPath
from vdsm.utils import execCmd
from vdsm.utils import pgrep
from vdsm.utils import rmFile
DHCLIENT_CGROUP = 'vdsm-dhclient'
class DhcpClient(object):
PID_FILE = '/var/run/dhclient%s-%s.pid'
LEASE_DIR = '/var/lib/dhclient'
LEASE_FILE = os.path.join(LEASE_DIR, 'dhclient{0}--{1}.lease')
DHCLIENT = CommandPath('dhclient', '/sbin/dhclient')
def __init__(self, iface, family=4, cgroup=DHCLIENT_CGROUP):
self.iface = iface
self.family = family
self.pidFile = self.PID_FILE % (family, self.iface)
if not os.path.exists(self.LEASE_DIR):
os.mkdir(self.LEASE_DIR)
self.leaseFile = self.LEASE_FILE.format(
'' if family == 4 else '6', self.iface)
self._cgroup = cgroup
def _dhclient(self):
# Ask dhclient to stop any dhclient running for the device
if os.path.exists(os.path.join(netinfo.NET_PATH, self.iface)):
kill_dhclient(self.iface, self.family)
cmd = [self.DHCLIENT.cmd, '-%s' % self.family, '-1', '-pf',
self.pidFile, '-lf', self.leaseFile, self.iface]
cmd = cmdutils.systemd_run(cmd, scope=True, slice=self._cgroup)
rc, out, err = execCmd(cmd)
return rc, out, err
def start(self, blocking):
if blocking:
rc, _, _ = self._dhclient()
return rc
else:
t = threading.Thread(target=self._dhclient, name='vdsm-dhclient-%s'
% self.iface)
t.daemon = True
t.start()
def shutdown(self):
try:
pid = int(open(self.pidFile).readline().strip())
except IOError as e:
if e.errno == os.errno.ENOENT:
pass
else:
raise
else:
_kill_and_rm_pid(pid, self.pidFile)
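# Minimal usage sketch (an illustration, not part of the original module):
#
#     client = DhcpClient('eth0', family=4)   # 'eth0' is a placeholder device name
#     rc = client.start(blocking=True)        # blocking=False runs dhclient from a thread
#     ...
#     client.shutdown()
#
# start() launches dhclient in the vdsm-dhclient cgroup slice via systemd-run, and
# shutdown() reads the pid file dhclient wrote and terminates that process.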
def kill_dhclient(device_name, family=4):
for pid in pgrep('dhclient'):
try:
with open('/proc/%s/cmdline' % pid) as cmdline:
args = cmdline.read().strip('\0').split('\0')
except IOError as ioe:
if ioe.errno == errno.ENOENT: # exited before we read cmdline
continue
if args[-1] != device_name: # dhclient of another device
continue
tokens = iter(args)
pid_file = '/var/run/dhclient.pid' # Default client pid location
running_family = 4
for token in tokens:
if token == '-pf':
pid_file = next(tokens)
elif token == '--no-pid':
pid_file = None
elif token == '-6':
running_family = 6
if running_family != family:
continue
logging.info('Stopping dhclient -%s before running our own on %s',
family, device_name)
_kill_and_rm_pid(pid, pid_file)
    # In order to be able to configure the device with dhclient again, it is
    # necessary that dhclient does not find it configured with any IP address
# (except 0.0.0.0 which is fine, or IPv6 link-local address needed for
# DHCPv6).
ipwrapper.addrFlush(device_name, family)
def _kill_and_rm_pid(pid, pid_file):
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == os.errno.ESRCH: # Already exited
pass
else:
raise
if pid_file is not None:
rmFile(pid_file)
| gpl-2.0 | 2,782,631,134,923,793,000 | 33.6 | 79 | 0.609604 | false |
modrzew/igwoctisi-server | Database.py | 1 | 2334 | # -*- coding: utf-8 *-*
from sqlalchemy import *
from sqlalchemy.sql import select
import math
ENGINE = None
CONNECTION = None
USING_DATABASE = False
META = None
CONFIG = None
class Schema:
pass
def connect():
global CONFIG, ENGINE, CONNECTION, USING_DATABASE, META
try:
import Config
except ImportError:
raise Exception('Config module not found')
CONFIG = Config.DATABASE
ENGINE = create_engine('mysql://%s:%s@%s/%s' % (CONFIG['username'], CONFIG['password'], CONFIG['host'], CONFIG['database']), pool_recycle=3600*24)
CONNECTION = ENGINE.connect()
META = MetaData()
USING_DATABASE = True
Schema.users = Table('users', META, autoload=True, autoload_with=ENGINE)
Schema.games = Table('games', META, autoload=True, autoload_with=ENGINE)
Schema.places = Table('places', META, autoload=True, autoload_with=ENGINE)
def login(username, password):
global CONNECTION
s = select(['id']).where(and_(Schema.users.c.username == username, Schema.users.c.password == password)).select_from(Schema.users)
rs = CONNECTION.execute(s)
row = rs.fetchone()
if row is None:
return -1
else:
return row['id']
def create_game(game):
global CONNECTION
values = {
'name': game.name,
'status': 1
}
ins = insert(Schema.games, values=values)
result = CONNECTION.execute(ins)
game.id = result.inserted_primary_key[0]
def save_game(game):
global CONNECTION
values = {
'time': game.time,
'status': 2
}
where = {
'id': game.id
}
CONNECTION.execute(
update(Schema.games)
.where(Schema.games.c.id==game.id)
.values(values)
)
places = game.players_lost + game.players
places.reverse()
for p in places:
place = places.index(p) + 1
places_length = len(places)
if place <= math.ceil(places_length/2.0):
points = int(round(game.map.points * math.pow(0.5, place)))
else:
points = int(round(-game.map.points * math.pow(0.5, (places_length - place + 1))))
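        # Worked example (illustrative, not from the original code): on a map worth
        # 100 points with 4 entries in 'places', place 1 gets round(100 * 0.5) = +50,
        # place 2 gets +25, while place 3 gets -25 and place 4 gets -50: the top half
        # of the table gains points and the bottom half loses a mirrored amount.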
values = {
'game_id': game.id,
'user_id': p.id,
'place': place,
'points': int(round(points))
}
ins = insert(Schema.places, values)
CONNECTION.execute(ins)
# Update user points
CONNECTION.execute(
update(Schema.users)
.where(Schema.users.c.id==p.id)
.values({Schema.users.c.points:Schema.users.c.points+points})
)
| mit | 3,333,709,890,906,179,600 | 23.933333 | 147 | 0.657669 | false |
spaceof7/QGIS | tests/src/python/featuresourcetestbase.py | 1 | 35112 | # -*- coding: utf-8 -*-
"""QGIS Unit test utils for QgsFeatureSource subclasses.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import str
from builtins import object
__author__ = 'Nyall Dawson'
__date__ = '2017-05-25'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (
QgsRectangle,
QgsFeatureRequest,
QgsFeature,
QgsWkbTypes,
QgsProject,
QgsGeometry,
QgsAbstractFeatureIterator,
QgsExpressionContextScope,
QgsExpressionContext,
QgsVectorLayerFeatureSource,
QgsCoordinateReferenceSystem,
NULL
)
from utilities import compareWkt
class FeatureSourceTestCase(object):
'''
    This is a collection of tests for QgsFeatureSource subclasses, kept generic.
    To make use of it, subclass it and set self.source to the QgsFeatureSource you want to test.
    Make sure that your source uses the default dataset by converting one of the provided datasets
    from the folder tests/testdata/source to a dataset your source is able to handle. A minimal
    subclassing sketch is given at the end of this module.
'''
def testCrs(self):
self.assertEqual(self.source.sourceCrs().authid(), 'EPSG:4326')
def testWkbType(self):
self.assertEqual(self.source.wkbType(), QgsWkbTypes.Point)
def testFeatureCount(self):
self.assertEqual(self.source.featureCount(), 5)
self.assertEqual(len(self.source), 5)
def testFields(self):
fields = self.source.fields()
for f in ('pk', 'cnt', 'name', 'name2', 'num_char'):
self.assertTrue(fields.lookupField(f) >= 0)
def testGetFeatures(self, source=None, extra_features=[], skip_features=[], changed_attributes={}, changed_geometries={}):
""" Test that expected results are returned when fetching all features """
# IMPORTANT - we do not use `for f in source.getFeatures()` as we are also
# testing that existing attributes & geometry in f are overwritten correctly
# (for f in ... uses a new QgsFeature for every iteration)
if not source:
source = self.source
it = source.getFeatures()
f = QgsFeature()
attributes = {}
geometries = {}
while it.nextFeature(f):
# expect feature to be valid
self.assertTrue(f.isValid())
# split off the first 5 attributes only - some source test datasets will include
# additional attributes which we ignore
attrs = f.attributes()[0:5]
# force the num_char attribute to be text - some sources (e.g., delimited text) will
# automatically detect that this attribute contains numbers and set it as a numeric
# field
attrs[4] = str(attrs[4])
attributes[f['pk']] = attrs
geometries[f['pk']] = f.hasGeometry() and f.geometry().asWkt()
expected_attributes = {5: [5, -200, NULL, 'NuLl', '5'],
3: [3, 300, 'Pear', 'PEaR', '3'],
1: [1, 100, 'Orange', 'oranGe', '1'],
2: [2, 200, 'Apple', 'Apple', '2'],
4: [4, 400, 'Honey', 'Honey', '4']}
expected_geometries = {1: 'Point (-70.332 66.33)',
2: 'Point (-68.2 70.8)',
3: None,
4: 'Point(-65.32 78.3)',
5: 'Point(-71.123 78.23)'}
for f in extra_features:
expected_attributes[f[0]] = f.attributes()
if f.hasGeometry():
expected_geometries[f[0]] = f.geometry().asWkt()
else:
expected_geometries[f[0]] = None
for i in skip_features:
del expected_attributes[i]
del expected_geometries[i]
for i, a in changed_attributes.items():
for attr_idx, v in a.items():
expected_attributes[i][attr_idx] = v
for i, g, in changed_geometries.items():
if g:
expected_geometries[i] = g.asWkt()
else:
expected_geometries[i] = None
self.assertEqual(attributes, expected_attributes, 'Expected {}, got {}'.format(expected_attributes, attributes))
self.assertEqual(len(expected_geometries), len(geometries))
for pk, geom in list(expected_geometries.items()):
if geom:
assert compareWkt(geom, geometries[pk]), "Geometry {} mismatch Expected:\n{}\nGot:\n{}\n".format(pk, geom, geometries[pk])
else:
self.assertFalse(geometries[pk], 'Expected null geometry for {}'.format(pk))
def assert_query(self, source, expression, expected):
request = QgsFeatureRequest().setFilterExpression(expression).setFlags(QgsFeatureRequest.NoGeometry)
result = set([f['pk'] for f in source.getFeatures(request)])
assert set(expected) == result, 'Expected {} and got {} when testing expression "{}"'.format(set(expected), result, expression)
self.assertTrue(all(f.isValid() for f in source.getFeatures(request)))
# Also check that filter works when referenced fields are not being retrieved by request
result = set([f['pk'] for f in source.getFeatures(QgsFeatureRequest().setFilterExpression(expression).setSubsetOfAttributes([0]))])
assert set(expected) == result, 'Expected {} and got {} when testing expression "{}" using empty attribute subset'.format(set(expected), result, expression)
# test that results match QgsFeatureRequest.acceptFeature
request = QgsFeatureRequest().setFilterExpression(expression)
for f in source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in expected)
def runGetFeatureTests(self, source):
self.assertEqual(len([f for f in source.getFeatures()]), 5)
self.assert_query(source, 'name ILIKE \'QGIS\'', [])
self.assert_query(source, '"name" IS NULL', [5])
self.assert_query(source, '"name" IS NOT NULL', [1, 2, 3, 4])
self.assert_query(source, '"name" NOT LIKE \'Ap%\'', [1, 3, 4])
self.assert_query(source, '"name" NOT ILIKE \'QGIS\'', [1, 2, 3, 4])
self.assert_query(source, '"name" NOT ILIKE \'pEAR\'', [1, 2, 4])
self.assert_query(source, 'name = \'Apple\'', [2])
self.assert_query(source, 'name <> \'Apple\'', [1, 3, 4])
self.assert_query(source, 'name = \'apple\'', [])
self.assert_query(source, '"name" <> \'apple\'', [1, 2, 3, 4])
self.assert_query(source, '(name = \'Apple\') is not null', [1, 2, 3, 4])
self.assert_query(source, 'name LIKE \'Apple\'', [2])
self.assert_query(source, 'name LIKE \'aPple\'', [])
self.assert_query(source, 'name ILIKE \'aPple\'', [2])
self.assert_query(source, 'name ILIKE \'%pp%\'', [2])
self.assert_query(source, 'cnt > 0', [1, 2, 3, 4])
self.assert_query(source, '-cnt > 0', [5])
self.assert_query(source, 'cnt < 0', [5])
self.assert_query(source, '-cnt < 0', [1, 2, 3, 4])
self.assert_query(source, 'cnt >= 100', [1, 2, 3, 4])
self.assert_query(source, 'cnt <= 100', [1, 5])
self.assert_query(source, 'pk IN (1, 2, 4, 8)', [1, 2, 4])
self.assert_query(source, 'cnt = 50 * 2', [1])
self.assert_query(source, 'cnt = 150 / 1.5', [1])
self.assert_query(source, 'cnt = 1000 / 10', [1])
self.assert_query(source, 'cnt = 1000/11+10', []) # checks that source isn't rounding int/int
self.assert_query(source, 'pk = 9 // 4', [2]) # int division
self.assert_query(source, 'cnt = 99 + 1', [1])
self.assert_query(source, 'cnt = 101 - 1', [1])
self.assert_query(source, 'cnt - 1 = 99', [1])
self.assert_query(source, '-cnt - 1 = -101', [1])
self.assert_query(source, '-(-cnt) = 100', [1])
self.assert_query(source, '-(cnt) = -(100)', [1])
self.assert_query(source, 'cnt + 1 = 101', [1])
self.assert_query(source, 'cnt = 1100 % 1000', [1])
self.assert_query(source, '"name" || \' \' || "name" = \'Orange Orange\'', [1])
self.assert_query(source, '"name" || \' \' || "cnt" = \'Orange 100\'', [1])
self.assert_query(source, '\'x\' || "name" IS NOT NULL', [1, 2, 3, 4])
self.assert_query(source, '\'x\' || "name" IS NULL', [5])
self.assert_query(source, 'cnt = 10 ^ 2', [1])
self.assert_query(source, '"name" ~ \'[OP]ra[gne]+\'', [1])
self.assert_query(source, '"name"="name2"', [2, 4]) # mix of matched and non-matched case sensitive names
self.assert_query(source, 'true', [1, 2, 3, 4, 5])
self.assert_query(source, 'false', [])
# Three value logic
self.assert_query(source, 'false and false', [])
self.assert_query(source, 'false and true', [])
self.assert_query(source, 'false and NULL', [])
self.assert_query(source, 'true and false', [])
self.assert_query(source, 'true and true', [1, 2, 3, 4, 5])
self.assert_query(source, 'true and NULL', [])
self.assert_query(source, 'NULL and false', [])
self.assert_query(source, 'NULL and true', [])
self.assert_query(source, 'NULL and NULL', [])
self.assert_query(source, 'false or false', [])
self.assert_query(source, 'false or true', [1, 2, 3, 4, 5])
self.assert_query(source, 'false or NULL', [])
self.assert_query(source, 'true or false', [1, 2, 3, 4, 5])
self.assert_query(source, 'true or true', [1, 2, 3, 4, 5])
self.assert_query(source, 'true or NULL', [1, 2, 3, 4, 5])
self.assert_query(source, 'NULL or false', [])
self.assert_query(source, 'NULL or true', [1, 2, 3, 4, 5])
self.assert_query(source, 'NULL or NULL', [])
self.assert_query(source, 'not true', [])
self.assert_query(source, 'not false', [1, 2, 3, 4, 5])
self.assert_query(source, 'not null', [])
# not
self.assert_query(source, 'not name = \'Apple\'', [1, 3, 4])
self.assert_query(source, 'not name IS NULL', [1, 2, 3, 4])
self.assert_query(source, 'not name = \'Apple\' or name = \'Apple\'', [1, 2, 3, 4])
self.assert_query(source, 'not name = \'Apple\' or not name = \'Apple\'', [1, 3, 4])
self.assert_query(source, 'not name = \'Apple\' and pk = 4', [4])
self.assert_query(source, 'not name = \'Apple\' and not pk = 4', [1, 3])
self.assert_query(source, 'not pk IN (1, 2, 4, 8)', [3, 5])
# type conversion - QGIS expressions do not mind that we are comparing a string
# against numeric literals
self.assert_query(source, 'num_char IN (2, 4, 5)', [2, 4, 5])
#function
self.assert_query(source, 'sqrt(pk) >= 2', [4, 5])
self.assert_query(source, 'radians(cnt) < 2', [1, 5])
self.assert_query(source, 'degrees(pk) <= 200', [1, 2, 3])
self.assert_query(source, 'abs(cnt) <= 200', [1, 2, 5])
self.assert_query(source, 'cos(pk) < 0', [2, 3, 4])
self.assert_query(source, 'sin(pk) < 0', [4, 5])
self.assert_query(source, 'tan(pk) < 0', [2, 3, 5])
self.assert_query(source, 'acos(-1) < pk', [4, 5])
self.assert_query(source, 'asin(1) < pk', [2, 3, 4, 5])
self.assert_query(source, 'atan(3.14) < pk', [2, 3, 4, 5])
self.assert_query(source, 'atan2(3.14, pk) < 1', [3, 4, 5])
self.assert_query(source, 'exp(pk) < 10', [1, 2])
self.assert_query(source, 'ln(pk) <= 1', [1, 2])
self.assert_query(source, 'log(3, pk) <= 1', [1, 2, 3])
self.assert_query(source, 'log10(pk) < 0.5', [1, 2, 3])
self.assert_query(source, 'round(3.14) <= pk', [3, 4, 5])
self.assert_query(source, 'round(0.314,1) * 10 = pk', [3])
self.assert_query(source, 'floor(3.14) <= pk', [3, 4, 5])
self.assert_query(source, 'ceil(3.14) <= pk', [4, 5])
self.assert_query(source, 'pk < pi()', [1, 2, 3])
self.assert_query(source, 'round(cnt / 66.67) <= 2', [1, 5])
self.assert_query(source, 'floor(cnt / 66.67) <= 2', [1, 2, 5])
self.assert_query(source, 'ceil(cnt / 66.67) <= 2', [1, 5])
self.assert_query(source, 'pk < pi() / 2', [1])
self.assert_query(source, 'pk = char(51)', [3])
self.assert_query(source, 'pk = coalesce(NULL,3,4)', [3])
self.assert_query(source, 'lower(name) = \'apple\'', [2])
self.assert_query(source, 'upper(name) = \'APPLE\'', [2])
self.assert_query(source, 'name = trim(\' Apple \')', [2])
# geometry
# azimuth and touches tests are deactivated because they do not pass for WFS source
#self.assert_query(source, 'azimuth($geometry,geom_from_wkt( \'Point (-70 70)\')) < pi()', [1, 5])
self.assert_query(source, 'x($geometry) < -70', [1, 5])
self.assert_query(source, 'y($geometry) > 70', [2, 4, 5])
self.assert_query(source, 'xmin($geometry) < -70', [1, 5])
self.assert_query(source, 'ymin($geometry) > 70', [2, 4, 5])
self.assert_query(source, 'xmax($geometry) < -70', [1, 5])
self.assert_query(source, 'ymax($geometry) > 70', [2, 4, 5])
self.assert_query(source, 'disjoint($geometry,geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'))', [4, 5])
self.assert_query(source, 'intersects($geometry,geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'))', [1, 2])
#self.assert_query(source, 'touches($geometry,geom_from_wkt( \'Polygon ((-70.332 66.33, -65.32 66.33, -65.32 78.3, -70.332 78.3, -70.332 66.33))\'))', [1, 4])
self.assert_query(source, 'contains(geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'),$geometry)', [1, 2])
self.assert_query(source, 'distance($geometry,geom_from_wkt( \'Point (-70 70)\')) > 7', [4, 5])
self.assert_query(source, 'intersects($geometry,geom_from_gml( \'<gml:Polygon srsName="EPSG:4326"><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>-72.2,66.1 -65.2,66.1 -65.2,72.0 -72.2,72.0 -72.2,66.1</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs></gml:Polygon>\'))', [1, 2])
# combination of an uncompilable expression and limit
# TODO - move this test to FeatureSourceTestCase
# it's currently added in ProviderTestCase, but tests only using a QgsVectorLayer getting features,
# i.e. not directly requesting features from the provider. Turns out the WFS provider fails this
# and should be fixed - then we can enable this test at the FeatureSourceTestCase level
#feature = next(self.source.getFeatures(QgsFeatureRequest().setFilterExpression('pk=4')))
#context = QgsExpressionContext()
#scope = QgsExpressionContextScope()
#scope.setVariable('parent', feature)
#context.appendScope(scope)
#request = QgsFeatureRequest()
#request.setExpressionContext(context)
#request.setFilterExpression('"pk" = attribute(@parent, \'pk\')')
#request.setLimit(1)
#values = [f['pk'] for f in self.source.getFeatures(request)]
#self.assertEqual(values, [4])
def testGetFeaturesExp(self):
self.runGetFeatureTests(self.source)
def runOrderByTests(self):
request = QgsFeatureRequest().addOrderBy('cnt')
values = [f['cnt'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [-200, 100, 200, 300, 400])
request = QgsFeatureRequest().addOrderBy('cnt', False)
values = [f['cnt'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [400, 300, 200, 100, -200])
request = QgsFeatureRequest().addOrderBy('name')
values = [f['name'] for f in self.source.getFeatures(request)]
self.assertEqual(values, ['Apple', 'Honey', 'Orange', 'Pear', NULL])
request = QgsFeatureRequest().addOrderBy('name', True, True)
values = [f['name'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [NULL, 'Apple', 'Honey', 'Orange', 'Pear'])
request = QgsFeatureRequest().addOrderBy('name', False)
values = [f['name'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [NULL, 'Pear', 'Orange', 'Honey', 'Apple'])
request = QgsFeatureRequest().addOrderBy('name', False, False)
values = [f['name'] for f in self.source.getFeatures(request)]
self.assertEqual(values, ['Pear', 'Orange', 'Honey', 'Apple', NULL])
# Case sensitivity
request = QgsFeatureRequest().addOrderBy('name2')
values = [f['name2'] for f in self.source.getFeatures(request)]
self.assertEqual(values, ['Apple', 'Honey', 'NuLl', 'oranGe', 'PEaR'])
# Combination with LIMIT
request = QgsFeatureRequest().addOrderBy('pk', False).setLimit(2)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4])
# A slightly more complex expression
request = QgsFeatureRequest().addOrderBy('pk*2', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
# Order reversing expression
request = QgsFeatureRequest().addOrderBy('pk*-1', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [1, 2, 3, 4, 5])
# Type dependent expression
request = QgsFeatureRequest().addOrderBy('num_char*2', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
# Order by guaranteed to fail
request = QgsFeatureRequest().addOrderBy('not a valid expression*', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(set(values), set([5, 4, 3, 2, 1]))
# Multiple order bys and boolean
request = QgsFeatureRequest().addOrderBy('pk > 2').addOrderBy('pk', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [2, 1, 5, 4, 3])
# Multiple order bys, one bad, and a limit
request = QgsFeatureRequest().addOrderBy('pk', False).addOrderBy('not a valid expression*', False).setLimit(2)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4])
# Bad expression first
request = QgsFeatureRequest().addOrderBy('not a valid expression*', False).addOrderBy('pk', False).setLimit(2)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4])
# Combination with subset of attributes
request = QgsFeatureRequest().addOrderBy('num_char', False).setSubsetOfAttributes(['pk'], self.source.fields())
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
def testOrderBy(self):
self.runOrderByTests()
def testOpenIteratorAfterSourceRemoval(self):
"""
        Test that removing the source after opening an iterator does not crash. All required
        information should be captured in the iterator's source and there MUST be no
        links between the iterators and the source's data source.
"""
if not getattr(self, 'getSource', None):
return
source = self.getSource()
it = source.getFeatures()
del source
# get the features
pks = []
for f in it:
pks.append(f['pk'])
self.assertEqual(set(pks), {1, 2, 3, 4, 5})
def testGetFeaturesFidTests(self):
fids = [f.id() for f in self.source.getFeatures()]
assert len(fids) == 5, 'Expected 5 features, got {} instead'.format(len(fids))
for id in fids:
features = [f for f in self.source.getFeatures(QgsFeatureRequest().setFilterFid(id))]
self.assertEqual(len(features), 1)
feature = features[0]
self.assertTrue(feature.isValid())
result = [feature.id()]
expected = [id]
assert result == expected, 'Expected {} and got {} when testing for feature ID filter'.format(expected, result)
# test that results match QgsFeatureRequest.acceptFeature
request = QgsFeatureRequest().setFilterFid(id)
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f.id() == id)
# bad features
it = self.source.getFeatures(QgsFeatureRequest().setFilterFid(-99999999))
feature = QgsFeature(5)
feature.setValid(False)
self.assertFalse(it.nextFeature(feature))
self.assertFalse(feature.isValid())
def testGetFeaturesFidsTests(self):
fids = [f.id() for f in self.source.getFeatures()]
self.assertEqual(len(fids), 5)
request = QgsFeatureRequest().setFilterFids([fids[0], fids[2]])
result = set([f.id() for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = set([fids[0], fids[2]])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f.id() in expected)
result = set([f.id() for f in self.source.getFeatures(QgsFeatureRequest().setFilterFids([fids[1], fids[3], fids[4]]))])
expected = set([fids[1], fids[3], fids[4]])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
#sources should ignore non-existent fids
result = set([f.id() for f in self.source.getFeatures(QgsFeatureRequest().setFilterFids([-101, fids[1], -102, fids[3], -103, fids[4], -104]))])
expected = set([fids[1], fids[3], fids[4]])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
result = set([f.id() for f in self.source.getFeatures(QgsFeatureRequest().setFilterFids([]))])
expected = set([])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
# Rewind mid-way
request = QgsFeatureRequest().setFilterFids([fids[1], fids[3], fids[4]])
feature_it = self.source.getFeatures(request)
feature = QgsFeature()
feature.setValid(True)
self.assertTrue(feature_it.nextFeature(feature))
self.assertIn(feature.id(), [fids[1], fids[3], fids[4]])
first_feature = feature
self.assertTrue(feature.isValid())
# rewind
self.assertTrue(feature_it.rewind())
self.assertTrue(feature_it.nextFeature(feature))
self.assertEqual(feature.id(), first_feature.id())
self.assertTrue(feature.isValid())
# grab all features
self.assertTrue(feature_it.nextFeature(feature))
self.assertTrue(feature_it.nextFeature(feature))
# none left
self.assertFalse(feature_it.nextFeature(feature))
self.assertFalse(feature.isValid())
def testGetFeaturesFilterRectTests(self):
extent = QgsRectangle(-70, 67, -60, 80)
request = QgsFeatureRequest().setFilterRect(extent)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([2, 4]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in set([2, 4]))
# test with an empty rectangle
extent = QgsRectangle()
request = QgsFeatureRequest().setFilterRect(extent)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([1, 2, 3, 4, 5]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# ExactIntersection flag set, but no filter rect set. Should be ignored.
request = QgsFeatureRequest()
request.setFlags(QgsFeatureRequest.ExactIntersect)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([1, 2, 3, 4, 5]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
def testRectAndExpression(self):
extent = QgsRectangle(-70, 67, -60, 80)
request = QgsFeatureRequest().setFilterExpression('"cnt">200').setFilterRect(extent)
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = [4]
assert set(expected) == result, 'Expected {} and got {} when testing for combination of filterRect and expression'.format(set(expected), result)
self.assertTrue(all_valid)
# shouldn't matter what order this is done in
request = QgsFeatureRequest().setFilterRect(extent).setFilterExpression('"cnt">200')
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = [4]
assert set(
expected) == result, 'Expected {} and got {} when testing for combination of filterRect and expression'.format(
set(expected), result)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in expected)
def testGetFeaturesDestinationCrs(self):
request = QgsFeatureRequest().setDestinationCrs(QgsCoordinateReferenceSystem('epsg:3785'), QgsProject.instance().transformContext())
features = {f['pk']: f for f in self.source.getFeatures(request)}
# test that features have been reprojected
self.assertAlmostEqual(features[1].geometry().constGet().x(), -7829322, -5)
self.assertAlmostEqual(features[1].geometry().constGet().y(), 9967753, -5)
self.assertAlmostEqual(features[2].geometry().constGet().x(), -7591989, -5)
self.assertAlmostEqual(features[2].geometry().constGet().y(), 11334232, -5)
self.assertFalse(features[3].hasGeometry())
self.assertAlmostEqual(features[4].geometry().constGet().x(), -7271389, -5)
self.assertAlmostEqual(features[4].geometry().constGet().y(), 14531322, -5)
self.assertAlmostEqual(features[5].geometry().constGet().x(), -7917376, -5)
self.assertAlmostEqual(features[5].geometry().constGet().y(), 14493008, -5)
# when destination crs is set, filter rect should be in destination crs
rect = QgsRectangle(-7650000, 10500000, -7200000, 15000000)
request = QgsFeatureRequest().setDestinationCrs(QgsCoordinateReferenceSystem('epsg:3785'), QgsProject.instance().transformContext()).setFilterRect(rect)
features = {f['pk']: f for f in self.source.getFeatures(request)}
self.assertEqual(set(features.keys()), {2, 4})
# test that features have been reprojected
self.assertAlmostEqual(features[2].geometry().constGet().x(), -7591989, -5)
self.assertAlmostEqual(features[2].geometry().constGet().y(), 11334232, -5)
self.assertAlmostEqual(features[4].geometry().constGet().x(), -7271389, -5)
self.assertAlmostEqual(features[4].geometry().constGet().y(), 14531322, -5)
# bad rect for transform
rect = QgsRectangle(-99999999999, 99999999999, -99999999998, 99999999998)
request = QgsFeatureRequest().setDestinationCrs(QgsCoordinateReferenceSystem('epsg:28356'), QgsProject.instance().transformContext()).setFilterRect(rect)
features = [f for f in self.source.getFeatures(request)]
self.assertFalse(features)
def testGetFeaturesLimit(self):
it = self.source.getFeatures(QgsFeatureRequest().setLimit(2))
features = [f['pk'] for f in it]
assert len(features) == 2, 'Expected two features, got {} instead'.format(len(features))
# fetch one feature
feature = QgsFeature()
assert not it.nextFeature(feature), 'Expected no feature after limit, got one'
it.rewind()
features = [f['pk'] for f in it]
assert len(features) == 2, 'Expected two features after rewind, got {} instead'.format(len(features))
it.rewind()
assert it.nextFeature(feature), 'Expected feature after rewind, got none'
it.rewind()
features = [f['pk'] for f in it]
assert len(features) == 2, 'Expected two features after rewind, got {} instead'.format(len(features))
# test with expression, both with and without compilation
try:
self.disableCompiler()
except AttributeError:
pass
it = self.source.getFeatures(QgsFeatureRequest().setLimit(2).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert set(features) == set([1, 5]), 'Expected [1,5] for expression and feature limit, Got {} instead'.format(features)
try:
self.enableCompiler()
except AttributeError:
pass
it = self.source.getFeatures(QgsFeatureRequest().setLimit(2).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert set(features) == set([1, 5]), 'Expected [1,5] for expression and feature limit, Got {} instead'.format(features)
# limit to more features than exist
it = self.source.getFeatures(QgsFeatureRequest().setLimit(3).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert set(features) == set([1, 5]), 'Expected [1,5] for expression and feature limit, Got {} instead'.format(features)
# limit to less features than possible
it = self.source.getFeatures(QgsFeatureRequest().setLimit(1).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert 1 in features or 5 in features, 'Expected either 1 or 5 for expression and feature limit, Got {} instead'.format(features)
def testClosedIterators(self):
""" Test behavior of closed iterators """
# Test retrieving feature after closing iterator
f_it = self.source.getFeatures(QgsFeatureRequest())
fet = QgsFeature()
assert f_it.nextFeature(fet), 'Could not fetch feature'
assert fet.isValid(), 'Feature is not valid'
assert f_it.close(), 'Could not close iterator'
self.assertFalse(f_it.nextFeature(fet), 'Fetched feature after iterator closed, expected nextFeature() to return False')
self.assertFalse(fet.isValid(), 'Valid feature fetched from closed iterator, should be invalid')
# Test rewinding closed iterator
self.assertFalse(f_it.rewind(), 'Rewinding closed iterator successful, should not be allowed')
def testGetFeaturesSubsetAttributes(self):
""" Test that expected results are returned when using subsets of attributes """
tests = {'pk': set([1, 2, 3, 4, 5]),
'cnt': set([-200, 300, 100, 200, 400]),
'name': set(['Pear', 'Orange', 'Apple', 'Honey', NULL]),
'name2': set(['NuLl', 'PEaR', 'oranGe', 'Apple', 'Honey'])}
for field, expected in list(tests.items()):
request = QgsFeatureRequest().setSubsetOfAttributes([field], self.source.fields())
result = set([f[field] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
self.assertEqual(result, expected, 'Expected {}, got {}'.format(expected, result))
self.assertTrue(all_valid)
def testGetFeaturesSubsetAttributes2(self):
""" Test that other fields are NULL when fetching subsets of attributes """
for field_to_fetch in ['pk', 'cnt', 'name', 'name2']:
for f in self.source.getFeatures(QgsFeatureRequest().setSubsetOfAttributes([field_to_fetch], self.source.fields())):
# Check that all other fields are NULL and force name to lower-case
for other_field in [field.name() for field in self.source.fields() if field.name().lower() != field_to_fetch]:
if other_field == 'pk' or other_field == 'PK':
# skip checking the primary key field, as it may be validly fetched by providers to use as feature id
continue
self.assertEqual(f[other_field], NULL, 'Value for field "{}" was present when it should not have been fetched by request'.format(other_field))
def testGetFeaturesNoGeometry(self):
""" Test that no geometry is present when fetching features without geometry"""
for f in self.source.getFeatures(QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry)):
self.assertFalse(f.hasGeometry(), 'Expected no geometry, got one')
self.assertTrue(f.isValid())
def testGetFeaturesWithGeometry(self):
""" Test that geometry is present when fetching features without setting NoGeometry flag"""
for f in self.source.getFeatures(QgsFeatureRequest()):
if f['pk'] == 3:
# no geometry for this feature
continue
assert f.hasGeometry(), 'Expected geometry, got none'
self.assertTrue(f.isValid())
def testUniqueValues(self):
self.assertEqual(set(self.source.uniqueValues(1)), set([-200, 100, 200, 300, 400]))
assert set(['Apple', 'Honey', 'Orange', 'Pear', NULL]) == set(self.source.uniqueValues(2)), 'Got {}'.format(set(self.source.uniqueValues(2)))
def testMinimumValue(self):
self.assertEqual(self.source.minimumValue(1), -200)
self.assertEqual(self.source.minimumValue(2), 'Apple')
def testMaximumValue(self):
self.assertEqual(self.source.maximumValue(1), 400)
self.assertEqual(self.source.maximumValue(2), 'Pear')
def testAllFeatureIds(self):
ids = set([f.id() for f in self.source.getFeatures()])
self.assertEqual(set(self.source.allFeatureIds()), ids)
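# A minimal subclassing sketch (illustration only; the real provider tests in this
# repository wire up their own data sources):
#
#     class TestMemorySource(unittest.TestCase, FeatureSourceTestCase):
#
#         @classmethod
#         def setUpClass(cls):
#             # 'createLayerWithFivePoints' is a hypothetical helper that loads one of
#             # the datasets from tests/testdata/source into a vector layer.
#             cls.vl = createLayerWithFivePoints()
#             cls.source = QgsVectorLayerFeatureSource(cls.vl)
#
# With 'source' set, all of the tests defined above run unchanged against it.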
| gpl-2.0 | 8,778,962,239,832,357,000 | 52.039275 | 299 | 0.613608 | false |
jason-ni/eventlet-raft | counter_test.py | 1 | 1047 | from eventlet_raft.client import RaftClient
server_address_list = [
('127.0.0.1', 4000),
('127.0.0.1', 4001),
('127.0.0.1', 4002),
('127.0.0.1', 4003),
('127.0.0.1', 4004),
]
def write_log(log, data, msg):
log.write("{0}: {1}\n".format(
msg,
str(data),
))
client = RaftClient(server_address_list)
print client.register()
with open('counter_test.log', 'w') as log:
ret = client.set_value('counter', 0)
if not ret['success']:
raise Exception("failed to reset counter")
write_log(log, ret, 'reset counter')
accu = 0
for i in range(1000):
ret = client.set_value('counter', i)
if not ret['success']:
raise Exception("failed to set counter")
write_log(log, ret, 'set counter:')
ret = client.get_value('counter')
write_log(log, ret, 'get counter:')
if not ret['success']:
raise Exception("failed to get counter")
accu += ret['resp'][1]
write_log(log, accu, i)
print 'result: ', accu
| apache-2.0 | 6,919,018,597,424,594,000 | 25.175 | 52 | 0.560649 | false |
Signbank/FinSL-signbank | signbank/dictionary/views.py | 1 | 7652 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.contrib.admin.views.decorators import user_passes_test
from django.core.exceptions import PermissionDenied
from django.urls import reverse, reverse_lazy
from django.utils.translation import ugettext as _
from django.views.generic.list import ListView
from django.views.generic import FormView
from django.db.models import Q, F, Count, Case, Value, When, BooleanField
from tagging.models import Tag
from guardian.shortcuts import get_perms, get_objects_for_user, get_users_with_perms
from notifications.signals import notify
from .models import Dataset, Keyword, FieldChoice, Gloss, GlossRelation
from .forms import GlossCreateForm, LexiconForm
from ..video.forms import GlossVideoForm
@permission_required('dictionary.add_gloss')
def create_gloss(request):
"""Handle Gloss creation."""
if request.method == 'POST':
form = GlossCreateForm(request.POST)
glossvideoform = GlossVideoForm(request.POST, request.FILES)
        glossvideoform.fields['videofile'].required = False
if form.is_valid() and glossvideoform.is_valid():
if 'view_dataset' not in get_perms(request.user, form.cleaned_data["dataset"]):
# If user has no permissions to dataset, raise PermissionDenied to show 403 template.
msg = _("You do not have permissions to create glosses for this lexicon.")
messages.error(request, msg)
raise PermissionDenied(msg)
new_gloss = form.save(commit=False)
new_gloss.created_by = request.user
new_gloss.updated_by = request.user
new_gloss.save()
if form.cleaned_data["tag"]:
Tag.objects.add_tag(new_gloss, form.cleaned_data["tag"].name)
if glossvideoform.cleaned_data['videofile']:
glossvideo = glossvideoform.save(commit=False)
glossvideo.gloss = new_gloss
glossvideo.save()
return HttpResponseRedirect(reverse('dictionary:admin_gloss_view', kwargs={'pk': new_gloss.pk}))
else:
# Return bound fields with errors if the form is not valid.
allowed_datasets = get_objects_for_user(request.user, 'dictionary.view_dataset')
form.fields["dataset"].queryset = Dataset.objects.filter(id__in=[x.id for x in allowed_datasets])
return render(request, 'dictionary/create_gloss.html', {'form': form, 'glossvideoform': glossvideoform})
else:
allowed_datasets = get_objects_for_user(request.user, 'dictionary.view_dataset')
form = GlossCreateForm()
glossvideoform = GlossVideoForm()
form.fields["dataset"].queryset = Dataset.objects.filter(id__in=[x.id for x in allowed_datasets])
return render(request, 'dictionary/create_gloss.html', {'form': form, 'glossvideoform': glossvideoform})
def keyword_value_list(request, prefix=None):
"""View to generate a list of possible values for a keyword given a prefix."""
kwds = Keyword.objects.filter(text__startswith=prefix)
kwds_list = [k.text for k in kwds]
return HttpResponse("\n".join(kwds_list), content_type='text/plain')
@user_passes_test(lambda u: u.is_staff, login_url='/accounts/login/')
def try_code(request):
"""A view for the developer to try out things"""
choicedict = {}
for key, choices in list(choicedict.items()):
for machine_value, english_name in choices:
FieldChoice(
english_name=english_name, field=key, machine_value=machine_value).save()
return HttpResponse('OK', status=200)
class ManageLexiconsListView(ListView):
model = Dataset
template_name = 'dictionary/manage_lexicons.html'
paginate_by = 50
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
qs = self.get_queryset()
context['has_permissions'] = qs.filter(has_view_perm=True)
context['no_permissions'] = qs.filter(has_view_perm=False)
# Show users with permissions to lexicons to SuperUsers
if self.request.user.is_superuser:
for lexicon in context['has_permissions']:
lexicon.users_with_perms = get_users_with_perms(obj=lexicon, with_superusers=True)
for lexicon in context['no_permissions']:
lexicon.users_with_perms = get_users_with_perms(obj=lexicon, with_superusers=True)
return context
def get_queryset(self):
# Get allowed datasets for user (django-guardian)
allowed_datasets = get_objects_for_user(self.request.user, 'dictionary.view_dataset')
# Get queryset
qs = super().get_queryset()
qs = qs.annotate(
has_view_perm=Case(
When(Q(id__in=allowed_datasets), then=Value(True)),
default=Value(False), output_field=BooleanField()))
qs = qs.select_related('signlanguage')
return qs
class ApplyLexiconPermissionsFormView(FormView):
form_class = LexiconForm
template_name = 'dictionary/manage_lexicons.html'
success_url = reverse_lazy('dictionary:manage_lexicons')
def form_valid(self, form):
dataset = form.cleaned_data['dataset']
admins = dataset.admins.all()
notify.send(sender=self.request.user, recipient=admins,
verb="{txt} {dataset}".format(txt=_("applied for permissions to:"), dataset=dataset.public_name),
action_object=self.request.user,
description="{user} ({user.first_name} {user.last_name}) {txt} {dataset}".format(
user=self.request.user, txt=_("applied for permissions to lexicon:"),
dataset=dataset.public_name
),
target=self.request.user, public=False)
msg = "{text} {lexicon_name}".format(text=_("Successfully applied permissions for"), lexicon_name=dataset.public_name)
messages.success(self.request, msg)
return super().form_valid(form)
def network_graph(request):
"""Network graph of GlossRelations"""
context = dict()
form = LexiconForm(request.GET, use_required_attribute=False)
# Get allowed datasets for user (django-guardian)
allowed_datasets = get_objects_for_user(request.user, 'dictionary.view_dataset')
# Filter the forms dataset field for the datasets user has permission to.
form.fields["dataset"].queryset = Dataset.objects.filter(id__in=[x.id for x in allowed_datasets])
dataset = None
if form.is_valid():
form.fields["dataset"].widget.is_required = False
dataset = form.cleaned_data["dataset"]
if dataset:
context["dataset"] = dataset
nodeqs = Gloss.objects.filter(Q(dataset=dataset),
Q(glossrelation_target__isnull=False) | Q(glossrelation_source__isnull=False))\
.distinct().values("id").annotate(label=F("idgloss"), size=Count("glossrelation_source")+Count("glossrelation_target"))
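        # Each node carries the gloss id, its idgloss as a display label, and a "size"
        # equal to the number of relations it takes part in (as source plus as target),
        # which the rendered graph can use to scale the node.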
context["nodes"] = json.dumps(list(nodeqs))
edgeqs = GlossRelation.objects.filter(Q(source__dataset=dataset) | Q(target__dataset=dataset)).values("id", "source", "target")
context["edges"] = json.dumps(list(edgeqs))
return render(request, "dictionary/network_graph.html",
{'context': context,
'form': form
})
| bsd-3-clause | -1,069,654,723,331,064,700 | 46.825 | 135 | 0.659827 | false |
PoprostuRonin/memes-api | parsers/mistrzowie.py | 1 | 1724 | from parsel import Selector
from utils import download, find_id_in_url, catch_errors, get_last_part_url
from data import ImageContent, Meme, Author, Page
import re
ROOT = "https://mistrzowie.org"
COMMENT = re.compile(r"Skomentuj\(([0-9]+?)\)")
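# "Skomentuj(<n>)" is Polish for "Comment (<n>)"; the regex captures the comment count
# displayed next to each meme.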
def scrap(url):
html = download(url)
return parse(html)
def parse(html):
document = Selector(text=html)
memes = [catch_errors(parse_meme, element) for element in document.css("div.pic")]
memes = [meme for meme in memes if meme is not None]
title = document.css("title::text").get()
next_page_url = "/mistrzowie/page/" + get_last_part_url(
document.css(".list_next_page_button::attr(href)").get()
)
return Page(title, memes, next_page_url)
def parse_meme(m):
title = m.css("h1.picture > a::text").get()
if title is None:
return None
title = title.strip()
url = m.css("h1.picture > a::attr(href)").get()
points = None
points_text = m.css("span.total_votes_up > span.value::text").get()
try:
points = int(points_text)
except:
pass
comment_count = None
comments_count_text = (
m.css("a.lcomment::text").get().replace("\t", "").replace("\n", "")
)
result = COMMENT.match(comments_count_text)
if result:
try:
comment_count = int(result[1])
except:
pass
else:
comment_count = 0
content = None
src = m.css("img.pic::attr(src)").get()
if src:
content = ImageContent(ROOT + src)
return Meme(
title,
ROOT + url,
"/mistrzowie/{}".format(find_id_in_url(url)),
content,
None,
None,
points,
comment_count,
)
| mit | -4,502,319,310,330,862,600 | 23.28169 | 86 | 0.581206 | false |
mameebox/mameebox | mameebox/printer/driver/proto.py | 1 | 15212 | #!/usr/bin/env python
#coding=utf-8
import serial, time
#===========================================================#
# RASPBERRY PI (tested with Raspbian Jan 2012):
# - Ensure that ttyAMA0 is not used for serial console access:
# edit /boot/cmdline.txt (remove all name-value pairs containing
# ttyAMA0) and comment out last line in /etc/inittab.
# - Fix user permissions with "sudo usermod -a -G dialout pi"
# - Reboot
# - Ensure that the SERIALPORT setting is correct below
#
# BEAGLE BONE:
# Mux settings (Ångström 2012.05, also works on Ubuntu 12.04):
# echo 1 > /sys/kernel/debug/omap_mux/spi0_sclk
# echo 1 > /sys/kernel/debug/omap_mux/spi0_d0
#===========================================================#
def chr(x):
return bytearray((x,))
class ThermalPrinter(object):
"""
Thermal printing library that controls the "micro panel thermal printer" sold in
shops like Adafruit and Sparkfun (e.g. http://www.adafruit.com/products/597).
Mostly ported from Ladyada's Arduino library
(https://github.com/adafruit/Adafruit-Thermal-Printer-Library) to run on
BeagleBone and Raspberry Pi.
Currently handles printing image data and text, but the rest of the
built-in functionality like underlining and barcodes are trivial
to port to Python when needed.
If on BeagleBone or similar device, remember to set the mux settings
or change the UART you are using. See the beginning of this file for
default setup.
Thanks to Matt Richardson for the initial pointers on controlling the
device via Python.
@author: Lauri Kainulainen
"""
# default serial port for the Beagle Bone
#SERIALPORT = '/dev/ttyO2'
# this might work better on a Raspberry Pi
SERIALPORT = '/dev/ttyAMA0'
BAUDRATE = 19200
TIMEOUT = 3
# pixels with more color value (average for multiple channels) are counted as white
# tweak this if your images appear too black or too white
black_threshold = 48
# pixels with less alpha than this are counted as white
alpha_threshold = 127
printer = None
_ESC = chr(27)
# These values (including printDensity and printBreaktime) are taken from
# lazyatom's Adafruit-Thermal-Library branch and seem to work nicely with bitmap
# images. Changes here can cause symptoms like images printing out as random text.
# Play freely, but remember the working values.
# https://github.com/adafruit/Adafruit-Thermal-Printer-Library/blob/0cc508a9566240e5e5bac0fa28714722875cae69/Thermal.cpp
# Set "max heating dots", "heating time", "heating interval"
# n1 = 0-255 Max printing dots, Unit (8dots), Default: 7 (64 dots)
# n2 = 3-255 Heating time, Unit (10us), Default: 80 (800us)
# n3 = 0-255 Heating interval, Unit (10us), Default: 2 (20us)
# The more max heating dots, the more peak current will cost
# when printing, the faster printing speed. The max heating
# dots is 8*(n1+1). The more heating time, the more density,
# but the slower printing speed. If heating time is too short,
# blank page may occur. The more heating interval, the more
# clear, but the slower printing speed.
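    # With the constructor defaults used below: heatingDots=7 -> 8*(7+1) = 64 dots,
    # heatTime=80 -> 800 us, heatInterval=2 -> 20 us.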
def __init__(self, heatTime=80, heatInterval=2, heatingDots=7, serialport=SERIALPORT):
self.printer = serial.Serial(serialport, self.BAUDRATE, timeout=self.TIMEOUT)
self.printer.write(self._ESC) # ESC - command
self.printer.write(chr(64)) # @ - initialize
self.printer.write(self._ESC) # ESC - command
self.printer.write(chr(55)) # 7 - print(settings)
self.printer.write(chr(heatingDots)) # Heating dots (20=balance of darkness vs no jams) default = 20
self.printer.write(chr(heatTime)) # heatTime Library default = 255 (max)
self.printer.write(chr(heatInterval)) # Heat interval (500 uS = slower, but darker) default = 250
# Description of print density from page 23 of the manual:
# DC2 # n Set printing density
# Decimal: 18 35 n
# D4..D0 of n is used to set the printing density. Density is 50% + 5% * n(D4-D0) printing density.
# D7..D5 of n is used to set the printing break time. Break time is n(D7-D5)*250us.
printDensity = 15 # 120% (? can go higher, text is darker but fuzzy)
printBreakTime = 15 # 500 uS
self.printer.write(chr(18))
self.printer.write(chr(35))
self.printer.write(chr((printDensity << 4) | printBreakTime))
def reset(self):
self.printer.write(self._ESC)
self.printer.write(chr(64))
def linefeed(self):
self.printer.write(chr(10))
def justify(self, align="L"):
pos = 0
if align == "L":
pos = 0
elif align == "C":
pos = 1
elif align == "R":
pos = 2
self.printer.write(self._ESC)
self.printer.write(chr(97))
self.printer.write(chr(pos))
def bold_off(self):
self.printer.write(self._ESC)
self.printer.write(chr(69))
self.printer.write(chr(0))
def bold_on(self):
self.printer.write(self._ESC)
self.printer.write(chr(69))
self.printer.write(chr(1))
def font_b_off(self):
self.printer.write(self._ESC)
self.printer.write(chr(33))
self.printer.write(chr(0))
def font_b_on(self):
self.printer.write(self._ESC)
self.printer.write(chr(33))
self.printer.write(chr(1))
def underline_off(self):
self.printer.write(self._ESC)
self.printer.write(chr(45))
self.printer.write(chr(0))
def underline_on(self):
self.printer.write(self._ESC)
self.printer.write(chr(45))
self.printer.write(chr(1))
def inverse_off(self):
self.printer.write(chr(29))
self.printer.write(chr(66))
self.printer.write(chr(0))
def inverse_on(self):
self.printer.write(chr(29))
self.printer.write(chr(66))
self.printer.write(chr(1))
def upsidedown_off(self):
self.printer.write(self._ESC)
self.printer.write(chr(123))
self.printer.write(chr(0))
def upsidedown_on(self):
self.printer.write(self._ESC)
self.printer.write(chr(123))
self.printer.write(chr(1))
def barcode_chr(self, msg):
self.printer.write(chr(29)) # Leave
self.printer.write(chr(72)) # Leave
self.printer.write(msg) # Print barcode # 1:Abovebarcode 2:Below 3:Both 0:Not printed
def barcode_height(self, msg):
self.printer.write(chr(29)) # Leave
self.printer.write(chr(104)) # Leave
self.printer.write(msg) # Value 1-255 Default 50
    def barcode_width(self):
        # chr(119) sets the barcode width (the height is set via chr(104) above).
self.printer.write(chr(29)) # Leave
self.printer.write(chr(119)) # Leave
self.printer.write(chr(2)) # Value 2,3 Default 2
def barcode(self, msg):
""" Please read http://www.adafruit.com/datasheets/A2-user%20manual.pdf
for information on how to use barcodes. """
# CODE SYSTEM, NUMBER OF CHARACTERS
# 65=UPC-A 11,12 #71=CODEBAR >1
# 66=UPC-E 11,12 #72=CODE93 >1
# 67=EAN13 12,13 #73=CODE128 >1
# 68=EAN8 7,8 #74=CODE11 >1
# 69=CODE39 >1 #75=MSI >1
# 70=I25 >1 EVEN NUMBER
self.printer.write(chr(29)) # LEAVE
self.printer.write(chr(107)) # LEAVE
self.printer.write(chr(65)) # USE ABOVE CHART
self.printer.write(chr(12)) # USE CHART NUMBER OF CHAR
self.printer.write(msg)
def print_text(self, msg, chars_per_line=None):
""" Print some text defined by msg. If chars_per_line is defined,
inserts newlines after the given amount. Use normal '\n' line breaks for
empty lines. """
if chars_per_line == None:
self.printer.write(msg)
else:
l = list(msg)
le = len(msg)
for i in range(chars_per_line + 1, le, chars_per_line + 1):
l.insert(i, '\n')
self.printer.write("".join(l))
def print_markup(self, markup):
""" Print text with markup for styling.
Keyword arguments:
markup -- text with a left column of markup as follows:
first character denotes style (n=normal, b=bold, u=underline, i=inverse, f=font B)
second character denotes justification (l=left, c=centre, r=right)
third character must be a space, followed by the text of the line.
"""
lines = markup.splitlines(True)
for l in lines:
style = l[0]
justification = l[1].upper()
text = l[3:]
if style == 'b':
self.bold_on()
elif style == 'u':
self.underline_on()
elif style == 'i':
self.inverse_on()
elif style == 'f':
self.font_b_on()
self.justify(justification)
self.print_text(text)
if justification != 'L':
self.justify()
if style == 'b':
self.bold_off()
elif style == 'u':
self.underline_off()
elif style == 'i':
self.inverse_off()
elif style == 'f':
self.font_b_off()
def convert_pixel_array_to_binary(self, pixels, w, h):
""" Convert the pixel array into a black and white plain list of 1's and 0's
width is enforced to 384 and padded with white if needed. """
black_and_white_pixels = [1] * 384 * h
if w > 384:
print("Bitmap width too large: %s. Needs to be under 384" % w)
return False
elif w < 384:
print("Bitmap under 384 (%s), padding the rest with white" % w)
print("Bitmap size", w)
if type(pixels[0]) == int: # single channel
print(" => single channel")
for i, p in enumerate(pixels):
if p < self.black_threshold:
black_and_white_pixels[i % w + i / w * 384] = 0
else:
black_and_white_pixels[i % w + i / w * 384] = 1
elif type(pixels[0]) in (list, tuple) and len(pixels[0]) == 3: # RGB
print(" => RGB channel")
for i, p in enumerate(pixels):
                if sum(p[0:3]) / 3.0 < self.black_threshold:
black_and_white_pixels[i % w + i / w * 384] = 0
else:
black_and_white_pixels[i % w + i / w * 384] = 1
elif type(pixels[0]) in (list, tuple) and len(pixels[0]) == 4: # RGBA
print(" => RGBA channel")
for i, p in enumerate(pixels):
                if sum(p[0:3]) / 3.0 < self.black_threshold and p[3] > self.alpha_threshold:
black_and_white_pixels[i % w + i / w * 384] = 0
else:
black_and_white_pixels[i % w + i / w * 384] = 1
else:
print("Unsupported pixels array type. Please send plain list (single channel, RGB or RGBA)")
print("Type pixels[0]", type(pixels[0]), "haz", pixels[0])
return False
return black_and_white_pixels
def print_bitmap(self, pixels, w, h, output_png=False):
""" Best to use images that have a pixel width of 384 as this corresponds
to the printer row width.
pixels = a pixel array. RGBA, RGB, or one channel plain list of values (ranging from 0-255).
w = width of image
h = height of image
if "output_png" is set, prints an "print_bitmap_output.png" in the same folder using the same
thresholds as the actual printing commands. Useful for seeing if there are problems with the
original image (this requires PIL).
Example code with PIL:
import Image, ImageDraw
i = Image.open("lammas_grayscale-bw.png")
data = list(i.getdata())
w, h = i.size
p.print_bitmap(data, w, h)
"""
counter = 0
if output_png:
import Image, ImageDraw
test_img = Image.new('RGB', (384, h))
draw = ImageDraw.Draw(test_img)
self.linefeed()
black_and_white_pixels = self.convert_pixel_array_to_binary(pixels, w, h)
print_bytes = []
# read the bytes into an array
for rowStart in range(0, h, 256):
chunkHeight = 255 if (h - rowStart) > 255 else h - rowStart
print_bytes += (18, 42, chunkHeight, 48)
for i in range(0, 48 * chunkHeight, 1):
# read one byte in
byt = 0
for xx in range(8):
pixel_value = black_and_white_pixels[counter]
counter += 1
# check if this is black
if pixel_value == 0:
byt += 1 << (7 - xx)
if output_png: draw.point((counter % 384, round(counter / 384)), fill=(0, 0, 0))
# it's white
else:
if output_png: draw.point((counter % 384, round(counter / 384)), fill=(255, 255, 255))
print_bytes.append(byt)
# output the array all at once to the printer
# might be better to send while printing when dealing with
# very large arrays...
for b in print_bytes:
self.printer.write(chr(b))
if output_png:
test_print = open('print-output.png', 'wb')
test_img.save(test_print, 'PNG')
print("output saved to %s" % test_print.name)
test_print.close()
if __name__ == '__main__':
import sys, os
if len(sys.argv) == 2:
serialport = sys.argv[1]
else:
serialport = ThermalPrinter.SERIALPORT
if not os.path.exists(serialport):
sys.exit("ERROR: Serial port not found at: %s" % serialport)
print("Testing printer on port %s" % serialport)
p = ThermalPrinter(serialport=serialport)
p.print_text("\nHello maailma. How's it going?\n")
p.print_text("Part of this ")
p.bold_on()
p.print_text("line is bold\n")
p.bold_off()
p.print_text("Part of this ")
p.font_b_on()
p.print_text("line is fontB\n")
p.font_b_off()
p.justify("R")
p.print_text("right justified\n")
p.justify("C")
p.print_text("centered\n")
p.justify() # justify("L") works too
p.print_text("left justified\n")
p.upsidedown_on()
p.print_text("upside down\n")
p.upsidedown_off()
markup = """bl bold left
ur underline right
fc font b centred (next line blank)
nl
il inverse left
"""
p.print_markup(markup)
# runtime dependency on Python Imaging Library
import Image, ImageDraw
i = Image.open("example-lammas.png")
data = list(i.getdata())
w, h = i.size
p.print_bitmap(data, w, h, True)
p.linefeed()
p.justify("C")
p.barcode_chr("2")
p.barcode("014633098808")
p.linefeed()
p.linefeed()
p.linefeed()
| gpl-2.0 | 3,221,835,558,315,064,000 | 35.5625 | 124 | 0.573504 | false |
alexryndin/ambari | ambari-agent/src/main/python/ambari_agent/HostCleanup.py | 1 | 22471 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# For compatibility with different OSes
# Edit PYTHONPATH to be able to import common_functions
import sys
sys.path.append("/usr/lib/python2.6/site-packages/")
########################################################
import os
import string
import subprocess
import logging
import shutil
import platform
import fnmatch
import ConfigParser
import optparse
import shlex
import datetime
import tempfile
from AmbariConfig import AmbariConfig
from ambari_agent.Constants import AGENT_TMP_DIR
from ambari_commons import OSCheck, OSConst
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
logger = logging.getLogger()
USER_ERASE_CMD = "userdel -rf {0}"
GROUP_ERASE_CMD = "groupdel {0}"
PROC_KILL_CMD = "kill -9 {0}"
ALT_DISP_CMD = "alternatives --display {0}"
ALT_ERASE_CMD = "alternatives --remove {0} {1}"
RUN_HOST_CHECKS_CMD = '/var/lib/ambari-agent/cache/custom_actions/scripts/check_host.py ACTIONEXECUTE {0} /var/lib/ambari-agent/cache/custom_actions {1} INFO {2}'
REPO_PATH_RHEL = "/etc/yum.repos.d"
REPO_PATH_SUSE = "/etc/zypp/repos.d/"
REPO_PATH_UBUNTU = "/etc/apt/sources.list.d"
SKIP_LIST = []
TMP_HOST_CHECK_FILE_NAME = "tmp_hostcheck.result"
HOST_CHECK_FILE_NAME = "hostcheck.result"
HOST_CHECK_CUSTOM_ACTIONS_FILE = "hostcheck_custom_actions.result"
OUTPUT_FILE_NAME = "hostcleanup.result"
PACKAGE_SECTION = "packages"
PACKAGE_KEY = "pkg_list"
USER_SECTION = "users"
USER_KEY = "usr_list"
USER_HOMEDIR_KEY = "usr_homedir_list"
USER_HOMEDIR_SECTION = "usr_homedir"
REPO_SECTION = "repositories"
REPOS_KEY = "repo_list"
DIR_SECTION = "directories"
ADDITIONAL_DIRS = "additional_directories"
DIR_KEY = "dir_list"
CACHE_FILES_PATTERN = {
'alerts': ['*.json']
}
PROCESS_SECTION = "processes"
PROCESS_KEY = "proc_list"
ALT_SECTION = "alternatives"
ALT_KEYS = ["symlink_list", "target_list"]
HADOOP_GROUP = "hadoop"
FOLDER_LIST = ["/tmp"]
# Additional path patterns to find existing directory
DIRNAME_PATTERNS = [
"/tmp/hadoop-", "/tmp/hsperfdata_"
]
# resources that should not be cleaned
REPOSITORY_BLACK_LIST = ["ambari.repo"]
PACKAGES_BLACK_LIST = ["ambari-server", "ambari-agent"]
def get_erase_cmd():
if OSCheck.is_redhat_family():
return "yum erase -y {0}"
elif OSCheck.is_suse_family():
return "zypper -n -q remove {0}"
elif OSCheck.is_ubuntu_family():
return "/usr/bin/apt-get -y -q remove {0}"
else:
raise Exception("Unsupported OS family '{0}', cannot remove package. ".format(OSCheck.get_os_family()))
class HostCleanup:
def resolve_ambari_config(self):
try:
config = AmbariConfig()
if os.path.exists(AmbariConfig.getConfigFile()):
config.read(AmbariConfig.getConfigFile())
else:
raise Exception("No config found, use default")
except Exception, err:
logger.warn(err)
return config
def get_additional_dirs(self):
resultList = []
dirList = set()
for patern in DIRNAME_PATTERNS:
dirList.add(os.path.dirname(patern))
for folder in dirList:
      for root, dirnames, filenames in os.walk(folder):
        for dirname in dirnames:
          fullpath = os.path.join(root, dirname)
          for patern in DIRNAME_PATTERNS:
            if patern in fullpath:
              resultList.append(fullpath)
return resultList
def do_cleanup(self, argMap=None):
if argMap:
packageList = argMap.get(PACKAGE_SECTION)
userList = argMap.get(USER_SECTION)
homeDirList = argMap.get(USER_HOMEDIR_SECTION)
dirList = argMap.get(DIR_SECTION)
repoList = argMap.get(REPO_SECTION)
procList = argMap.get(PROCESS_SECTION)
alt_map = argMap.get(ALT_SECTION)
additionalDirList = self.get_additional_dirs()
if userList and not USER_SECTION in SKIP_LIST:
userIds = self.get_user_ids(userList)
if procList and not PROCESS_SECTION in SKIP_LIST:
logger.info("\n" + "Killing pid's: " + str(procList) + "\n")
self.do_kill_processes(procList)
if packageList and not PACKAGE_SECTION in SKIP_LIST:
logger.info("Deleting packages: " + str(packageList) + "\n")
self.do_erase_packages(packageList)
if userList and not USER_SECTION in SKIP_LIST:
logger.info("\n" + "Deleting users: " + str(userList))
self.do_delete_users(userList)
self.do_erase_dir_silent(homeDirList)
self.do_delete_by_owner(userIds, FOLDER_LIST)
if dirList and not DIR_SECTION in SKIP_LIST:
logger.info("\n" + "Deleting directories: " + str(dirList))
self.do_erase_dir_silent(dirList)
if additionalDirList and not ADDITIONAL_DIRS in SKIP_LIST:
logger.info("\n" + "Deleting additional directories: " + str(dirList))
self.do_erase_dir_silent(additionalDirList)
if repoList and not REPO_SECTION in SKIP_LIST:
repoFiles = self.find_repo_files_for_repos(repoList)
logger.info("\n" + "Deleting repo files: " + str(repoFiles))
self.do_erase_files_silent(repoFiles)
if alt_map and not ALT_SECTION in SKIP_LIST:
logger.info("\n" + "Erasing alternatives:" + str(alt_map) + "\n")
self.do_erase_alternatives(alt_map)
return 0
def read_host_check_file(self, config_file_path):
propertyMap = {}
try:
with open(config_file_path, 'r'):
pass
except Exception, e:
logger.error("Host check result not found at: " + str(config_file_path))
return None
try:
config = ConfigParser.RawConfigParser()
config.read(config_file_path)
except Exception, e:
logger.error("Cannot read host check result: " + str(e))
return None
# Initialize map from file
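    # The host check result is a plain INI file; a minimal hypothetical example
    # of the sections/keys parsed below:
    #   [processes]
    #   proc_list=1234,5678
    #   [users]
    #   usr_list=hdfs,yarn
    #   usr_homedir_list=/home/hdfs,/home/yarn
    #   [repositories]
    #   repo_list=HDP-2.2,HDP-UTILS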
try:
if config.has_option(PACKAGE_SECTION, PACKAGE_KEY):
propertyMap[PACKAGE_SECTION] = config.get(PACKAGE_SECTION, PACKAGE_KEY).split(',')
except:
logger.warn("Cannot read package list: " + str(sys.exc_info()[0]))
try:
if config.has_option(PROCESS_SECTION, PROCESS_KEY):
propertyMap[PROCESS_SECTION] = config.get(PROCESS_SECTION, PROCESS_KEY).split(',')
except:
logger.warn("Cannot read process list: " + str(sys.exc_info()[0]))
try:
if config.has_option(USER_SECTION, USER_KEY):
propertyMap[USER_SECTION] = config.get(USER_SECTION, USER_KEY).split(',')
except:
logger.warn("Cannot read user list: " + str(sys.exc_info()[0]))
try:
if config.has_option(USER_SECTION, USER_HOMEDIR_KEY):
propertyMap[USER_HOMEDIR_SECTION] = config.get(USER_SECTION, USER_HOMEDIR_KEY).split(',')
except:
logger.warn("Cannot read user homedir list: " + str(sys.exc_info()[0]))
try:
if config.has_option(REPO_SECTION, REPOS_KEY):
propertyMap[REPO_SECTION] = config.get(REPO_SECTION, REPOS_KEY).split(',')
except:
logger.warn("Cannot read repositories list: " + str(sys.exc_info()[0]))
try:
if config.has_option(DIR_SECTION, DIR_KEY):
propertyMap[DIR_SECTION] = config.get(DIR_SECTION, DIR_KEY).split(',')
except:
logger.warn("Cannot read dir list: " + str(sys.exc_info()[0]))
try:
alt_map = {}
if config.has_option(ALT_SECTION, ALT_KEYS[0]):
alt_map[ALT_KEYS[0]] = config.get(ALT_SECTION, ALT_KEYS[0]).split(',')
if config.has_option(ALT_SECTION, ALT_KEYS[1]):
alt_map[ALT_KEYS[1]] = config.get(ALT_SECTION, ALT_KEYS[1]).split(',')
if alt_map:
propertyMap[ALT_SECTION] = alt_map
except:
logger.warn("Cannot read alternates list: " + str(sys.exc_info()[0]))
return propertyMap
def get_alternatives_desc(self, alt_name):
command = ALT_DISP_CMD.format(alt_name)
out = None
try:
p1 = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "priority"], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
out = p2.communicate()[0]
logger.debug('alternatives --display ' + alt_name + '\n, out = ' + out)
except:
logger.warn('Cannot process alternative named: ' + alt_name + ',' + \
'error: ' + str(sys.exc_info()[0]))
return out
def do_clear_cache(self, cache_root, dir_map=None):
"""
Clear cache dir according to provided root directory
cache_root - root dir for cache directory
dir_map - should be used only for recursive calls
"""
global CACHE_FILES_PATTERN
file_map = CACHE_FILES_PATTERN if dir_map is None else dir_map
remList = []
# Build remove list according to masks
for folder in file_map:
if isinstance(file_map[folder], list): # here is list of file masks/files
for mask in file_map[folder]:
remList += self.get_files_in_dir("%s/%s" % (cache_root, folder), mask)
elif isinstance(file_map[folder], dict): # here described sub-folder
remList += self.do_clear_cache("%s/%s" % (cache_root, folder), file_map[folder])
if dir_map is not None: # push result list back as this is call from stack
return remList
else: # root call, so we have final list
self.do_erase_files_silent(remList)
# Alternatives exist as a stack of symlinks under /var/lib/alternatives/$name
# Script expects names of the alternatives as input
# We find all the symlinks using command, #] alternatives --display $name
# and delete them using command, #] alternatives --remove $name $path.
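  # A matching line from "alternatives --display <name>" typically looks like
  # (hypothetical): "/usr/hdp/current/hadoop-client/conf - priority 32";
  # entry.split()[0] below picks out the path portion.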
def do_erase_alternatives(self, alt_map):
if alt_map:
alt_list = alt_map.get(ALT_KEYS[0])
if alt_list:
for alt_name in alt_list:
if alt_name:
out = self.get_alternatives_desc(alt_name)
if not out:
logger.warn('No alternatives found for: ' + alt_name)
continue
else:
alternates = out.split('\n')
if alternates:
for entry in alternates:
if entry:
alt_path = entry.split()[0]
logger.debug('Erasing alternative named: ' + alt_name + ', ' \
'path: ' + alt_path)
command = ALT_ERASE_CMD.format(alt_name, alt_path)
(returncode, stdoutdata, stderrdata) = self.run_os_command(command)
if returncode != 0:
logger.warn('Failed to remove alternative: ' + alt_name +
", path: " + alt_path + ", error: " + stderrdata)
# Remove directories - configs
dir_list = alt_map.get(ALT_KEYS[1])
if dir_list:
self.do_erase_dir_silent(dir_list)
return 0
def do_kill_processes(self, pidList):
if pidList:
for pid in pidList:
if pid:
command = PROC_KILL_CMD.format(pid)
(returncode, stdoutdata, stderrdata) = self.run_os_command(command)
if returncode != 0:
logger.error("Unable to kill process with pid: " + pid + ", " + stderrdata)
return 0
def get_files_in_dir(self, dirPath, filemask = None):
fileList = []
if dirPath:
if os.path.exists(dirPath):
listdir = os.listdir(dirPath)
if listdir:
for link in listdir:
path = dirPath + os.sep + link
if not os.path.islink(path) and not os.path.isdir(path):
if filemask is not None:
if fnmatch.fnmatch(path, filemask):
fileList.append(path)
else:
fileList.append(path)
return fileList
def find_repo_files_for_repos(self, repoNames):
repoFiles = []
osType = OSCheck.get_os_family()
repoNameList = []
for repoName in repoNames:
if len(repoName.strip()) > 0:
repoNameList.append("[" + repoName + "]")
repoNameList.append("name=" + repoName)
if repoNameList:
# get list of files
if OSCheck.is_suse_family():
fileList = self.get_files_in_dir(REPO_PATH_SUSE)
elif OSCheck.is_redhat_family():
fileList = self.get_files_in_dir(REPO_PATH_RHEL)
elif OSCheck.is_ubuntu_family():
fileList = self.get_files_in_dir(REPO_PATH_UBUNTU)
else:
logger.warn("Unsupported OS type, cannot get repository location.")
return []
if fileList:
for filePath in fileList:
with open(filePath, 'r') as file:
content = file.readline()
while (content != "" ):
for repoName in repoNameList:
if content.find(repoName) == 0 and filePath not in repoFiles:
repoFiles.append(filePath)
                break
content = file.readline()
return repoFiles
def do_erase_packages(self, packageList):
packageStr = None
if packageList:
packageStr = ' '.join(packageList)
logger.debug("Erasing packages: " + packageStr)
if packageStr is not None and packageStr:
command = get_erase_cmd().format(packageStr)
if command != '':
logger.debug('Executing: ' + str(command))
(returncode, stdoutdata, stderrdata) = self.run_os_command(command)
if returncode != 0:
logger.warn("Erasing packages failed: " + stderrdata)
else:
logger.info("Erased packages successfully.\n" + stdoutdata)
return 0
def do_erase_dir_silent(self, pathList):
if pathList:
for path in pathList:
if path and os.path.exists(path):
if os.path.isdir(path):
try:
shutil.rmtree(path)
except:
logger.warn("Failed to remove dir: " + path + ", error: " + str(sys.exc_info()[0]))
else:
logger.info(path + " is a file and not a directory, deleting file")
self.do_erase_files_silent([path])
else:
logger.info("Path doesn't exists: " + path)
return 0
def do_erase_files_silent(self, pathList):
if pathList:
for path in pathList:
if path and os.path.exists(path):
try:
os.remove(path)
except:
logger.warn("Failed to delete file: " + path + ", error: " + str(sys.exc_info()[0]))
else:
logger.info("File doesn't exists: " + path)
return 0
def do_delete_group(self):
groupDelCommand = GROUP_ERASE_CMD.format(HADOOP_GROUP)
(returncode, stdoutdata, stderrdata) = self.run_os_command(groupDelCommand)
if returncode != 0:
logger.warn("Cannot delete group : " + HADOOP_GROUP + ", " + stderrdata)
else:
logger.info("Successfully deleted group: " + HADOOP_GROUP)
def do_delete_by_owner(self, userIds, folders):
for folder in folders:
for filename in os.listdir(folder):
fileToCheck = os.path.join(folder, filename)
stat = os.stat(fileToCheck)
if stat.st_uid in userIds:
self.do_erase_dir_silent([fileToCheck])
logger.info("Deleting file/folder: " + fileToCheck)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def get_user_ids(self, userList):
userIds = []
# No user ids to check in Windows for now
return userIds
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def get_user_ids(self, userList):
from pwd import getpwnam
userIds = []
if userList:
for user in userList:
if user:
try:
userIds.append(getpwnam(user).pw_uid)
except Exception:
logger.warn("Cannot find user : " + user)
return userIds
def do_delete_users(self, userList):
if userList:
for user in userList:
if user:
command = USER_ERASE_CMD.format(user)
(returncode, stdoutdata, stderrdata) = self.run_os_command(command)
if returncode != 0:
logger.warn("Cannot delete user : " + user + ", " + stderrdata)
else:
logger.info("Successfully deleted user: " + user)
self.do_delete_group()
return 0
def is_current_user_root(self):
return os.getuid() == 0
# Run command as sudoer by default, if root no issues
def run_os_command(self, cmd, runWithSudo=True):
if runWithSudo:
cmd = "/var/lib/ambari-agent/"+AMBARI_SUDO_BINARY + " " + cmd
logger.info('Executing command: ' + str(cmd))
if type(cmd) == str:
cmd = shlex.split(cmd)
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE
)
(stdoutdata, stderrdata) = process.communicate()
return process.returncode, stdoutdata, stderrdata
def run_check_hosts(self):
config_json = '{"commandParams": {"check_execute_list": "*BEFORE_CLEANUP_HOST_CHECKS*"}}'
with tempfile.NamedTemporaryFile(delete=False) as config_json_file:
config_json_file.write(config_json)
with tempfile.NamedTemporaryFile(delete=False) as tmp_output_file:
tmp_output_file.write('{}')
run_checks_command = RUN_HOST_CHECKS_CMD.format(config_json_file.name, tmp_output_file.name, AGENT_TMP_DIR)
(returncode, stdoutdata, stderrdata) = self.run_os_command(run_checks_command)
if returncode != 0:
logger.warn('Failed to run host checks,\nstderr:\n ' + stderrdata + '\n\nstdout:\n' + stdoutdata)
# Copy file and save with file.# (timestamp)
def backup_file(filePath):
if filePath is not None and os.path.exists(filePath):
timestamp = datetime.datetime.now()
format = '%Y%m%d%H%M%S'
try:
shutil.copyfile(filePath, filePath + "." + timestamp.strftime(format))
    except Exception, e:
      logger.warn('Could not backup file "%s": %s' % (str(filePath), str(e)))
return 0
def get_YN_input(prompt, default):
yes = set(['yes', 'ye', 'y'])
no = set(['no', 'n'])
return get_choice_string_input(prompt, default, yes, no)
def get_choice_string_input(prompt, default, firstChoice, secondChoice):
choice = raw_input(prompt).lower()
if choice in firstChoice:
return True
elif choice in secondChoice:
return False
  elif choice == "": # Just enter pressed
return default
else:
print "input not recognized, please try again: "
return get_choice_string_input(prompt, default, firstChoice, secondChoice)
pass
def main():
h = HostCleanup()
config = h.resolve_ambari_config()
hostCheckFileDir = config.get('agent', 'prefix')
hostCheckFilePath = os.path.join(hostCheckFileDir, HOST_CHECK_FILE_NAME)
hostCheckCustomActionsFilePath = os.path.join(hostCheckFileDir, HOST_CHECK_CUSTOM_ACTIONS_FILE)
hostCheckFilesPaths = hostCheckFilePath + "," + hostCheckCustomActionsFilePath
hostCheckResultPath = os.path.join(hostCheckFileDir, OUTPUT_FILE_NAME)
parser = optparse.OptionParser()
parser.add_option("-v", "--verbose", dest="verbose", action="store_false",
default=False, help="output verbosity.")
parser.add_option("-f", "--file", dest="inputfiles",
default=hostCheckFilesPaths,
help="host check result file to read.", metavar="FILE")
parser.add_option("-o", "--out", dest="outputfile",
default=hostCheckResultPath,
help="log file to store results.", metavar="FILE")
parser.add_option("-k", "--skip", dest="skip",
help="(packages|users|directories|repositories|processes|alternatives)." + \
" Use , as separator.")
parser.add_option("-s", "--silent",
action="store_true", dest="silent", default=False,
help="Silently accepts default prompt values")
(options, args) = parser.parse_args()
# set output file
backup_file(options.outputfile)
global logger
logger = logging.getLogger('HostCleanup')
handler = logging.FileHandler(options.outputfile)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# set verbose
if options.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
if options.skip is not None:
global SKIP_LIST
SKIP_LIST = options.skip.split(',')
is_root = h.is_current_user_root()
if not is_root:
raise RuntimeError('HostCleanup needs to be run as root.')
if not options.silent:
if "users" not in SKIP_LIST:
delete_users = get_YN_input('You have elected to remove all users as well. If it is not intended then use '
'option --skip \"users\". Do you want to continue [y/n] (y)', True)
if not delete_users:
print 'Exiting. Use option --skip="users" to skip deleting users'
sys.exit(1)
hostcheckfile, hostcheckfileca = options.inputfiles.split(",")
# Manage non UI install
if not os.path.exists(hostcheckfileca):
if options.silent:
print 'Host Check results not found. There is no {0}. Running host checks.'.format(hostcheckfileca)
h.run_check_hosts()
else:
run_check_hosts_input = get_YN_input('Host Check results not found. There is no {0}. Do you want to run host checks [y/n] (y)'.format(hostcheckfileca), True)
if run_check_hosts_input:
h.run_check_hosts()
with open(TMP_HOST_CHECK_FILE_NAME, "wb") as tmp_f:
with open(hostcheckfile, "rb") as f1:
with open(hostcheckfileca, "rb") as f2:
tmp_f.write(f1.read())
tmp_f.write(f2.read())
propMap = h.read_host_check_file(TMP_HOST_CHECK_FILE_NAME)
if propMap:
h.do_cleanup(propMap)
if os.path.exists(config.get('agent', 'cache_dir')):
h.do_clear_cache(config.get('agent', 'cache_dir'))
logger.info('Clean-up completed. The output is at %s' % (str(options.outputfile)))
if __name__ == '__main__':
main()
| apache-2.0 | -6,312,625,193,498,740,000 | 35.243548 | 163 | 0.634952 | false |
MarkusHackspacher/unknown-horizons | horizons/util/startgameoptions.py | 1 | 6231 | # ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# [email protected]
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
from operator import itemgetter
import horizons.globals
from horizons.constants import AI, COLORS
from horizons.util.color import Color
from horizons.util.difficultysettings import DifficultySettings
class StartGameOptions:
def __init__(self, game_identifier):
super().__init__() # TODO: check if this call is needed
self.game_identifier = game_identifier
self._player_list = None
self.trader_enabled = True
self.pirate_enabled = True
self.natural_resource_multiplier = 1
self.disasters_enabled = True
self.force_player_id = None
self.is_map = False
self.is_multiplayer = False
self.is_scenario = False
self.player_name = 'Player'
self.player_color = None
self.ai_players = 0
self.human_ai = AI.HUMAN_AI
# this is used by the map editor to pass along the new map's size
self.map_padding = None
self.is_editor = False
def init_new_world(self, session):
# NOTE: this must be sorted before iteration, cause there is no defined order for
# iterating a dict, and it must happen in the same order for mp games.
for i in sorted(self._get_player_list(), key=itemgetter('id')):
session.world.setup_player(i['id'], i['name'], i['color'], i['clientid'] if self.is_multiplayer else None, i['local'], i['ai'], i['difficulty'])
session.world.set_forced_player(self.force_player_id)
center = session.world.init_new_world(self.trader_enabled, self.pirate_enabled, self.natural_resource_multiplier)
session.view.center(center[0], center[1])
def set_human_data(self, player_name, player_color):
self.player_name = player_name
self.player_color = player_color
def _get_player_list(self):
if self._player_list is not None:
return self._player_list
# for now just make it a bit easier for the AI
difficulty_level = {False: DifficultySettings.DEFAULT_LEVEL, True: DifficultySettings.EASY_LEVEL}
players = []
players.append({
'id': 1,
'name': self.player_name,
'color': Color.get(1) if self.player_color is None else self.player_color,
'local': True,
'ai': self.human_ai,
'difficulty': difficulty_level[bool(self.human_ai)],
})
cur_locale = horizons.globals.fife.get_locale()
# add AI players with a distinct color; if none can be found then use black
for num in range(self.ai_players):
color = Color.get(COLORS.BLACK) # if none can be found then be black
for possible_color in Color.get_defaults():
if possible_color == Color.get(COLORS.BLACK):
continue # black is used by the trader and the pirate
used = any(possible_color == player['color'] for player in players)
if not used:
color = possible_color
break
name = horizons.globals.db.get_random_ai_name(cur_locale, [p['name'] for p in players])
# out of pre-defined names?
if name is None:
name = 'AI' + str(num + 1)
players.append({
'id': num + 2,
'name': name,
'color': color,
'local': False,
'ai': True,
'difficulty': difficulty_level[True],
})
return players
@classmethod
def create_start_multiplayer(cls, game_file, player_list, is_map):
options = StartGameOptions(game_file)
options._player_list = player_list
options.is_map = is_map
options.is_multiplayer = True
return options
@classmethod
def create_start_singleplayer(cls, game_identifier, is_scenario, ai_players,
trader_enabled, pirate_enabled, force_player_id, is_map):
options = StartGameOptions(game_identifier)
options.is_scenario = is_scenario
options.ai_players = ai_players
options.trader_enabled = trader_enabled
options.pirate_enabled = pirate_enabled
options.force_player_id = force_player_id
options.is_map = is_map
return options
@classmethod
def create_start_random_map(cls, ai_players, seed, force_player_id):
from horizons.util.random_map import generate_map_from_seed
options = StartGameOptions(generate_map_from_seed(seed))
options.ai_players = ai_players
options.force_player_id = force_player_id
options.is_map = True
return options
@classmethod
def create_editor_load(cls, map_name):
options = StartGameOptions(map_name)
options.player_name = 'Editor'
options.trader_enabled = False
options.pirate_enabled = False
options.natural_resource_multiplier = 0
options.disasters_enabled = False
options.is_map = True
options.is_editor = True
return options
@classmethod
def create_start_scenario(cls, scenario_file):
options = StartGameOptions(scenario_file)
options.is_scenario = True
return options
@classmethod
def create_start_map(cls, map_name):
options = StartGameOptions(map_name)
options.is_map = True
return options
@classmethod
def create_load_game(cls, saved_game, force_player_id):
options = StartGameOptions(saved_game)
options.force_player_id = force_player_id
return options
@classmethod
def create_game_test(cls, game_identifier, player_list):
options = StartGameOptions(game_identifier)
options._player_list = player_list
options.trader_enabled = False
options.pirate_enabled = False
options.natural_resource_multiplier = 0
return options
@classmethod
def create_ai_test(cls, game_identifier, player_list):
options = StartGameOptions(game_identifier)
options._player_list = player_list
options.is_map = True
return options
| gpl-2.0 | 626,343,943,137,751,400 | 32.5 | 147 | 0.71048 | false |
ConnectedVision/connectedvision | test/UnitTest/GeneratorTestCode.py | 1 | 1271 | import os
import subprocess
if not "ConnectedVision" in os.environ:
raise Exception("\"ConnectedVision\" environment variable is not defined")
cvDir = os.path.abspath(os.environ["ConnectedVision"])
if not os.path.isdir(cvDir):
raise Exception("the directory path referenced by the ConnectedVision environment variable is invalid: " + cvDir)
toolsDir = os.path.join(cvDir, "tools")
targetDir = os.path.join(cvDir, "test", "UnitTest")
schemaFile = os.path.join(targetDir, "GeneratorTestCode.schema")
dirStructureFile = os.path.join(targetDir, "GeneratorTestCodeStructure.json")
print("")
print(schemaFile)
print("--------------------------------")
print("CreateItemsForSchema.js")
subprocess.check_call(["node", os.path.join(toolsDir, "CodeFromTemplate", "CreateItemForSchema.js"), targetDir, cvDir, schemaFile, dirStructureFile, "-id", "UnitTest"])
print("")
print("CodeFromTemplate.js - DATA items")
globalJson = os.path.join(targetDir, "global.json")
itemsJson = os.path.join(targetDir, "items.json")
subprocess.check_call(["node", os.path.join(toolsDir, "CodeFromTemplate", "CodeFromTemplate.js"), globalJson, itemsJson])
print("")
print("Clean Up")
os.remove(globalJson)
os.remove(itemsJson)
os.remove(os.path.join(targetDir, "artefacts.json"))
print("OK")
| mit | 495,858,675,220,353,340 | 35.314286 | 168 | 0.738002 | false |
agoragames/kairos | test/functional/histogram_helper.py | 1 | 14454 | from helper_helper import *
from helper_helper import _time
from collections import OrderedDict
@unittest.skipUnless( os.environ.get('TEST_HISTOGRAM','true').lower()=='true', 'skipping histogram' )
class HistogramHelper(Chai):
def setUp(self):
super(HistogramHelper,self).setUp()
self.series = Timeseries(self.client, type='histogram', prefix='kairos',
read_func=int,
intervals={
'minute' : {
'step' : 60,
'steps' : 5,
},
'hour' : {
'step' : 3600,
'resolution' : 60,
}
} )
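    # Assumed kairos semantics for the config above: 'minute' buckets data in
    # 60s intervals and retains 5 of them; 'hour' uses 3600s intervals stored
    # at 60s resolution (so an hour can be read back as 60 sub-buckets).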
self.series.delete_all()
def tearDown(self):
self.series.delete_all()
def test_bulk_insert(self):
inserts = {
None : { 'test1':[1,2,3], 'test2':[4,5,6] },
_time(0) : { 'test1':[1,2,3], 'test2':[4,5,6], 'test3':[7,8,9] },
_time(30) : { 'test1':[1,2,3], 'test2':[4,5,6] },
_time(60) : { 'test1':[1,2,3], 'test3':[7,8,9] }
}
self.series.bulk_insert( inserts )
t1_i1 = self.series.get('test1', 'minute', timestamp=_time(0))
assert_equals( {1:2, 2:2, 3:2}, t1_i1[_time(0)] )
t2_i1 = self.series.get('test2', 'minute', timestamp=_time(0))
assert_equals( {4:2, 5:2, 6:2}, t2_i1[_time(0)] )
t3_i1 = self.series.get('test3', 'minute', timestamp=_time(0))
assert_equals( {7:1, 8:1, 9:1}, t3_i1[_time(0)] )
t1_i2 = self.series.get('test1', 'minute', timestamp=_time(60))
assert_equals( {1:1, 2:1, 3:1}, t1_i2[_time(60)] )
def test_bulk_insert_intervals_after(self):
a,b,c,d,e,f = 10,11,12,13,14,15
inserts = OrderedDict( (
(None , { 'test1':[1,2,3], 'test2':[4,5,6] } ),
(_time(0) , { 'test1':[1,2,3], 'test2':[4,5,6], 'test3':[7,8,9] } ),
(_time(30), { 'test1':[1,2,3], 'test2':[4,5,6] } ),
(_time(60), { 'test1':[a,b,c], 'test3':[d,e,f] })
) )
self.series.bulk_insert( inserts, intervals=3 )
t1_i1 = self.series.get('test1', 'minute', timestamp=_time(0))
assert_equals( {1:2, 2:2, 3:2}, t1_i1[_time(0)] )
t2_i1 = self.series.get('test2', 'minute', timestamp=_time(0))
assert_equals( {4:2, 5:2, 6:2}, t2_i1[_time(0)] )
t3_i1 = self.series.get('test3', 'minute', timestamp=_time(0))
assert_equals( {7:1, 8:1, 9:1}, t3_i1[_time(0)] )
t1_i2 = self.series.get('test1', 'minute', timestamp=_time(60))
assert_equals( {1:2, 2:2, 3:2, a:1, b:1, c:1}, t1_i2[_time(60)] )
t3_i3 = self.series.get('test3', 'minute', timestamp=_time(120))
assert_equals( {7:1, 8:1, 9:1, d:1, e:1, f:1}, t3_i3[_time(120)] )
t3_i4 = self.series.get('test3', 'minute', timestamp=_time(180))
assert_equals( {7:1, 8:1, 9:1, d:1, e:1, f:1}, t3_i4[_time(180)] )
def test_bulk_insert_intervals_before(self):
a,b,c,d,e,f = 10,11,12,13,14,15
inserts = OrderedDict( (
(None , { 'test1':[1,2,3], 'test2':[4,5,6] } ),
(_time(0) , { 'test1':[1,2,3], 'test2':[4,5,6], 'test3':[7,8,9] } ),
(_time(30), { 'test1':[1,2,3], 'test2':[4,5,6] } ),
(_time(60), { 'test1':[a,b,c], 'test3':[d,e,f] })
) )
self.series.bulk_insert( inserts, intervals=-3 )
t1_i1 = self.series.get('test1', 'minute', timestamp=_time(0))
assert_equals( {1:2, 2:2, 3:2, a:1, b:1, c:1}, t1_i1[_time(0)] )
t2_i1 = self.series.get('test2', 'minute', timestamp=_time(0))
assert_equals( {4:2, 5:2, 6:2}, t2_i1[_time(0)] )
t3_i1 = self.series.get('test3', 'minute', timestamp=_time(0))
assert_equals( {7:1, 8:1, 9:1, d:1, e:1, f:1}, t3_i1[_time(0)] )
t1_i2 = self.series.get('test1', 'minute', timestamp=_time(-60))
assert_equals( {1:2, 2:2, 3:2, a:1, b:1, c:1}, t1_i2[_time(-60)] )
t3_i3 = self.series.get('test3', 'minute', timestamp=_time(-120))
assert_equals( {7:1, 8:1, 9:1, d:1, e:1, f:1}, t3_i3[_time(-120)] )
t3_i4 = self.series.get('test3', 'minute', timestamp=_time(-180))
assert_equals( {7:1, 8:1, 9:1}, t3_i4[_time(-180)] )
def test_get(self):
    # 2 hours' worth of data, value is same as timestamp
for t in xrange(1, 7200):
self.series.insert( 'test', t/2, timestamp=_time(t) )
###
### no resolution, condensed has no impact
###
# middle of an interval
interval = self.series.get( 'test', 'minute', timestamp=_time(100) )
assert_equals( [_time(60)], interval.keys() )
keys = list(range(30,60))
assert_equals( keys, interval[_time(60)].keys() )
for k in keys:
assert_equals( 2, interval[_time(60)][k] )
    # no matching interval, returns one interval with an empty value list
interval = self.series.get( 'test', 'minute' )
assert_equals( 1, len(interval) )
assert_equals( 0, len(interval.values()[0]) )
###
### with resolution, optionally condensed
###
interval = self.series.get( 'test', 'hour', timestamp=_time(100) )
keys = list(range(30,60))
assert_equals( 60, len(interval) )
assert_equals( keys, interval[_time(60)].keys() )
interval = self.series.get( 'test', 'hour', timestamp=_time(100), condensed=True )
assert_equals( 1, len(interval) )
assert_equals( list(range(0,1800)), interval[_time(0)].keys() )
def test_get_joined(self):
# put some data in the first minutes of each hour for test1, and then for
# a few more minutes in test2
for t in xrange(1, 120):
self.series.insert( 'test1', t, timestamp=_time(t) )
self.series.insert( 'test2', t, timestamp=_time(t) )
for t in xrange(3600, 3720):
self.series.insert( 'test1', t, timestamp=_time(t) )
self.series.insert( 'test2', t, timestamp=_time(t) )
for t in xrange(120, 240):
self.series.insert( 'test1', t, timestamp=_time(t) )
for t in xrange(3721, 3840):
self.series.insert( 'test1', t, timestamp=_time(t) )
###
### no resolution, condensed has no impact
###
# interval with 2 series worth of data
interval = self.series.get( ['test1','test2'], 'minute', timestamp=_time(100) )
assert_equals( [_time(60)], interval.keys() )
assert_equals( dict.fromkeys(range(60,120),2), interval[_time(60)] )
# interval with 1 series worth of data
interval = self.series.get( ['test1','test2'], 'minute', timestamp=_time(122) )
assert_equals( [_time(120)], interval.keys() )
assert_equals( dict.fromkeys(range(120,180),1), interval[_time(120)] )
    # no matching interval, returns one interval with an empty value list
interval = self.series.get( ['test1','test2'], 'minute' )
assert_equals( 1, len(interval) )
assert_equals( 0, len(interval.values()[0]) )
###
### with resolution, optionally condensed
###
interval = self.series.get( ['test1','test2'], 'hour', timestamp=_time(100) )
assert_equals( map(_time,[0,60,120,180]), interval.keys() )
assert_equals( dict.fromkeys(range(1,60), 2), interval[_time(0)] )
assert_equals( dict.fromkeys(range(60,120), 2), interval[_time(60)] )
assert_equals( dict.fromkeys(range(120,180), 1), interval[_time(120)] )
assert_equals( dict.fromkeys(range(180,240), 1), interval[_time(180)] )
data = dict.fromkeys(range(1,120), 2)
data.update( dict.fromkeys(range(120,240),1) )
interval = self.series.get( ['test1','test2'], 'hour', timestamp=_time(100), condensed=True )
assert_equals( [_time(0)], interval.keys() )
assert_equals( data, interval[_time(0)] )
# with transforms
interval = self.series.get( ['test1','test2'], 'hour', timestamp=_time(100), transform='count' )
assert_equals( 120, interval[_time(60)] )
interval = self.series.get( ['test1','test2'], 'hour', timestamp=_time(100), transform=['min','max','count'], condensed=True )
assert_equals( {'min':1, 'max':239, 'count':358}, interval[_time(0)] )
def test_series(self):
    # 2 hours' worth of data, value is same as timestamp
for t in xrange(1, 7200):
self.series.insert( 'test', t/2, timestamp=_time(t) )
###
### no resolution, condensed has no impact
###
interval = self.series.series( 'test', 'minute', end=_time(250) )
assert_equals( map(_time, [0,60,120,180,240]), interval.keys() )
assert_equals( list(range(0,30)), sorted(interval[_time(0)].keys()) )
assert_equals( 1, interval[_time(0)][0] )
for k in xrange(1,30):
assert_equals(2, interval[_time(0)][k])
assert_equals( list(range(120,150)), sorted(interval[_time(240)].keys()) )
for k in xrange(120,150):
assert_equals(2, interval[_time(240)][k])
interval = self.series.series( 'test', 'minute', steps=2, end=_time(250) )
assert_equals( map(_time, [180,240]), interval.keys() )
assert_equals( list(range(120,150)), sorted(interval[_time(240)].keys()) )
# with collapsed
interval = self.series.series( 'test', 'minute', end=_time(250), collapse=True )
assert_equals( map(_time, [0]), interval.keys() )
assert_equals( list(range(0,150)), sorted(interval[_time(0)].keys()) )
for k in xrange(1,150):
assert_equals(2, interval[_time(0)][k])
###
### with resolution
###
interval = self.series.series( 'test', 'hour', end=_time(250) )
assert_equals( 1, len(interval) )
assert_equals( 60, len(interval[_time(0)]) )
assert_equals( list(range(0,30)), sorted(interval[_time(0)][_time(0)].keys()) )
# single step, last one
interval = self.series.series( 'test', 'hour', condensed=True, end=_time(4200) )
assert_equals( 1, len(interval) )
assert_equals( 1800, len(interval[_time(3600)]) )
assert_equals( list(range(1800,3600)), sorted(interval[_time(3600)].keys()) )
interval = self.series.series( 'test', 'hour', condensed=True, end=_time(4200), steps=2 )
assert_equals( map(_time, [0,3600]), interval.keys() )
assert_equals( 1800, len(interval[_time(0)]) )
assert_equals( 1800, len(interval[_time(3600)]) )
assert_equals( list(range(1800,3600)), sorted(interval[_time(3600)].keys()) )
# with collapsed
interval = self.series.series( 'test', 'hour', condensed=True, end=_time(4200), steps=2, collapse=True )
assert_equals( map(_time, [0]), interval.keys() )
assert_equals( 3600, len(interval[_time(0)]) )
assert_equals( list(range(0,3600)), sorted(interval[_time(0)].keys()) )
def test_series_joined(self):
# put some data in the first minutes of each hour for test1, and then for
# a few more minutes in test2
for t in xrange(1, 120):
self.series.insert( 'test1', t, timestamp=_time(t) )
self.series.insert( 'test2', t, timestamp=_time(t) )
for t in xrange(3600, 3720):
self.series.insert( 'test1', t, timestamp=_time(t) )
self.series.insert( 'test2', t, timestamp=_time(t) )
for t in xrange(120, 240):
self.series.insert( 'test1', t, timestamp=_time(t) )
for t in xrange(3720, 3840):
self.series.insert( 'test1', t, timestamp=_time(t) )
###
### no resolution, condensed has no impact
###
interval = self.series.series( ['test1','test2'], 'minute', end=_time(250) )
assert_equals( map(_time,[0,60,120,180,240]), interval.keys() )
assert_equals( dict.fromkeys(range(1,60), 2), interval[_time(0)] )
assert_equals( dict.fromkeys(range(60,120), 2), interval[_time(60)] )
assert_equals( dict.fromkeys(range(120,180), 1), interval[_time(120)] )
assert_equals( dict.fromkeys(range(180,240), 1), interval[_time(180)] )
assert_equals( {}, interval[_time(240)] )
    # no matching data, returns the requested intervals with empty value lists
interval = self.series.series( ['test1','test2'], 'minute', start=time.time(), steps=2 )
assert_equals( 2, len(interval) )
assert_equals( {}, interval.values()[0] )
# with transforms
interval = self.series.series( ['test1','test2'], 'minute', end=_time(250), transform=['min','count'] )
assert_equals( map(_time,[0,60,120,180,240]), interval.keys() )
assert_equals( {'min':1, 'count':118}, interval[_time(0)] )
assert_equals( {'min':60, 'count':120}, interval[_time(60)] )
assert_equals( {'min':120, 'count':60}, interval[_time(120)] )
assert_equals( {'min':180, 'count':60}, interval[_time(180)] )
assert_equals( {'min':0, 'count':0}, interval[_time(240)] )
# with collapsed
data = dict.fromkeys(range(1,120), 2)
data.update( dict.fromkeys(range(120,240), 1) )
interval = self.series.series( ['test1','test2'], 'minute', end=_time(250), collapse=True )
assert_equals( [_time(0)], interval.keys() )
assert_equals( data, interval[_time(0)] )
# with tranforms and collapsed
interval = self.series.series( ['test1','test2'], 'minute', end=_time(250), transform=['min','max', 'count'], collapse=True )
assert_equals( [_time(0)], interval.keys() )
assert_equals( {'min':1, 'max':239, 'count':358}, interval[_time(0)] )
###
### with resolution, optionally condensed
###
interval = self.series.series( ['test1','test2'], 'hour', end=_time(250) )
assert_equals( 1, len(interval) )
assert_equals( map(_time,[0,60,120,180]), interval[_time(0)].keys() )
assert_equals( 4, len(interval[_time(0)]) )
assert_equals( dict.fromkeys(range(1,60), 2), interval[_time(0)][_time(0)] )
assert_equals( dict.fromkeys(range(60,120), 2), interval[_time(0)][_time(60)] )
assert_equals( dict.fromkeys(range(120,180), 1), interval[_time(0)][_time(120)] )
assert_equals( dict.fromkeys(range(180,240), 1), interval[_time(0)][_time(180)] )
# condensed
data = dict.fromkeys(range(1,120), 2)
data.update( dict.fromkeys(range(120,240), 1) )
interval = self.series.series( ['test1','test2'], 'hour', end=_time(250), condensed=True )
assert_equals( [_time(0)], interval.keys() )
assert_equals( data, interval[_time(0)] )
# with collapsed across multiple intervals
data = dict.fromkeys(range(1,120), 2)
data.update( dict.fromkeys(range(120,240), 1) )
data.update( dict.fromkeys(range(3600,3720), 2) )
data.update( dict.fromkeys(range(3720,3840), 1) )
interval = self.series.series( ['test1','test2'], 'hour', condensed=True, end=_time(4200), steps=2, collapse=True )
assert_equals( map(_time, [0]), interval.keys() )
assert_equals( data, interval[_time(0)] )
# with transforms collapsed
interval = self.series.series( ['test1','test2'], 'hour', condensed=True, end=_time(4200), steps=2, collapse=True, transform=['min','max','count'] )
assert_equals( map(_time, [0]), interval.keys() )
assert_equals( {'min':1,'max':3839,'count':718}, interval[_time(0)] )
| bsd-3-clause | -724,267,455,489,234,600 | 42.667674 | 152 | 0.600664 | false |
alex/wal-e | wal_e/worker/s3_deleter.py | 1 | 4203 | import gevent
from gevent import queue
from wal_e import exception
from wal_e import retries
class Deleter(object):
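    # Sketch of intended use from the calling greenlet (hypothetical names):
    #   deleter = Deleter()
    #   for key in keys_to_remove:
    #       deleter.delete(key)   # enqueue; batched S3 deletes happen in _work
    #   deleter.close()           # drain the queue, then stop the worker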
def __init__(self):
# Allow enqueuing of several API calls worth of work, which
# right now allow 1000 key deletions per job.
self.PAGINATION_MAX = 1000
self._q = queue.JoinableQueue(self.PAGINATION_MAX * 10)
self._worker = gevent.spawn(self._work)
self._parent_greenlet = gevent.getcurrent()
self.closing = False
def close(self):
self.closing = True
self._q.join()
self._worker.kill(block=True)
def delete(self, key):
if self.closing:
raise exception.UserCritical(
msg='attempt to delete while closing Deleter detected',
hint='This should be reported as a bug.')
self._q.put(key)
def _work(self):
try:
while True:
# If _cut_batch has an error, it is responsible for
# invoking task_done() the appropriate number of
# times.
page = self._cut_batch()
# If nothing was enqueued, yield and wait around a bit
# before looking for work again.
if not page:
gevent.sleep(1)
continue
# However, in event of success, the jobs are not
# considered done until the _delete_batch returns
# successfully. In event an exception is raised, it
# will be propagated to the Greenlet that created the
# Deleter, but the tasks are marked done nonetheless.
try:
self._delete_batch(page)
finally:
for i in xrange(len(page)):
self._q.task_done()
except KeyboardInterrupt, e:
# Absorb-and-forward the exception instead of using
# gevent's link_exception operator, because in gevent <
# 1.0 there is no way to turn off the alarming stack
# traces emitted when an exception propagates to the top
# of a greenlet, linked or no.
#
# Normally, gevent.kill is ill-advised because it results
# in asynchronous exceptions being raised in that
# greenlet, but given that KeyboardInterrupt is nominally
# asynchronously raised by receiving SIGINT to begin with,
            # there is nothing obvious being lost from using kill() in
# this case.
gevent.kill(self._parent_greenlet, e)
def _cut_batch(self):
# Attempt to obtain as much work as possible, up to the
# maximum able to be processed by S3 at one time,
# PAGINATION_MAX.
page = []
try:
for i in xrange(self.PAGINATION_MAX):
page.append(self._q.get_nowait())
except queue.Empty:
pass
except:
# In event everything goes sideways while dequeuing,
# carefully un-lock the queue.
for i in xrange(len(page)):
self._q.task_done()
raise
return page
@retries.retry()
def _delete_batch(self, page):
# Check that all keys are in the same bucket; this code is not
# designed to deal with fast deletion of keys from multiple
# buckets at the same time, and not checking this could result
# in deleting similarly named keys from the wrong bucket.
#
# In wal-e's use, homogeneity of the bucket retaining the keys
# is presumed to be always the case.
bucket_name = page[0].bucket.name
for key in page:
if key.bucket.name != bucket_name:
raise exception.UserCritical(
msg='submitted keys are not part of the same bucket',
detail=('The clashing bucket names are {0} and {1}.'
.format(key.bucket.name, bucket_name)),
hint='This should be reported as a bug.')
bucket = page[0].bucket
bucket.delete_keys([key.name for key in page])
| bsd-3-clause | -8,131,093,756,282,691,000 | 37.559633 | 73 | 0.563169 | false |
rnelsonchem/gcmstools | gcmstools/general.py | 1 | 3104 | import os
from urllib.request import urlopen
from IPython.parallel import Client, interactive
import gcmstools.filetypes as gcf
import gcmstools.reference as gcr
import gcmstools.fitting as gcfit
import gcmstools.datastore as gcd
import gcmstools.calibration as gcc
_ROOT = os.path.abspath(os.path.dirname(__file__))
_PWD = os.getcwd()
def get_sample_data(fname=None):
'''Copy sample data to current folder.
Use this function to download sample data as a zip file into the current
folder.
'''
url = "http://gcmstools.rcnelson.com/_downloads/sampledata.zip"
zipdata = urlopen(url)
with open('sampledata.zip', 'wb') as f:
f.write(zipdata.read())
zipdata.close()
def proc_data(data_folder, h5name, multiproc=False, chunk_size=4,
filetype='aia', reffile=None, fittype=None, calfile=None,
picts=False, **kwargs):
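    # A minimal hypothetical call, assuming a folder of CDF/AIA files and the
    # text-reference/NNLS fitting path implemented below:
    #   proc_data('sampledata/datafiles', 'gcms.h5', reffile='ref_specs.txt',
    #             fittype='nnls', calfile='calibration.csv')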
if filetype == 'aia':
GcmsObj = gcf.AiaFile
ends = ('CDF', 'AIA', 'cdf', 'aia')
files = os.listdir(data_folder)
files = [f for f in files if f.endswith(ends)]
files = [os.path.join(data_folder, f) for f in files]
ref = None
if reffile:
if reffile.endswith(('txt', 'TXT')):
ref = gcr.TxtReference(reffile, **kwargs)
fit = None
if fittype:
if fittype.lower() == 'nnls':
fit = gcfit.Nnls(**kwargs)
h5 = gcd.GcmsStore(h5name, **kwargs)
if multiproc:
try:
client = Client()
except:
error = "ERROR! You do not have an IPython Cluster running.\n\n"
error += "Start cluster with: ipcluster start -n # &\n"
error += "Where # == the number of processors.\n\n"
error += "Stop cluster with: ipcluster stop"
print(error)
h5.close()
return
dview = client[:]
dview.block = True
dview['ref'] = ref
dview['fit'] = fit
dview['GcmsObj'] = GcmsObj
chunk_size = len(dview)
# Chunk the data so lots of data files aren't opened in memory.
for chunk in _chunker(files, chunk_size):
if multiproc:
datafiles = dview.map_sync(_proc_file,
[(i, kwargs) for i in chunk])
else:
datafiles = [GcmsObj(f, **kwargs) for f in chunk]
if ref:
ref(datafiles)
if fit:
fit(datafiles)
h5.append_gcms(datafiles)
if calfile:
cal = gcc.Calibrate(h5, **kwargs)
cal.curvegen(calfile, picts=picts, **kwargs)
cal.datagen(picts=picts, **kwargs)
h5.compress()
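# A minimal usage sketch for proc_data (the folder and file names below are
# illustrative assumptions, not part of gcmstools):
#
#     proc_data('sampledata', 'results.h5', reffile='reference.txt',
#               fittype='nnls', calfile='calibration.csv')
#
# This would fit every AIA/CDF file found in 'sampledata' against the
# reference spectra, store the fits in 'results.h5', and finish by running
# the calibration step before compressing the HDF5 store.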
# This function is from: http://stackoverflow.com/questions/434287
def _chunker(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
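# For illustration (not part of the original module): with a five-element
# list and size=2, list(_chunker([1, 2, 3, 4, 5], 2)) yields
# [[1, 2], [3, 4], [5]]; each chunk is a plain slice, so the final chunk
# may be shorter than `size`.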
# This function is for the multiproc version.
# Must use the interactive decorator to update the node namespace
@interactive
def _proc_file(file_kwargs):
filename, kwargs = file_kwargs
datafile = GcmsObj(filename, **kwargs)
if ref:
ref(datafile)
if fit:
fit(datafile)
return datafile
| bsd-3-clause | 6,772,476,318,539,915,000 | 27.740741 | 76 | 0.600515 | false |
camilothorne/nasslli2016 | Nasslli16/annotation/savestat.py | 1 | 2647 | '''
Created on 2016
@author: camilothorne
'''
#import re, string, array
from subprocess import call
import os
class SaveStat:
# path : path to report file
# plotfile : path to the plots
# tables : path to the table
# constructor
def __init__(self,table,plotfile1,name):
# self.path = "/home/camilo/mmap-wsd/tex/"+name+".tex"
self.path = os.environ['TEX']+name+"-report.tex"
self.plotfile1 = plotfile1
self.table = table
#print self.table
# building the report
res = self.makeRes(self.table, self.plotfile1, name)
# saving the report
print "###################################################"
print "\n\npreparing report...\n\n"
self.compileFile(self.path, res)
self.fileSave(self.path, res)
# make contingency table
def makeRes(self,table,plotfile1,name):
# plugin table
title = r'\begin{center}\textbf{\Large '+name+'}\end{center}\n'
ntable = title + r'\begin{center}\begin{table}[p]\centering' + "\n"
#print table
myfile = open(table,'r')
myfiler = myfile.read()
ntable = ntable + myfiler
ntable = ntable + "\caption{Results.}\end{table}\end{center}\n\n"
myfile.close()
# complete and return table
fig1 = r'\begin{center}' + "\n\includegraphics[scale=0.8]{" + plotfile1 + "}\n\end{center}\n"
res = ntable + "\n\n" + r'\vspace{0.2cm}' + "\n\n" + fig1 + "\\newpage\n" #+ fig2
return res
# save the table in a .tex file
def fileSave(self,path,res):
myfile = open(path,'w')
myfile.write(res)
myfile.close()
# compile with pdflatex
def compileFile(self,path,res):
myfile = open(path,'w')
myfile.write("\documentclass[a4paper,12pt]{article}")
myfile.write("\n\n")
myfile.write("\usepackage{graphicx}\n")
myfile.write("\usepackage{epstopdf}\n")
myfile.write("\usepackage{rotating}\n")
myfile.write("\usepackage{times}\n")
myfile.write("\n\n")
myfile.write(r'\begin{document}')
myfile.write("\n\n")
myfile.write(res)
myfile.write("\n\n")
myfile.write("\end{document}")
myfile.close()
call(['/usr/bin/pdflatex',
# '-output-directory='+'/home/camilo/workspace-git/RestWSD/results/'+'tex/',
'-output-directory='+os.environ['TEX'],
path],
shell=False)
| gpl-3.0 | -8,763,846,241,750,692,000 | 32.0875 | 101 | 0.529278 | false |
Donkyhotay/MoonPy | zope/app/error/bootstrap.py | 1 | 1372 | ##############################################################################
#
# Copyright (c) 2002, 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap code for error reporting utility.
$Id: bootstrap.py 66293 2006-04-02 10:47:12Z srichter $
"""
import transaction
from zope.app.appsetup.bootstrap import ensureUtility, getInformationFromEvent
from zope.app.error.error import RootErrorReportingUtility
from zope.app.error.interfaces import IErrorReportingUtility
def bootStrapSubscriber(event):
"""Subscriber to the IDataBaseOpenedEvent
Create utility at that time if not yet present
"""
db, connection, root, root_folder = getInformationFromEvent(event)
ensureUtility(root_folder, IErrorReportingUtility, '',
RootErrorReportingUtility, copy_to_zlog=False, asObject=True)
transaction.commit()
connection.close()
| gpl-3.0 | 8,669,597,086,980,536,000 | 35.105263 | 79 | 0.674927 | false |
wlashell/lyrical_page | site_seo/migrations/0002_auto__add_siteurldefaults.py | 1 | 5402 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SiteUrlDefaults'
db.create_table('site_seo_siteurldefaults', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('page_title', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('page_keywords', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('page_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('site_seo', ['SiteUrlDefaults'])
def backwards(self, orm):
# Deleting model 'SiteUrlDefaults'
db.delete_table('site_seo_siteurldefaults')
models = {
'site_content.sitepage': {
'Meta': {'unique_together': "(('site', 'url'),)", 'object_name': 'SitePage'},
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_header': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'custom_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'enable_rte': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_index': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'meta_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'page_class': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['site_content.SitePageTemplateSelection']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'site_content.sitepagetemplateselection': {
'Meta': {'object_name': 'SitePageTemplateSelection'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_system': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'template_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'site_seo.siteurl': {
'Meta': {'object_name': 'SiteUrl'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'page_keywords': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'page_title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'sitepages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['site_content.SitePage']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'site_seo.siteurldefaults': {
'Meta': {'object_name': 'SiteUrlDefaults'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'page_keywords': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'page_title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['site_seo'] | apache-2.0 | 3,382,999,590,637,498,400 | 65.703704 | 178 | 0.556831 | false |
dodonator/area51 | Kryptotests/OneTimePad/Alpha/oneTimePy2.py | 1 | 1880 | import os
import random
import getpass
import time
os.system('clear')
def encode(klartext):
'''
Create a random One-Time-Pad and encode the input strings
'''
laengeKlartext = len(klartext)
key = ''
keyArray = []
klartextArray = list(klartext)
geheimtextArray = []
geheimtext = ''
alphabet = []
for i in range(26):
alphabet.append(chr(i+65))
for i in range(26):
alphabet.append(chr(i+97))
	for i in range(laengeKlartext): # This for loop generates the key
keyArray.append(random.choice(alphabet))
	for i in range(laengeKlartext): # This for loop handles the encoding
tmpKlartextIndex = alphabet.index(klartextArray[i])
tmpKeyIndex = alphabet.index(keyArray[i])
tmpG = alphabet[(tmpKlartextIndex + tmpKeyIndex) % 52]
geheimtextArray.append(tmpG)
	for element in geheimtextArray: # This for loop converts the array to a string
geheimtext += element
for element in keyArray:
key += element
return [geheimtext,key]
def decode(geheimtext,key):
laengeGeheimtext = len(geheimtext)
keyArray = list(key)
geheimArray = list(geheimtext)
klartextArray = []
klartext = ''
alphabet = []
for i in range(26):
alphabet.append(chr(i+65))
for i in range(26):
alphabet.append(chr(i+97))
for i in range(laengeGeheimtext):
tmpGeheimtextIndex = alphabet.index(geheimArray[i])
tmpKeyIndex = alphabet.index(keyArray[i])
tmpDifferenz = tmpGeheimtextIndex - tmpKeyIndex
if tmpDifferenz >= 0:
klartextArray.append(alphabet[tmpDifferenz])
else:
tmpDifferenz = tmpGeheimtextIndex + 52 - tmpKeyIndex
klartextArray.append(alphabet[tmpDifferenz])
for element in klartextArray:
klartext += element
return klartext
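# Worked example (illustrative, not part of the original script): the alphabet
# is A-Z followed by a-z (52 letters), so 'h' has index 33 and a key letter
# 'K' has index 10; encoding gives (33 + 10) % 52 = 43 -> 'r', and decoding
# reverses it with 43 - 10 = 33 -> 'h'.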
klartext = raw_input(': \n')
result = encode(klartext)
print 'Ciphertext: ' + result[0]
print 'Key: ' + result[1]
print 'Decrypted: ' + decode(result[0],result[1])
| gpl-3.0 | 5,432,290,994,494,786,000 | 24.066667 | 87 | 0.722872 | false |
Vapekreng/SR | game/room/map_generator/map_generator_data.py | 1 | 1596 | MAP_WIDTH = 80
MAP_HEIGHT = 20
CELL_SIZE_X = 10
CELL_SIZE_Y = 10
CELLS_COUNT_X = MAP_WIDTH // CELL_SIZE_X
CELLS_COUNT_Y = MAP_HEIGHT // CELL_SIZE_Y
MOVE_VECTORS = [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]]
ROOMS = dict()
ROOMS['normal'] = dict()
ROOMS['normal']['min width'] = 9
ROOMS['normal']['max width'] = 15
ROOMS['normal']['min height'] = 4
ROOMS['normal']['max height'] = 7
ROOMS['normal']['cell width'] = 2
ROOMS['normal']['cell height'] = 1
ROOMS['normal']['probability'] = 30
ROOMS['small'] = dict()
ROOMS['small']['min width'] = 6
ROOMS['small']['max width'] = 8
ROOMS['small']['min height'] = 6
ROOMS['small']['max height'] = 8
ROOMS['small']['cell width'] = 1
ROOMS['small']['cell height'] = 1
ROOMS['small']['probability'] = 5
ROOMS['high'] = dict()
ROOMS['high']['min width'] = 7
ROOMS['high']['max width'] = 8
ROOMS['high']['min height'] = 7
ROOMS['high']['max height'] = 13
ROOMS['high']['cell width'] = 1
ROOMS['high']['cell height'] = 2
ROOMS['high']['probability'] = 25
ROOMS['wide'] = dict()
ROOMS['wide']['min width'] = 19
ROOMS['wide']['max width'] = 27
ROOMS['wide']['min height'] = 4
ROOMS['wide']['max height'] = 7
ROOMS['wide']['cell width'] = 3
ROOMS['wide']['cell height'] = 1
ROOMS['wide']['probability'] = 35
ROOMS['square'] = dict()
ROOMS['square']['min width'] = 13
ROOMS['square']['max width'] = 17
ROOMS['square']['min height'] = 9
ROOMS['square']['max height'] = 13
ROOMS['square']['cell width'] = 2
ROOMS['square']['cell height'] = 2
ROOMS['square']['probability'] = 5
| gpl-3.0 | 8,906,998,802,736,333,000 | 27.018182 | 85 | 0.577068 | false |
tkolhar/robottelo | tests/foreman/ui/test_oscappolicy.py | 1 | 6585 | from fauxfactory import gen_string
from robottelo.config import settings
from robottelo.constants import (
OSCAP_PERIOD,
OSCAP_PROFILE,
OSCAP_WEEKDAY,
)
from robottelo.datafactory import invalid_values_list, valid_data_list
from robottelo.decorators import skip_if_bug_open, tier1
from robottelo.helpers import get_data_file
from robottelo.test import UITestCase
from robottelo.ui.factory import make_oscapcontent, make_oscappolicy
from robottelo.ui.session import Session
class OpenScapPolicy(UITestCase):
"""Implements Oscap Policy tests in UI."""
@classmethod
def setUpClass(cls):
super(OpenScapPolicy, cls).setUpClass()
cls.content_path = get_data_file(
settings.oscap.content_path
)
@tier1
def test_positive_create_with_policy_name(self):
"""@Test: Create OpenScap Policy.
@Feature: OpenScap Policy - Positive Create.
@Steps:
1. Create an openscap content.
2. Create an openscap Policy.
3. Provide all the appropriate parameters.
@Assert: Whether creating Policy for OpenScap is successful.
"""
content_name = gen_string('alpha')
with Session(self.browser) as session:
make_oscapcontent(
session,
name=content_name,
content_path=self.content_path,
)
self.assertIsNotNone(
self.oscapcontent.search(content_name))
for policy_name in valid_data_list():
with self.subTest(policy_name):
make_oscappolicy(
session,
content=content_name,
name=policy_name,
period=OSCAP_PERIOD['weekly'],
profile=OSCAP_PROFILE['rhccp'],
period_value=OSCAP_WEEKDAY['friday'],
)
self.assertIsNotNone(
self.oscappolicy.search(policy_name))
@tier1
def test_positive_delete_by_policy_name(self):
"""@Test: Create OpenScap Policy.
@Feature: OpenScap Policy - Positive Create.
@Steps:
1. Create an openscap content.
2. Create an openscap Policy.
3. Provide all the appropriate parameters.
4. Delete the openscap Policy.
@Assert: Whether deleting Policy for OpenScap is successful.
"""
content_name = gen_string('alpha')
with Session(self.browser) as session:
make_oscapcontent(
session,
name=content_name,
content_path=self.content_path,
)
self.assertIsNotNone(
self.oscapcontent.search(content_name))
for policy_name in valid_data_list():
with self.subTest(policy_name):
make_oscappolicy(
session,
content=content_name,
name=policy_name,
period=OSCAP_PERIOD['weekly'],
profile=OSCAP_PROFILE['rhccp'],
period_value=OSCAP_WEEKDAY['friday'],
)
self.assertIsNotNone(
self.oscappolicy.search(policy_name))
self.oscappolicy.delete(policy_name)
@skip_if_bug_open('bugzilla', 1293296)
@tier1
def test_negative_create_with_invalid_name(self):
"""@Test: Create OpenScap Policy with negative values.
@Feature: OpenScap Policy - Negative Create.
@Steps:
1. Create an openscap content.
2. Create an openscap Policy.
3. Provide all the appropriate parameters.
@Assert: Creating Policy for OpenScap is not successful.
@BZ: 1293296
"""
content_name = gen_string('alpha')
with Session(self.browser) as session:
make_oscapcontent(
session,
name=content_name,
content_path=self.content_path,
)
self.assertIsNotNone(
self.oscapcontent.search(content_name))
for policy_name in invalid_values_list(interface='ui'):
with self.subTest(policy_name):
make_oscappolicy(
session,
content=content_name,
name=policy_name,
period=OSCAP_PERIOD['weekly'],
profile=OSCAP_PROFILE['rhccp'],
period_value=OSCAP_WEEKDAY['friday'],
)
self.assertIsNone(self.oscappolicy.search(policy_name))
@tier1
def test_positive_update(self):
"""@Test: Update OpenScap Policy.
@Feature: OpenScap Policy - Positive Update.
@Steps:
1. Create an openscap content.
2. Create an openscap Policy.
3. Provide all the appropriate parameters.
4. Update openscap policy with valid values.
@Assert: Updating Policy for OpenScap is successful.
"""
content_name = gen_string('alpha')
policy_name = gen_string('alpha')
with Session(self.browser) as session:
make_oscapcontent(
session,
name=content_name,
content_path=self.content_path,
)
self.assertIsNotNone(
self.oscapcontent.search(content_name))
make_oscappolicy(
session,
content=content_name,
name=policy_name,
period=OSCAP_PERIOD['weekly'],
profile=OSCAP_PROFILE['rhccp'],
period_value=OSCAP_WEEKDAY['friday'],
)
self.assertIsNotNone(
self.oscappolicy.search(policy_name))
for new_policy_name in invalid_values_list(interface='ui'):
with self.subTest(new_policy_name):
self.oscappolicy.update(
name=policy_name,
new_name=new_policy_name,
content=content_name,
profile=OSCAP_PROFILE['usgcb'],
period=OSCAP_PERIOD['weekly'],
period_value=OSCAP_WEEKDAY['sunday'],
)
self.assertIsNotNone(
self.oscappolicy.search(new_policy_name))
policy_name = new_policy_name
| gpl-3.0 | -1,506,491,842,477,906,000 | 34.594595 | 75 | 0.538497 | false |
christophercrouzet/nani | tests/data/particle.py | 1 | 1153 | import numpy
import nani
from . import vector2
_PARTICLE_ID = 0
_PARTICLE_POSITION = 1
_PARTICLE_MASS = 2
_PARTICLE_NEIGHBOURS = 3
class ParticleView(object):
__slots__ = ('_data',)
def __init__(self, data):
self._data = data
def __str__(self):
return (
"Particle(id=%s, position=%s, mass=%s, neighbours=%s)"
% (self.id, self.position, self.mass, self.neighbours)
)
@property
def id(self):
return self._data[_PARTICLE_ID]
@property
def position(self):
return vector2.Vector2View(self._data[_PARTICLE_POSITION])
@property
def mass(self):
return self._data[_PARTICLE_MASS]
@mass.setter
def mass(self, value):
self._data[_PARTICLE_MASS] = value
@property
def neighbours(self):
return self._data[_PARTICLE_NEIGHBOURS]
PARTICLE_TYPE = nani.Structure(
fields=(
('id', nani.Number(type=numpy.uint32, default=-1)),
('position', vector2.VECTOR2_TYPE),
('mass', nani.Number(type=numpy.float32, default=1.0)),
('neighbours', nani.Object()),
),
view=ParticleView
)
| mit | 3,566,905,308,198,292,000 | 19.589286 | 66 | 0.590633 | false |
tuturto/pyherc | src/pyherc/test/unit/test_damage_modifiers.py | 1 | 5523 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for testing damage modification rules
"""
from hamcrest import assert_that, equal_to, is_
from mockito import mock, when
from pyherc.data import Dungeon, Model
from pyherc.data.effects import DamageModifier
from pyherc.ports import set_action_factory
from pyherc.test.builders import (ActionFactoryBuilder, CharacterBuilder,
ItemBuilder, LevelBuilder)
from pyherc.test.cutesy import at_
import pyherc
class TestDamageModifiers():
"""
Tests for damage modifier effect
"""
def __init__(self):
"""
Default constructor
"""
super().__init__()
self.level = None
        self.model = None
self.character1 = None
self.character2 = None
self.rng = None
def setup(self):
"""
Setup for testcases
"""
self.model = Model()
set_action_factory(ActionFactoryBuilder()
.build())
self.character1 = (CharacterBuilder()
.with_model(self.model)
.with_hit_points(10)
.with_attack(3)
.with_body(5)
.build())
self.effect = DamageModifier(modifier=1,
damage_type='crushing',
duration=None,
frequency=None,
tick=None,
icon=101,
title='Weakness against crushing',
description='This character is weak')
self.effect.multiple_allowed = True
self.character2 = (CharacterBuilder()
.with_model(self.model)
.with_hit_points(10)
.with_attack(3)
.with_body(5)
.with_effect(self.effect)
.build())
self.model.dungeon = Dungeon()
self.level = (LevelBuilder()
.with_character(self.character1, at_(5, 5))
.with_character(self.character2, at_(6, 5))
.build())
self.model.dungeon.levels = self.level
self.rng = mock()
when(self.rng).randint(1, 6).thenReturn(1)
def test_damage_is_increased(self):
"""
Test that suffered damage can be modified
"""
pyherc.vtable['\ufdd0:attack'](self.character1,
3)
assert_that(self.character2.hit_points, is_(equal_to(6)))
def test_non_matching_damage_increase_is_not_done(self):
"""
Test that suffered damage is not modified when modifier does not
match with the damage
"""
self.effect.damage_type = 'slashing'
pyherc.vtable['\ufdd0:attack'](self.character1,
3)
assert_that(self.character2.hit_points, is_(equal_to(7)))
def test_multiple_modifiers_are_handled(self):
"""
Test that multiple modifier are taken into account and not skipped
"""
effect_2 = DamageModifier(modifier=3,
damage_type='crushing',
duration=None,
frequency=None,
tick=None,
icon=101,
title='title',
description='description')
effect_2.multiple_allowed = True
self.character2.add_effect(effect_2)
pyherc.vtable['\ufdd0:attack'](self.character1,
3)
assert_that(self.character2.hit_points, is_(equal_to(3)))
def test_melee_combat_is_handled(self):
"""
Damage modifiers should be handled in melee combat too
"""
weapon = (ItemBuilder()
.with_name('hammer')
.with_damage(2, 'crushing')
.build())
self.character1.inventory.weapon = weapon
pyherc.vtable['\ufdd0:attack'](self.character1,
3)
assert_that(self.character2.hit_points, is_(equal_to(7)))
| mit | -6,135,934,850,431,900,000 | 35.335526 | 79 | 0.539019 | false |
iEngage/python-sdk | iengage_client/models/notification.py | 1 | 7552 | # coding: utf-8
"""
Stakeholder engagement API
This API enables Intelligent Engagement for your Business. iEngage is a platform that combines process, augmented intelligence and rewards to help you intelligently engage customers.
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Notification(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, notification_id=None, type=None, message=None, date=None, by_user=None, entity=None, parent_entity=None, extra_data=None, read=False):
"""
Notification - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'notification_id': 'int',
'type': 'str',
'message': 'str',
'date': 'datetime',
'by_user': 'User',
'entity': 'Entity',
'parent_entity': 'Entity',
'extra_data': 'str',
'read': 'bool'
}
self.attribute_map = {
'notification_id': 'notificationId',
'type': 'type',
'message': 'message',
'date': 'date',
'by_user': 'byUser',
'entity': 'entity',
'parent_entity': 'parentEntity',
'extra_data': 'extraData',
'read': 'read'
}
self._notification_id = notification_id
self._type = type
self._message = message
self._date = date
self._by_user = by_user
self._entity = entity
self._parent_entity = parent_entity
self._extra_data = extra_data
self._read = read
@property
def notification_id(self):
"""
Gets the notification_id of this Notification.
:return: The notification_id of this Notification.
:rtype: int
"""
return self._notification_id
@notification_id.setter
def notification_id(self, notification_id):
"""
Sets the notification_id of this Notification.
:param notification_id: The notification_id of this Notification.
:type: int
"""
self._notification_id = notification_id
@property
def type(self):
"""
Gets the type of this Notification.
:return: The type of this Notification.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this Notification.
:param type: The type of this Notification.
:type: str
"""
self._type = type
@property
def message(self):
"""
Gets the message of this Notification.
:return: The message of this Notification.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this Notification.
:param message: The message of this Notification.
:type: str
"""
self._message = message
@property
def date(self):
"""
Gets the date of this Notification.
:return: The date of this Notification.
:rtype: datetime
"""
return self._date
@date.setter
def date(self, date):
"""
Sets the date of this Notification.
:param date: The date of this Notification.
:type: datetime
"""
self._date = date
@property
def by_user(self):
"""
Gets the by_user of this Notification.
:return: The by_user of this Notification.
:rtype: User
"""
return self._by_user
@by_user.setter
def by_user(self, by_user):
"""
Sets the by_user of this Notification.
:param by_user: The by_user of this Notification.
:type: User
"""
self._by_user = by_user
@property
def entity(self):
"""
Gets the entity of this Notification.
:return: The entity of this Notification.
:rtype: Entity
"""
return self._entity
@entity.setter
def entity(self, entity):
"""
Sets the entity of this Notification.
:param entity: The entity of this Notification.
:type: Entity
"""
self._entity = entity
@property
def parent_entity(self):
"""
Gets the parent_entity of this Notification.
:return: The parent_entity of this Notification.
:rtype: Entity
"""
return self._parent_entity
@parent_entity.setter
def parent_entity(self, parent_entity):
"""
Sets the parent_entity of this Notification.
:param parent_entity: The parent_entity of this Notification.
:type: Entity
"""
self._parent_entity = parent_entity
@property
def extra_data(self):
"""
Gets the extra_data of this Notification.
:return: The extra_data of this Notification.
:rtype: str
"""
return self._extra_data
@extra_data.setter
def extra_data(self, extra_data):
"""
Sets the extra_data of this Notification.
:param extra_data: The extra_data of this Notification.
:type: str
"""
self._extra_data = extra_data
@property
def read(self):
"""
Gets the read of this Notification.
:return: The read of this Notification.
:rtype: bool
"""
return self._read
@read.setter
def read(self, read):
"""
Sets the read of this Notification.
:param read: The read of this Notification.
:type: bool
"""
self._read = read
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| apache-2.0 | -2,243,176,710,946,412,300 | 23.842105 | 186 | 0.532707 | false |
vrbagalkote/avocado-misc-tests-1 | generic/openblas.py | 1 | 2440 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2017 IBM
# Author: Pooja B Surya <[email protected]>
import os
from avocado import Test
from avocado import main
from avocado.utils import build
from avocado.utils import archive
from avocado.utils.software_manager import SoftwareManager
from avocado.utils import distro
class Openblas(Test):
"""
OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version.
This test runs openblas tests
"""
def setUp(self):
smm = SoftwareManager()
detected_distro = distro.detect()
packages = ['make', 'gcc']
if detected_distro.name == "Ubuntu":
packages.append("gfortran")
elif detected_distro.name == "SuSE":
packages.append(["gcc-fortran", "libgfortran4"])
else:
packages.append("gcc-gfortran")
for package in packages:
if not smm.check_installed(package) and not smm.install(package):
self.cancel(' %s is needed for the test to be run' % package)
url = "https://github.com/xianyi/OpenBLAS/archive/develop.zip"
tarball = self.fetch_asset("OpenBLAS-develop.zip", locations=[url],
expire='7d')
archive.extract(tarball, self.srcdir)
openblas_dir = os.path.join(self.srcdir, "OpenBLAS-develop")
openblas_bin_dir = os.path.join(openblas_dir, 'bin')
os.mkdir(openblas_bin_dir)
build.make(openblas_dir, extra_args='FC=gfortran')
build.make(openblas_dir, extra_args='PREFIX=%s install' %
openblas_bin_dir)
self.test_dir = os.path.join(openblas_dir, "test")
def test(self):
        result = build.run_make(self.test_dir)
for line in str(result).splitlines():
if '[FAIL]' in line:
self.fail("test failed, Please check debug log for failed"
"test cases")
if __name__ == "__main__":
main()
| gpl-2.0 | -1,176,776,689,350,797,300 | 35.41791 | 78 | 0.643443 | false |
unt-libraries/coda | coda/coda_mdstore/tests/test_utils.py | 1 | 27398 | from datetime import datetime
from django.core.paginator import Page
from django.conf import settings
from lxml import etree, objectify
from codalib import bagatom
from unittest import mock
import pytest
from urllib.error import URLError
from coda_mdstore import factories, models, presentation, views, exceptions
from coda_mdstore.tests import CODA_XML
def convert_etree(tree):
"""
Convert etree object to an objectify object.
"""
return objectify.fromstring(etree.tostring(tree))
class TestPaginateEntries:
"""
Tests for coda_mdstore.views.paginate_entries.
"""
def test_returns_paginator_object(self, rf):
request = rf.get('/')
page = views.paginate_entries(request, [])
assert isinstance(page, Page)
def test_page_number_defaults_to_one(self, rf):
request = rf.get('/', {'page': 'foo'})
page = views.paginate_entries(request, [])
assert page.number == 1
def test_invalid_page_defaults_to_last_page(self, rf):
request = rf.get('/', {'page': 5})
page = views.paginate_entries(request, [1, 2, 3], 1)
assert page.number == 3
class TestPercent:
"""
Tests for coda_mdstore.views.percent.
"""
def test_returns_float(self):
result = views.percent(1, 2)
assert isinstance(result, float)
def test_return_value(self):
result = views.percent(1, 2)
assert result == 50.0
@pytest.mark.django_db
class TestBagSearch:
"""
Tests for coda_mdstore.presentation.bagSearch.
"""
def test_searches_bag_info_objects(self):
bags = factories.FullBagFactory.create_batch(10)
bag = bags[0]
search_term = bag.bag_info_set.first().field_body
results = presentation.bagSearch(search_term)
assert len(results) == 10
def test_search_bags_only(self):
bags = factories.BagFactory.create_batch(10)
bag = bags[0]
results = presentation.bagSearch(bag.name)
assert len(results) == 10
def test_search_returns_no_bags(self):
bag_list = presentation.bagSearch('')
assert len(bag_list) == 0
@pytest.mark.django_db
class TestMakeBagAtomFeed:
"""
Tests for coda_mdstore.presentation.makeBagAtomFeed.
"""
@mock.patch('coda_mdstore.presentation.objectsToXML')
@mock.patch(
'coda_mdstore.presentation.wrapAtom',
lambda *args, **kwargs: etree.Element('atomEntry')
)
def test_with_bag_objects(self, *args):
title = 'test title'
feed_id = 'test-id'
bag_list = factories.FullBagFactory.create_batch(5)
result = presentation.makeBagAtomFeed(bag_list, feed_id, title)
feed = convert_etree(result)
assert len(feed.atomEntry) == 5
assert feed.id == feed_id
assert feed.title == title
assert feed.updated.text is None
assert feed.link.get('href') == feed_id
assert feed.link.get('rel') == 'self'
def test_without_bag_objects(self):
title = 'test title'
feed_id = 'test-id'
bag_list = []
result = presentation.makeBagAtomFeed(bag_list, feed_id, title)
feed = convert_etree(result)
assert feed.id == feed_id
assert feed.title == title
assert feed.updated.text is None
assert feed.link.get('href') == feed_id
assert feed.link.get('rel') == 'self'
assert feed.countchildren() == 4
@pytest.mark.django_db
class TestObjectsToXML:
"""
Tests for coda_mdstore.presentation.objectsToXML.
"""
def test_bag_attribute_conversion(self):
bag = factories.FullBagFactory.create()
tree = presentation.objectsToXML(bag)
bag_xml = convert_etree(tree)
assert bag_xml.name == bag.name
assert bag_xml.fileCount == bag.files
assert bag_xml.payloadSize == bag.size
assert bag_xml.lastVerified, 'lastVerified should not be empty'
assert str(bag_xml.bagitVersion) == bag.bagit_version
def test_bag_info_attribute_conversion(self):
bag = factories.FullBagFactory.create()
tree = presentation.objectsToXML(bag)
bag_xml = convert_etree(tree)
for i, bag_info in enumerate(bag.bag_info_set.all()):
bag_info_xml = bag_xml.bagInfo.item[i]
assert bag_info_xml.name.text == bag_info.field_name
assert bag_info_xml.body.text == bag_info.field_body
assert bag_info_xml.countchildren() == 2
class TestNodeEntry:
"""
Tests for coda_mdstore.presentation.nodeEntry.
"""
def test_xml_has_node_attributes(self):
node = factories.NodeFactory.build()
tree = presentation.nodeEntry(node)
xml_obj = convert_etree(tree)
node_last_checked = node.last_checked.strftime(
bagatom.TIME_FORMAT_STRING
)
assert xml_obj.content.node.name == node.node_name
assert xml_obj.content.node.capacity == node.node_capacity
assert xml_obj.content.node.size == node.node_size
assert xml_obj.content.node.path == node.node_path
assert xml_obj.content.node.url == node.node_url
assert xml_obj.content.node.last_checked == node_last_checked
assert xml_obj.content.node.status == node.get_status_display()
assert xml_obj.content.node.countchildren() == 7
def test_xml_id(self):
node = factories.NodeFactory.build()
web_root = 'example.com'
tree = presentation.nodeEntry(node, web_root)
xml_obj = convert_etree(tree)
assert web_root in xml_obj.id.text
def test_xml_title(self):
node = factories.NodeFactory.build()
tree = presentation.nodeEntry(node)
xml_obj = convert_etree(tree)
assert xml_obj.title == node.node_name
def test_xml_has_author_name_element(self):
node = factories.NodeFactory.build()
tree = presentation.nodeEntry(node)
xml_obj = convert_etree(tree)
assert hasattr(xml_obj.author, 'name')
def test_xml_has_author_uri_element(self):
node = factories.NodeFactory.build()
tree = presentation.nodeEntry(node)
xml_obj = convert_etree(tree)
assert hasattr(xml_obj.author, 'uri')
@pytest.mark.django_db
class TestUpdateNode:
"""
Tests for coda_mdstore.presentation.updateNode.
"""
def test_node_not_found_raises_exceptions(self, rf):
node = factories.NodeFactory.build()
node_tree = presentation.nodeEntry(node)
node_xml = etree.tostring(node_tree)
url = '/node/{0}/'.format(node.node_name)
request = rf.post(url, node_xml, 'application/xml')
with pytest.raises(models.Node.DoesNotExist):
presentation.updateNode(request)
def test_raises_bad_node_name_exception(self, rf):
node = factories.NodeFactory.build()
node_tree = presentation.nodeEntry(node)
node_xml = etree.tostring(node_tree)
node.save()
request = rf.post('/', node_xml, 'application/xml')
with pytest.raises(exceptions.BadNodeName):
presentation.updateNode(request)
def test_node_updated(self, rf):
node = factories.NodeFactory.build()
node.save()
node.node_size = '0'
node_tree = presentation.nodeEntry(node)
node_xml = etree.tostring(node_tree)
url = '/node/{0}/detail'.format(node.node_name)
request = rf.post(url, node_xml, 'application/xml')
updated_node = presentation.updateNode(request)
assert updated_node.node_size == 0
class TestCreateNode:
"""
Tests for coda_mdstore.presentation.createNode.
"""
def test_returns_node_object(self, rf):
node = factories.NodeFactory.build()
node_tree = presentation.nodeEntry(node)
node_xml = etree.tostring(node_tree)
request = rf.post('/', node_xml, 'application/xml')
created_node = presentation.createNode(request)
assert isinstance(created_node, models.Node)
def test_created_node_attributes(self, rf):
node = factories.NodeFactory.build()
node_tree = presentation.nodeEntry(node)
node_xml = etree.tostring(node_tree)
request = rf.post('/', node_xml, 'application/xml')
created_node = presentation.createNode(request)
assert node.node_name == created_node.node_name
assert node.node_capacity == created_node.node_capacity
assert node.node_size == created_node.node_size
assert node.node_path == created_node.node_path
assert node.node_url == created_node.node_url
# Verify that the attribute exists, but do not attempt to guess
# the value.
assert hasattr(node, 'last_checked')
class TestXmlToBagObject:
@pytest.fixture
def bag_xml(self):
xml = """
<bag:codaXML xmlns:bag="http://digital2.library.unt.edu/coda/bagxml/">
<bag:name>ark:/{ark_naan}/coda2</bag:name>
<bag:fileCount>43</bag:fileCount>
<bag:payloadSize>46259062</bag:payloadSize>
<bag:bagitVersion>0.96</bag:bagitVersion>
<bag:lastStatus>fail</bag:lastStatus>
<bag:lastVerified>2015-01-01</bag:lastVerified>
<bag:baggingDate>2015-01-01</bag:baggingDate>
<bag:bagInfo>
<bag:item>
<bag:name>Bagging-Date</bag:name>
<bag:body>2009-09-24</bag:body>
</bag:item>
<bag:item>
<bag:name>Payload-Oxum</bag:name>
<bag:body>46259062.43</bag:body>
</bag:item>
</bag:bagInfo>
</bag:codaXML>
""".format(ark_naan=settings.ARK_NAAN)
return objectify.fromstring(xml)
def test_name_not_set(self, bag_xml):
del bag_xml.name
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert bag is None
assert bag_infos is None
assert error == "Unable to set 'name' attribute"
def test_name_is_set(self, bag_xml):
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert bag.name == bag_xml.name
assert error is None
def test_fileCount_not_set(self, bag_xml):
del bag_xml.fileCount
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert bag is None
assert bag_infos is None
assert error == "Unable to set 'files' attribute"
def test_fileCount_is_set(self, bag_xml):
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert bag.files == str(bag_xml.fileCount)
assert error is None
def test_payloadSize_not_set(self, bag_xml):
del bag_xml.payloadSize
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert bag is None
assert bag_infos is None
assert "Unable to set 'size' attribute" in error
def test_payloadSize_is_set(self, bag_xml):
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert bag.size == bag_xml.payloadSize
assert error is None
def test_lastStatus_not_set(self, bag_xml):
del bag_xml.lastStatus
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert bag.last_verified_status == 'pass'
assert error is None
def test_lastStatus_is_set(self, bag_xml):
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert bag.last_verified_status == bag_xml.lastStatus
assert error is None
def test_lastVerified_not_set(self, bag_xml):
del bag_xml.lastVerified
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert isinstance(bag.last_verified_date, datetime)
assert error is None
def test_lastVerified_is_set(self, bag_xml):
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert isinstance(bag.last_verified_date, datetime)
assert error is None
def test_bagitVersion_not_set(self, bag_xml):
del bag_xml.bagitVersion
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert bag.bagit_version == ''
assert error is None
def test_bagitVersion_is_set(self, bag_xml):
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert bag.bagit_version == str(bag_xml.bagitVersion)
assert error is None
def test_baggingDate_not_set(self, bag_xml):
del bag_xml.baggingDate
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert isinstance(bag.bagging_date, datetime)
assert error is None
def test_baggingDate_is_set(self, bag_xml):
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert isinstance(bag.bagging_date, datetime)
assert error is None
def test_has_bag_info_objects(self, bag_xml):
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert len(bag_infos) == 2
# Verify that all of the bag_infos are instances of models.Bag_Info
assert all([isinstance(m, models.Bag_Info) for m in bag_infos])
def test_has_no_bag_info_objects(self, bag_xml):
# Remove all of the bagInfo items from bag_xml.
del bag_xml.bagInfo.item[0:]
bag, bag_infos, error = presentation.xmlToBagObject(bag_xml)
assert len(bag_infos) == 0
@pytest.fixture
def bag_xml():
xml = """<?xml version="1.0"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<title>ark:/{ark_naan}/coda2</title>
<id>ark:/{ark_naan}/coda2</id>
<updated>2013-06-05T17:05:33Z</updated>
<author>
<name>server</name>
</author>
<content type="application/xml">
<bag:codaXML xmlns:bag="http://digital2.library.unt.edu/coda/bagxml/">
<bag:name>ark:/{ark_naan}/coda2</bag:name>
<bag:fileCount>43</bag:fileCount>
<bag:payloadSize>46259062</bag:payloadSize>
<bag:bagitVersion>0.96</bag:bagitVersion>
<bag:lastVerified/>
<bag:bagInfo>
<bag:item>
<bag:name>Bag-Size</bag:name>
<bag:body>51.26M</bag:body>
</bag:item>
<bag:item>
<bag:name>Tag-File-Character-Encoding</bag:name>
<bag:body>UTF-8</bag:body>
</bag:item>
</bag:bagInfo>
</bag:codaXML>
</content>
</entry>
""".format(ark_naan=settings.ARK_NAAN)
return objectify.fromstring(xml)
@pytest.mark.django_db
@pytest.mark.usefixtures('bag_xml')
class TestCreateBag:
"""
Tests for coda_mdstore.presentation.createBag.
"""
@pytest.mark.xfail(reason='Exception in function will never be raised.')
def test_raises_exception_when_xml_cannot_be_parsed(self):
with pytest.raises(Exception) as e:
presentation.createBag('')
assert str(e) == 'Unable to parse uploaded XML'
@pytest.mark.xfail(reason='Exception in function will never be raised.')
def test_raises_exception_when_content_element_not_present(self):
with pytest.raises(Exception) as e:
presentation.createBag('<root/>')
assert str(e) == 'No content element located'
@mock.patch('coda_mdstore.presentation.xmlToBagObject')
def test_raises_exception_when_xmlToBagObject_reports_error(self, mock, bag_xml):
mock.return_value = (None, None, 'Fake error')
xml_str = etree.tostring(bag_xml)
with pytest.raises(Exception) as e:
presentation.createBag(xml_str)
assert 'codaXML issue,' in str(e)
def test_returns_bag_object(self, bag_xml):
xml_str = etree.tostring(bag_xml)
created_bag, created_bag_infos = presentation.createBag(xml_str)
assert isinstance(created_bag, models.Bag)
def test_returns_bag_info_objects(self, bag_xml):
xml_str = etree.tostring(bag_xml)
created_bag, created_bag_infos = presentation.createBag(xml_str)
for bag_info in created_bag_infos:
assert isinstance(bag_info, models.Bag_Info)
def test_existing_bag_info_objects_are_deleted(self, bag_xml):
"""
Test that existing Bag_Info objects are removed when a Bag is created
with createBag.
This test relies on the fact that the Bag_Info objects created from the
FullBagFactory are different from the bagInfo items in the bag_xml
fixture.
"""
# Create a bag with the same name as the bag_xml fixture.
name = str(bag_xml.content[CODA_XML].name)
bag = factories.FullBagFactory.create(name=name)
# Unpack the queryset now so the query is executed
old_bag_info1, old_bag_info2 = bag.bag_info_set.all()
xml_str = etree.tostring(bag_xml)
created_bag, created_bag_infos = presentation.createBag(xml_str)
# Verify that the each of the previous bag_info objects are not
# in the list of created bag_info objects returned from createBag.
assert old_bag_info1.field_name not in [b.field_name for b in created_bag_infos]
assert old_bag_info2.field_name not in [b.field_name for b in created_bag_infos]
@pytest.mark.django_db
@pytest.mark.usefixtures('bag_xml')
class TestUpdateBag:
"""
Tests for coda_mdstore.presentation.updateBag.
"""
def test_returns_bag(self, bag_xml, rf):
bag = factories.FullBagFactory.create(name='ark:/%d/coda2' % settings.ARK_NAAN)
xml_str = etree.tostring(bag_xml)
uri = '/APP/bag/{0}/'.format(bag.name)
request = rf.post(uri, xml_str, 'application/xml')
updated_bag = presentation.updateBag(request)
assert isinstance(updated_bag, models.Bag)
def test_bag_is_updated(self, bag_xml, rf):
bag = factories.FullBagFactory.create(name='ark:/%d/coda2' % settings.ARK_NAAN)
xml_str = etree.tostring(bag_xml)
uri = '/APP/bag/{0}/'.format(bag.name)
request = rf.post(uri, xml_str, 'application/xml')
updated_bag = presentation.updateBag(request)
bag_tree = bag_xml.content[CODA_XML]
assert updated_bag.name == bag_tree.name
assert updated_bag.size == bag_tree.payloadSize
assert updated_bag.bagit_version == str(bag_tree.bagitVersion)
assert updated_bag.files == str(bag_tree.fileCount)
assert updated_bag.bag_info_set.count() == 2
assert updated_bag.external_identifier_set.count() == 0
def test_raises_bad_bag_name_exception(self, bag_xml, rf):
factories.FullBagFactory.create(name='ark:/%d/coda2' % settings.ARK_NAAN)
xml_str = etree.tostring(bag_xml)
request = rf.post('/', xml_str, 'application/xml')
with pytest.raises(exceptions.BadBagName):
presentation.updateBag(request)
def test_bag_object_not_found_raises_exception(self, bag_xml, rf):
factories.FullBagFactory.create()
xml_str = etree.tostring(bag_xml)
# FIXME: Duplication between the test and the test fixture
uri = '/APP/bag/ark:/%d/coda2/' % settings.ARK_NAAN
request = rf.post(uri, xml_str, 'application/xml')
with pytest.raises(models.Bag.DoesNotExist):
presentation.updateBag(request)
    def test_existing_bag_info_objects_are_updated(self, bag_xml, rf):
"""
Test that existing Bag_Info objects are removed when a Bag is updated
with updateBag.
This test relies on the fact that the Bag_Info objects created from the
FullBagFactory are different from the bagInfo items in the bag_xml
fixture.
"""
# Create a bag with the same name as the bag_xml fixture.
name = str(bag_xml.content[CODA_XML].name)
bag = factories.FullBagFactory.create(name=name)
# Unpack the queryset now so the query is executed
old_bag_info1, old_bag_info2 = bag.bag_info_set.all()
# Compose the request to be passed to updateBag.
xml_str = etree.tostring(bag_xml)
uri = '/APP/bag/{0}/'.format(bag.name)
request = rf.post(uri, xml_str, 'application/xml')
updated_bag = presentation.updateBag(request)
# Verify that the each of the previous bag_info objects are not in the
# related set of the new bag object returned from updateBag.
update_bag_infos = updated_bag.bag_info_set.all()
assert old_bag_info1.field_name not in [b.field_name for b in update_bag_infos]
assert old_bag_info2.field_name not in [b.field_name for b in update_bag_infos]
@mock.patch('coda_mdstore.presentation.urllib.request.urlopen')
def test_getFileList(mock_urlopen):
"""Test all attribute values are extracted as files."""
text = b"""<html>
<body>
<tr> <td>test</td> <td>data</td> </tr>
<tr> <td>of </td> </tr>
<tr> <td>
<a href='bag-info.txt'>url</a>
<a href='manifest-md5.txt'>here</a>
<a href='bagit.txt'>here</a>
</td> </tr>
</body>
</html>"""
mock_urlopen.return_value = text
filelist = presentation.getFileList('https://coda/testurl')
assert ['bag-info.txt', 'manifest-md5.txt', 'bagit.txt'] == filelist
@mock.patch('requests.get')
def test_file_chunk_generator(mock_get):
"""Test chunks of data is generated."""
url = 'www.example.com'
mock_data = ['This', 'is', 'to', 'test', 'streaming', 'data.']
mock_get.return_value.status_code = 200
mock_get.return_value.iter_content.return_value = mock_data
chunk = list(presentation.file_chunk_generator(url))
assert chunk == mock_data
mock_get.assert_called_once_with(url, stream=True)
@mock.patch('requests.get')
def test_file_chunk_generator_with_bad_url(mock_get):
"""Test empty generator is returned when a bad url is given."""
url = 'www.example.com'
mock_get.return_value.status_code = 404
chunk = list(presentation.file_chunk_generator(url))
assert chunk == []
mock_get.assert_called_once_with(url, stream=True)
@mock.patch('coda_mdstore.presentation.file_chunk_generator')
def test_zip_file_streamer(mock_gen):
"""Test files are streamed."""
urls = [
'http://www.example.com/coda123/manifest-md5.txt',
'http://www.example.com/coda123/bagit.txt',
'http://www.example.com/coda123/bag-info.txt'
]
meta_id = 'coda123'
mock_data_1 = [b'Test1', b'manifest', b'data1']
mock_data_2 = [b'Test2', b'bagit', b'data2']
mock_data_3 = [b'Test3', b'baginfo', b'data3']
mock_gen.side_effect = [iter(mock_data_1), iter(mock_data_2), iter(mock_data_3)]
chunk = list(presentation.zip_file_streamer(urls, meta_id))
for data in mock_data_1, mock_data_2, mock_data_3:
for val in data:
assert val in chunk
assert mock_gen.call_count == 3
class TestGenerateBagFiles:
"""
Tests for coda_mdstore.presentation.generateBagFiles.
"""
@mock.patch('coda_mdstore.presentation.getFileHandle')
def test_file_handle_error(self, mock_handle):
mock_handle.side_effect = presentation.FileHandleError()
identifier = 'ark:/%d/coda2' % settings.ARK_NAAN
with pytest.raises(presentation.FileHandleError) as exc:
presentation.generateBagFiles(identifier=identifier,
proxyRoot='',
proxyMode=True)
assert str(exc.value) == 'Unable to get handle for id %s' % (identifier)
@mock.patch('coda_mdstore.presentation.getFileList')
@mock.patch('coda_mdstore.presentation.getFileHandle')
def test_bag_files_with_proxyroot(self, mock_handle, mock_file_list):
mock_file_list.return_value = ['bagit.txt', 'bag-info.txt']
mock_handle.return_value.readline.side_effect = [
b'192e635b17a9c2aea6181f0f87cab05d data/file01.txt',
b'18b7c500ef8bacf7b2151f83d28e7ca1 data/file02.txt',
b'']
identifier = 'ark:/%d/coda1' % settings.ARK_NAAN
transList = presentation.generateBagFiles(identifier=identifier,
proxyRoot='https://example.com/',
proxyMode=True)
assert transList == ['https://example.com/bag/ark:/67531/coda1/data/file01.txt',
'https://example.com/bag/ark:/67531/coda1/data/file02.txt',
'https://example.com/bag/ark:/67531/coda1/bagit.txt',
'https://example.com/bag/ark:/67531/coda1/bag-info.txt']
assert mock_handle.call_count == 2
@mock.patch('coda_mdstore.presentation.getFileList')
@mock.patch('coda_mdstore.presentation.getFileHandle')
def test_bag_files_with_topfiles_bagroot(self, mock_handle, mock_file_list):
mock_file_list.return_value = ['bagit.txt', 'bag-info.txt']
mock_handle.return_value.url = 'https://coda/testurl'
mock_handle.return_value.readline.side_effect = [
b'192e635b17a9c2aea6181f0f87cab05d data/file01.txt',
b'18b7c500ef8bacf7b2151f83d28e7ca1 data/file02.txt',
b'']
identifier = 'ark:/%d/coda2' % settings.ARK_NAAN
transList = presentation.generateBagFiles(identifier=identifier,
proxyRoot='https://example.com/',
proxyMode=False)
assert transList == ['https://coda/data/file01.txt',
'https://coda/data/file02.txt',
'https://coda/bagit.txt',
'https://coda/bag-info.txt']
assert mock_handle.call_count == 2
assert mock_file_list.call_count == 1
@pytest.mark.django_db
class TestGetFileHandle:
"""
Tests for coda_mdstore.presentation.getFileHandle.
"""
@mock.patch('urllib.request.urlopen')
def test_getFileHandle(self, mock_urlopen):
"""Test file handle of first node object with valid node_url is returned."""
factories.NodeFactory.create_batch(3)
codaId = 'ark:/67531/coda1s9ns'
codaPath = 'manifest-md5.txt'
url = 'http://example.com/coda-001/store/pairtree_root/co/da/' \
'1s/9n/s/coda1s9ns/manifest-md5.txt'
mock_url_obj = mock.Mock()
mock_url_obj.url = url
# Raise exception for the first node object url and return mock objects for the rest.
mock_urlopen.side_effect = [URLError('Unknown host'), mock_url_obj, mock.Mock()]
fileHandle = presentation.getFileHandle(codaId=codaId, codaPath=codaPath)
assert fileHandle.url == url
assert mock_urlopen.call_count == 2
def test_getFileHandle_no_node(self):
codaId = 'ark:/67531/coda1s9ns'
codaPath = 'manifest.txt'
with pytest.raises(presentation.FileHandleError) as exc:
presentation.getFileHandle(codaId=codaId, codaPath=codaPath)
assert str(exc.value) == 'Unable to get handle for id %s at path %s'\
% (codaId, codaPath)
| bsd-3-clause | -146,806,818,711,896,960 | 35.974359 | 93 | 0.623878 | false |
RobertABT/heightmap | region.py | 1 | 2223 | from numpy import *
class Region:
xllc = 0 #<--x
yllc = 0 #<--y
nrows = 3 #<-- b
ncols = 3 #<-- a
step = 50
grid = [[1,2,3], [4,5,6], [3,8,9] ]
#Reading files, retrieving integers and creating an array.
def read (self, filename):
if filename is None:
print("Your Grid Reference format is incorrect for UK!")
return False
try:
file = open(filename,'r')
except:
print("No such Grid Reference in the UK!")
return False
a = file.readline().split()
self.ncols = int(a[1])
b = file.readline().split()
self.nrows = int(b[1])
x = file.readline().split()
self.xllc = int(x[1])
y = file.readline().split()
self.yllc = int(y[1])
z = file.readline().split()
self.step = int(z[1])
        file.close()
self.grid = loadtxt(filename, skiprows=5)
return True
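    # The header parsed above follows the ESRI ASCII grid layout; a minimal
    # hypothetical example of the first five lines of such a file:
    #
    #     ncols 200
    #     nrows 200
    #     xllcorner 280000
    #     yllcorner 200000
    #     cellsize 50
    #
    # followed by the rows of height values that numpy.loadtxt reads into
    # self.grid.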
#Retrieving files according to grid references.
def readgr(self, gridsqr):
thepath = "data/" + gridsqr[0:3].upper()
if len(gridsqr) > 12:
thepath = None
elif len(gridsqr) == 12:
thepath = thepath + gridsqr[7]
elif len(gridsqr) == 10:
thepath = thepath + gridsqr[6]
elif len(gridsqr) == 8:
thepath = thepath + gridsqr[5]
elif len(gridsqr) == 6:
thepath = thepath + gridsqr[4]
elif len(gridsqr) == 4:
thepath = thepath + gridsqr[3]
else:
thepath = None
if thepath != None:
thepath = thepath + ".asc"
self.read(thepath)
if __name__ == "__main__":
#Defining global variable.
region = Region()
#Users input
region.readgr("SN43567 43567")
#Printing values.
print("------------")
print(region.xllc)
print("xllcorner")
print("------------")
print(region.yllc)
print("yllcorner")
print("------------")
print(region.ncols)
print("ncolumns")
print("------------")
print(region.nrows)
print("nrows")
print("------------")
| mit | -6,753,461,964,680,506,000 | 22.648936 | 68 | 0.482231 | false |
CCI-MOC/GUI-Backend | api/v1/serializers/export_request_serializer.py | 1 | 1091 | from core.models.export_request import ExportRequest
from core.models.user import AtmosphereUser
from core.models.instance import Instance
from rest_framework import serializers
class ExportRequestSerializer(serializers.ModelSerializer):
"""
"""
name = serializers.CharField(source='export_name')
instance = serializers.SlugRelatedField(
slug_field='provider_alias',
queryset=Instance.objects.all()
)
status = serializers.CharField(default="pending")
disk_format = serializers.CharField(source='export_format')
owner = serializers.SlugRelatedField(slug_field='username',
source='export_owner',
queryset=AtmosphereUser.objects.all()
)
file = serializers.CharField(read_only=True, default="",
required=False, source='export_file')
class Meta:
model = ExportRequest
fields = ('id', 'instance', 'status', 'name',
'owner', 'disk_format', 'file')
| apache-2.0 | 9,100,617,223,407,332,000 | 37.964286 | 78 | 0.605866 | false |
garnermccloud/OLD_DJANGO | Listigain/migrations/0009_auto__chg_field_task_time_created.py | 1 | 5764 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Task.time_created'
db.alter_column('Listigain_task', 'time_created', self.gf('django.db.models.fields.DateTimeField')(null=True))
def backwards(self, orm):
# Changing field 'Task.time_created'
db.alter_column('Listigain_task', 'time_created', self.gf('django.db.models.fields.DateTimeField')())
models = {
'Listigain.end_time': {
'Meta': {'object_name': 'End_Time'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Listigain.Task']"}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.date(2013, 5, 19)'})
},
'Listigain.start_time': {
'Meta': {'object_name': 'Start_Time'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Listigain.Task']"}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.date(2013, 5, 19)'})
},
'Listigain.task': {
'Meta': {'object_name': 'Task'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'content': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'date_due': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.date(2013, 5, 19)'}),
'duration_est': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '4'}),
'duration_max': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '4'}),
'enjoyment': ('django.db.models.fields.IntegerField', [], {'default': '4', 'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.IntegerField', [], {'default': '4', 'max_length': '1'}),
'time_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.date(2013, 5, 19)', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 19, 19, 8, 43, 538000)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 19, 19, 8, 43, 538000)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['Listigain']
| apache-2.0 | -3,049,367,328,527,508,500 | 66.023256 | 163 | 0.550659 | false |
emmanuelle/scikits.image | skimage/feature/texture.py | 2 | 9818 | """
Methods to characterize image textures.
"""
import math
import numpy as np
from scipy import ndimage
from ._texture import _glcm_loop, _local_binary_pattern
def greycomatrix(image, distances, angles, levels=256, symmetric=False,
normed=False):
"""Calculate the grey-level co-occurrence matrix.
    A grey level co-occurrence matrix is a histogram of co-occurring
    greyscale values at a given offset over an image.
Parameters
----------
image : array_like of uint8
Integer typed input image. The image will be cast to uint8, so
the maximum value must be less than 256.
distances : array_like
List of pixel pair distance offsets.
angles : array_like
List of pixel pair angles in radians.
levels : int, optional
The input image should contain integers in [0, levels-1],
where levels indicate the number of grey-levels counted
(typically 256 for an 8-bit image). The maximum value is
256.
symmetric : bool, optional
If True, the output matrix `P[:, :, d, theta]` is symmetric. This
is accomplished by ignoring the order of value pairs, so both
(i, j) and (j, i) are accumulated when (i, j) is encountered
for a given offset. The default is False.
normed : bool, optional
If True, normalize each matrix `P[:, :, d, theta]` by dividing
by the total number of accumulated co-occurrences for the given
offset. The elements of the resulting matrix sum to 1. The
default is False.
Returns
-------
P : 4-D ndarray
The grey-level co-occurrence histogram. The value
`P[i,j,d,theta]` is the number of times that grey-level `j`
occurs at a distance `d` and at an angle `theta` from
grey-level `i`. If `normed` is `False`, the output is of
type uint32, otherwise it is float64.
References
----------
.. [1] The GLCM Tutorial Home Page,
http://www.fp.ucalgary.ca/mhallbey/tutorial.htm
.. [2] Pattern Recognition Engineering, Morton Nadler & Eric P.
Smith
.. [3] Wikipedia, http://en.wikipedia.org/wiki/Co-occurrence_matrix
Examples
--------
Compute 2 GLCMs: One for a 1-pixel offset to the right, and one
for a 1-pixel offset upwards.
>>> image = np.array([[0, 0, 1, 1],
... [0, 0, 1, 1],
... [0, 2, 2, 2],
... [2, 2, 3, 3]], dtype=np.uint8)
>>> result = greycomatrix(image, [1], [0, np.pi/2], levels=4)
>>> result[:, :, 0, 0]
array([[2, 2, 1, 0],
[0, 2, 0, 0],
[0, 0, 3, 1],
[0, 0, 0, 1]], dtype=uint32)
>>> result[:, :, 0, 1]
array([[3, 0, 2, 0],
[0, 2, 2, 0],
[0, 0, 1, 2],
[0, 0, 0, 0]], dtype=uint32)
"""
assert levels <= 256
image = np.ascontiguousarray(image)
assert image.ndim == 2
assert image.min() >= 0
assert image.max() < levels
image = image.astype(np.uint8)
distances = np.ascontiguousarray(distances, dtype=np.float64)
angles = np.ascontiguousarray(angles, dtype=np.float64)
assert distances.ndim == 1
assert angles.ndim == 1
P = np.zeros((levels, levels, len(distances), len(angles)),
dtype=np.uint32, order='C')
    # count co-occurrences
_glcm_loop(image, distances, angles, levels, P)
    # make each GLCM symmetric
if symmetric:
Pt = np.transpose(P, (1, 0, 2, 3))
P = P + Pt
    # normalize each GLCM
if normed:
P = P.astype(np.float64)
glcm_sums = np.apply_over_axes(np.sum, P, axes=(0, 1))
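        # avoid division by zero for offsets that accumulated no co-occurrences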
glcm_sums[glcm_sums == 0] = 1
P /= glcm_sums
return P
def greycoprops(P, prop='contrast'):
"""Calculate texture properties of a GLCM.
Compute a feature of a grey level co-occurrence matrix to serve as
a compact summary of the matrix. The properties are computed as
follows:
- 'contrast': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}(i-j)^2`
- 'dissimilarity': :math:`\\sum_{i,j=0}^{levels-1}P_{i,j}|i-j|`
- 'homogeneity': :math:`\\sum_{i,j=0}^{levels-1}\\frac{P_{i,j}}{1+(i-j)^2}`
- 'ASM': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}^2`
- 'energy': :math:`\\sqrt{ASM}`
- 'correlation':
.. math:: \\sum_{i,j=0}^{levels-1} P_{i,j}\\left[\\frac{(i-\\mu_i) \\
(j-\\mu_j)}{\\sqrt{(\\sigma_i^2)(\\sigma_j^2)}}\\right]
Parameters
----------
P : ndarray
Input array. `P` is the grey-level co-occurrence histogram
for which to compute the specified property. The value
`P[i,j,d,theta]` is the number of times that grey-level j
occurs at a distance d and at an angle theta from
grey-level i.
prop : {'contrast', 'dissimilarity', 'homogeneity', 'energy', \
'correlation', 'ASM'}, optional
The property of the GLCM to compute. The default is 'contrast'.
Returns
-------
results : 2-D ndarray
2-dimensional array. `results[d, a]` is the property 'prop' for
the d'th distance and the a'th angle.
References
----------
.. [1] The GLCM Tutorial Home Page,
http://www.fp.ucalgary.ca/mhallbey/tutorial.htm
Examples
--------
Compute the contrast for GLCMs with distances [1, 2] and angles
[0 degrees, 90 degrees]
>>> image = np.array([[0, 0, 1, 1],
... [0, 0, 1, 1],
... [0, 2, 2, 2],
... [2, 2, 3, 3]], dtype=np.uint8)
>>> g = greycomatrix(image, [1, 2], [0, np.pi/2], levels=4,
... normed=True, symmetric=True)
>>> contrast = greycoprops(g, 'contrast')
>>> contrast
array([[ 0.58333333, 1. ],
[ 1.25 , 2.75 ]])
"""
assert P.ndim == 4
(num_level, num_level2, num_dist, num_angle) = P.shape
assert num_level == num_level2
assert num_dist > 0
assert num_angle > 0
# create weights for specified property
I, J = np.ogrid[0:num_level, 0:num_level]
if prop == 'contrast':
weights = (I - J) ** 2
elif prop == 'dissimilarity':
weights = np.abs(I - J)
elif prop == 'homogeneity':
weights = 1. / (1. + (I - J) ** 2)
elif prop in ['ASM', 'energy', 'correlation']:
pass
else:
raise ValueError('%s is an invalid property' % (prop))
# compute property for each GLCM
if prop == 'energy':
asm = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]
results = np.sqrt(asm)
elif prop == 'ASM':
results = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]
elif prop == 'correlation':
results = np.zeros((num_dist, num_angle), dtype=np.float64)
I = np.array(range(num_level)).reshape((num_level, 1, 1, 1))
J = np.array(range(num_level)).reshape((1, num_level, 1, 1))
diff_i = I - np.apply_over_axes(np.sum, (I * P), axes=(0, 1))[0, 0]
diff_j = J - np.apply_over_axes(np.sum, (J * P), axes=(0, 1))[0, 0]
std_i = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_i) ** 2),
axes=(0, 1))[0, 0])
std_j = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_j) ** 2),
axes=(0, 1))[0, 0])
cov = np.apply_over_axes(np.sum, (P * (diff_i * diff_j)),
axes=(0, 1))[0, 0]
# handle the special case of standard deviations near zero
mask_0 = std_i < 1e-15
mask_0[std_j < 1e-15] = True
results[mask_0] = 1
# handle the standard case
mask_1 = mask_0 == False
results[mask_1] = cov[mask_1] / (std_i[mask_1] * std_j[mask_1])
elif prop in ['contrast', 'dissimilarity', 'homogeneity']:
weights = weights.reshape((num_level, num_level, 1, 1))
results = np.apply_over_axes(np.sum, (P * weights), axes=(0, 1))[0, 0]
return results
def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbour set points (quantization of the
angular space).
R : float
Radius of circle (spatial resolution of the operator).
    method : {'default', 'ror', 'uniform', 'var'}
Method to determine the pattern::
* 'default': original local binary pattern which is gray scale but not
rotation invariant.
* 'ror': extension of default implementation which is gray scale and
rotation invariant.
* 'uniform': improved rotation invariance with uniform patterns and
finer quantization of the angular space which is gray scale and
rotation invariant.
* 'var': rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.rafbis.it/biplab15/images/stories/docenti/Danielriccio/\
Articoliriferimento/LBP.pdf, 2002.
"""
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'var': ord('V')
}
image = np.array(image, dtype='double', copy=True)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
| bsd-3-clause | -8,732,840,251,207,591,000 | 34.316547 | 80 | 0.561316 | false |
lablup/sorna-jupyter-kernel | src/ai/backend/integration/jupyter/install.py | 1 | 4036 | '''
The kernel installer.
Run `python -m ai.backend.integration.jupyter.install` to use Backend.AI in your Jupyter notebooks.
'''
import argparse
import json
import os
import sys
import webbrowser
from jupyter_client.kernelspec import KernelSpecManager
from IPython.utils.tempdir import TemporaryDirectory
from .kernel import kernels
def clean_kernel_spec(user=True, prefix=None):
mgr = KernelSpecManager()
# NOTE: remove_kernel_spec() and get_all_specs() does not support explicit prefix.
# Sometimes we may need to perform --clean-only multiple times to completely
# remove all kernelspecs installed around venvs and system global directories.
for name, info in mgr.get_all_specs().items():
if name.startswith('backend'):
print("Removing existing Backend.AI kernel: {0}"
.format(info['spec']['display_name']))
mgr.remove_kernel_spec(name)
def install_kernel_spec(name, spec_json, user=True, prefix=None):
with TemporaryDirectory() as td:
os.chmod(td, 0o755) # Starts off as 700, not user readable
with open(os.path.join(td, 'kernel.json'), 'w') as f:
json.dump(spec_json, f, sort_keys=True)
print("Installing Backend.AI Jupyter kernel spec: {0}"
.format(spec_json['display_name']))
KernelSpecManager().install_kernel_spec(
td, name, user=user, replace=True, prefix=prefix)
def query_yes_no(prompt):
valid = {'y': True, 'yes': True, 'n': False, 'no': False}
while True:
choice = input('{0} [y/n] '.format(prompt)).lower()
if choice in valid:
return valid[choice]
else:
            prompt = 'Please answer in y/yes/n/no.'
def _is_root():
try:
return os.geteuid() == 0
except AttributeError:
return False # assume not an admin on non-Unix platforms
def main(argv=None):
ap = argparse.ArgumentParser()
ap.add_argument('--user', action='store_true',
help="Install to the per-user kernels registry. Default if not root.")
ap.add_argument('--sys-prefix', action='store_true',
help="Install to sys.prefix (e.g. a virtualenv or conda env)")
ap.add_argument('--clean-only', action='store_true',
help="Perform only clean-up of existing Backend.AI kernels.")
ap.add_argument('-q', '--quiet', action='store_true',
help="Do not ask the user anything.")
ap.add_argument('--prefix',
help="Install to the given prefix. "
"Kernelspec will be installed in {PREFIX}/share/jupyter/kernels/")
args = ap.parse_args(argv)
if args.sys_prefix:
args.prefix = sys.prefix
if not args.prefix and not _is_root():
args.user = True
clean_kernel_spec(user=args.user, prefix=args.prefix)
if args.clean_only:
return
for kern in kernels:
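        # Each kernelspec launches this package's kernel runner module and
        # selects the Backend.AI kernel class by name via the -k argument.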
spec = {
"argv": [sys.executable, "-m", "ai.backend.integration.jupyter",
"-f", "{connection_file}",
"--",
"-k", kern.__name__],
"display_name": kern.language_info['name'],
"language": kern.language,
}
install_kernel_spec(kern.__name__, spec, user=args.user, prefix=args.prefix)
if not args.quiet:
print()
has_api_key = bool(os.environ.get('BACKEND_ACCESS_KEY', ''))
if has_api_key:
print('It seems that you already configured the API key. Enjoy!')
else:
if query_yes_no('You can get your own API keypair from https://cloud.backend.ai. Do you want to open the site?'):
webbrowser.open_new_tab('https://cloud.backend.ai')
print()
print('If you already have the keypair or just grabbed a new one,')
print('run the following in your shell before running jupyter notebook:\n')
print(' export BACKEND_ACCESS_KEY="AKIA..."')
print(' export BACKEND_SECRET_KEY="......."\n')
if __name__ == '__main__':
main()
| mit | 8,950,631,238,253,865,000 | 36.027523 | 125 | 0.610505 | false |
DonHilborn/DataGenerator | faker/providers/lv_LV/person.py | 1 | 4365 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ..person import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{last_name}}, {{first_name}}'
)
first_names = (
'Ādams', 'Ādolfs', 'Agris', 'Aigars', 'Ainārs', 'Aivars', 'Alberts',
'Aldis', 'Aleksandrs', 'Alfrēds', 'Andrejs', 'Andris', 'Andrešs', 'Ansis',
'Antons', 'Armands', 'Arnis', 'Arnolds', 'Artis', 'Arturs', 'Artūrs', 'Arvīds',
'Augusts', 'Bērends', 'Bērtulis', 'Brencis', 'Dainis', 'Daniels', 'Dāvis',
'Dzintars', 'Edgars', 'Edmunds', 'Eduards', 'Edvīns', 'Egils', 'Elmārs', 'Elvis',
'Emīls', 'Ēriks', 'Ermanis', 'Ernests', 'Ēvalds', 'Fricis', 'Gatis', 'Gunārs',
'Guntars', 'Guntis', 'Ģederts', 'Ģirts', 'Hanss', 'Harijs', 'Henriks', 'Hermanis',
'Igors', 'Ilmārs', 'Imants', 'Indriķis', 'Ivars', 'Ivo', 'Jakobs', 'Janis', 'Jānis',
'Jannis', 'Jāzeps', 'Jēkabs', 'Jēkaubs', 'Jezups', 'Johans', 'Jūlijs', 'Juris', 'Kārlis',
'Kaspars', 'Konradus', 'Kristaps', 'Kristers', 'Krists', 'Krišjānis', 'Krišs', 'Laimonis',
'Lauris', 'Leons', 'Macs', 'Mareks', 'Māris', 'Mārtiņš', 'Matīss', 'Mihels', 'Mikels',
'Miķelis', 'Modris', 'Nikolajs', 'Niks', 'Normunds', 'Oļģerts', 'Oskars', 'Osvalds',
'Oto', 'Pauls', 'Pēteris', 'Raimonds', 'Raivis', 'Reinis', 'Ričards', 'Rihards', 'Roberts',
'Rolands', 'Rūdolfs', 'Sandis', 'Staņislavs', 'Tenis', 'Teodors', 'Toms', 'Uldis', 'Valdis',
'Viesturs', 'Viktors', 'Vilis', 'Vilnis', 'Viļums', 'Visvaldis', 'Vladislavs', 'Voldemārs',
'Ziedonis', 'Žanis', 'Agnese', 'Aiga', 'Aija', 'Aina', 'Alīda', 'Alise', 'Alma', 'Alvīne',
'Amālija', 'Anete', 'Anita', 'Anna', 'Annija', 'Antoņina', 'Antra', 'Ārija', 'Ausma', 'Austra',
'Baba', 'Baiba', 'Berta', 'Biruta', 'Broņislava', 'Dace', 'Daiga', 'Daina', 'Dārta', 'Diāna',
'Doroteja', 'Dzidra', 'Dzintra', 'Eda', 'Edīte', 'Elīna', 'Elita', 'Elizabete', 'Elvīra', 'Elza',
'Emīlija', 'Emma', 'Ērika', 'Erna', 'Eva', 'Evija', 'Evita', 'Gaida', 'Genovefa', 'Grēta', 'Grieta',
'Gunita', 'Gunta', 'Helēna', 'Ieva', 'Ilga', 'Ilona', 'Ilze', 'Ina', 'Ināra', 'Indra', 'Inese', 'Ineta',
'Inga', 'Ingrīda', 'Inguna', 'Inta', 'Irēna', 'Irma', 'Iveta', 'Jana', 'Janina', 'Jūle', 'Jūla',
'Jūlija', 'Karina', 'Karlīna', 'Katarīna', 'Katrīna', 'Krista', 'Kristiāna', 'Laila', 'Laura',
'Lavīze', 'Leontīne', 'Lība', 'Lidija', 'Liene', 'Līga', 'Ligita', 'Lilija', 'Lilita', 'Līna',
'Linda', 'Līza', 'Lizete', 'Lūcija', 'Madara', 'Made', 'Maija', 'Māra', 'Mare', 'Margareta', 'Margrieta',
'Marija', 'Mārīte', 'Marta', 'Maža', 'Milda', 'Minna', 'Mirdza', 'Monika', 'Natālija', 'Olga', 'Otīlija',
'Paula', 'Paulīna', 'Rasma', 'Regīna', 'Rita', 'Rudīte', 'Ruta', 'Rute', 'Samanta', 'Sandra', 'Sanita',
'Santa', 'Sapa', 'Sarmīte', 'Silvija', 'Sintija', 'Skaidrīte', 'Solvita', 'Tekla', 'Trīne', 'Valda',
'Valentīna', 'Valija', 'Velta', 'Veneranda', 'Vera', 'Veronika', 'Vija', 'Vilma', 'Vineta', 'Vita', 'Zane',
'Zelma', 'Zenta', 'Zigrīda'
)
last_names = (
'Ābele', 'Āboliņš', 'Ābols', 'Alksnis', 'Apinis', 'Apsītis', 'Auniņš',
'Auziņš', 'Avotiņš', 'Balodis', 'Baltiņš', 'Bērziņš', 'Birznieks', 'Bite',
'Briedis', 'Caune', 'Celmiņš', 'Celms', 'Cīrulis', 'Dzenis', 'Dūmiņš', 'Eglītis',
'Jaunzems', 'Kalējs', 'Kalniņš', 'Kaņeps', 'Kārkliņš', 'Kauliņš', 'Kļaviņš', 'Krastiņš',
'Krēsliņš', 'Krieviņš', 'Krievs', 'Krūmiņš', 'Krūze', 'Kundziņš', 'Lācis', 'Lagzdiņš',
'Lapsa', 'Līcis', 'Liepa', 'Liepiņš', 'Lukstiņš', 'Lūsis', 'Paegle', 'Pērkons', 'Podnieks',
'Polis', 'Priede', 'Priedītis', 'Puriņš', 'Purmals', 'Riekstiņš', 'Roze', 'Rozītis', 'Rubenis',
'Rudzītis', 'Saulītis', 'Siliņš', 'Skuja', 'Skujiņš', 'Sproģis', 'Strazdiņš', 'Turiņš', 'Vanags', 'Vīksna',
'Vilciņš', 'Vilks', 'Vītoliņš', 'Vītols', 'Zaķis', 'Zālītis', 'Zariņš', 'Zeltiņš', 'Ziemelis', 'Zirnis',
'Zvaigzne', 'Zvirbulis'
)
| mit | 7,871,736,875,233,210,000 | 72.421053 | 115 | 0.549343 | false |
deryni/cockpit | tools/title2sentence.py | 1 | 4253 | #!/usr/bin/env python3
# This file is part of Cockpit.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
import sys
import argparse
keep_words = [
'Web Console', 'Cockpit',
'Red Hat',
'Insights',
'Docker',
'Customer Portal',
'SELinux', 'SETroubleshoot',
'Tang',
'iSCSI',
'Linux',
'NetworkManager',
'PackageKit',
'vCPU',
'IPv4', 'IPv6',
'IoT',
'ID',
': Server',
': Invalid',
'KiB', 'MiB', 'GiB',
'ABRT Analytics',
'GNOME Software',
'CAs', 'VMs', 'CPUs',
'Hour : Minute',
'Ctrl+Alt',
'$ExitCode',
'Launch Remote Viewer',
'Failed to start',
]
patterns = [
"of $0 CPU",
"No memory reserved. Append a crashkernel option",
"Cockpit was unable to log in",
"Cockpit had an unexpected internal error",
"You need to switch to",
"Free up space in this group",
"This day doesn",
"Tip: Make your key",
"virt-install package needs to be",
"You need to switch to",
]
the_map = []
# Replace exact positions
def replace(s, old_s, word):
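    # Copy word back into s (the lowercased string) at each position where it
    # occurs in old_s, blanking that occurrence in old_s so the search advances.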
if not word.strip():
return s
while word in old_s:
i = old_s.find(word)
s = s[:i] + word + s[i + len(word):]
old_s = old_s.replace(word, " " * len(word), 1)
return s
def capitalize(s):
for word in keep_words:
if s.startswith(word):
return s
return s[0].upper() + s[1:]
def main():
parser = argparse.ArgumentParser(description="TODO")
parser.add_argument("-i", "--input", required=True, help="File containing strings")
parser.add_argument("-o", "--output", required=True, help="File for output script to be written into")
opts = parser.parse_args()
with open(opts.input, "r") as f:
for line in f:
old_s = line.strip()
old_s = old_s[1:-1] # Remove first and last quotes
if not old_s:
continue
# Leave out strings that don't contain a single upper case letter
if not [x for x in old_s if x.isupper()]:
continue
# MEH: There are some strings that don't need any action but are tricky to ignore
skip = False
for pattern in patterns:
if old_s.startswith(pattern):
skip = True
if skip:
continue
# Backslash special characters
for c in ['"', "&", "$", "/"]:
if c in old_s:
old_s = old_s.replace(c, "\\{0}".format(c))
new_s = old_s.lower()
# Return words that should stay upper-case
for word in keep_words:
new_s = replace(new_s, old_s, word)
# Return words that were all caps before (stuff like 'CPU', 'DNS'...)
for word in old_s.split(" "):
if word == word.upper():
new_s = replace(new_s, old_s, word)
# Return capitalization of (multiple) sentences
sentences = new_s.split(". ")
Sentences = list(map(capitalize, sentences))
new_s = ". ".join(Sentences)
if new_s != old_s:
the_map.append([old_s, new_s])
# Generate script for replacing these strings
output = ""
if the_map:
output = "find pkg src test/verify -type f -exec sed -i \\\n"
for pair in the_map:
output += '-e "s/\([^ ]\){0}/\\1{1}/" \\\n'.format(pair[0], pair[1])
output += "{} \;"
with open(opts.output, "w") as f:
f.write(output)
if __name__ == '__main__':
sys.exit(main())
| lgpl-2.1 | 3,002,470,520,402,526,700 | 27.543624 | 106 | 0.5589 | false |
jclement/Cacheberry-Pi | lib/gislib.py | 1 | 3683 | # Adapted from code & formulas by David Z. Creemer and others
# http://www.zachary.com/blog/2005/01/12/python_zipcode_geo-programming
# http://williams.best.vwh.net/avform.htm
#
# Additions by Jeff Clement
from math import sin,cos,atan,acos,asin,atan2,sqrt,pi, modf, radians,degrees
# At the equator / on another great circle???
nauticalMilePerLat = 60.00721
nauticalMilePerLongitude = 60.10793
rad = pi / 180.0
milesPerNauticalMile = 1.15078
kmsPerNauticalMile = 1.85200
degreeInMiles = milesPerNauticalMile * 60
degreeInKms = kmsPerNauticalMile * 60
# earth's mean radius = 6,371km
earthradius = 6371.0
def getDistance(loc1, loc2):
"aliased default algorithm; args are (lat_decimal,lon_decimal) tuples"
return getDistanceByHaversine(loc1, loc2)
def getDistanceByHaversine(loc1, loc2):
"Haversine formula - give coordinates as (lat_decimal,lon_decimal) tuples"
lat1, lon1 = loc1
lat2, lon2 = loc2
#if type(loc1[0]) == type(()):
# # convert from DMS to decimal
# lat1,lon1 = DMSToDecimal(loc1[0]),DMSToDecimal(loc1[1])
#if type(loc2[0]) == type(()):
# lat2,lon2 = DMSToDecimal(loc2[0]),DMSToDecimal(loc2[1])
# convert to radians
lon1 = lon1 * pi / 180.0
lon2 = lon2 * pi / 180.0
lat1 = lat1 * pi / 180.0
lat2 = lat2 * pi / 180.0
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = (sin(dlat/2))**2 + cos(lat1) * cos(lat2) * (sin(dlon/2.0))**2
c = 2.0 * atan2(sqrt(a), sqrt(1.0-a))
km = earthradius * c
return km
def DecimalToDMS(decimalvalue):
"convert a decimal value to degree,minute,second tuple"
  fraction, degrees = modf(decimalvalue)
  minutes, seconds = divmod(abs(fraction) * 3600, 60)
  return (int(degrees), int(minutes), seconds)
def DMSToDecimal((degrees,minutes,seconds)):
"Convert a value from decimal (float) to degree,minute,second tuple"
d = abs(degrees) + (minutes/60.0) + (seconds/3600.0)
if degrees < 0:
return -d
else:
return d
def getCoordinateDiffForDistance(originlat, originlon, distance, units="km"):
"""return longitude & latitude values that, when added to & subtraced from
origin longitude & latitude, form a cross / 'plus sign' whose ends are
a given distance from the origin"""
degreelength = 0
if units == "km":
degreelength = degreeInKms
elif units == "miles":
degreelength = degreeInMiles
else:
raise Exception("Units must be either 'km' or 'miles'!")
lat = distance / degreelength
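  # one degree of longitude shrinks with cos(latitude), so scale the longitude offset accordingly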
lon = distance / (cos(originlat * rad) * degreelength)
return (lat, lon)
def isWithinDistance(origin, loc, distance):
"boolean for checking whether a location is within a distance"
if getDistanceByHaversine(origin, loc) <= distance:
return True
else:
return False
def isAngleWithin(a1, a2, threshold):
"determine if two angles are within {threshold} degrees of each other"
a_min = min(a1, a2)
a_max = max(a1, a2)
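  # handle wrap-around at 360 degrees: also check the short way around the circle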
if (a_max-a_min) > threshold:
return ((a_min+360) - a_max) <= threshold
return (a_max - a_min) <= threshold
def calculateBearing(start, target):
"calculate a bearing in degrees (N=0 deg) from start to target point"
lat1, lon1 = map(radians, start)
lat2, lon2 = map(radians, target)
dLon = lon2-lon1
y = sin(dLon) * cos(lat2)
x = cos(lat1)*sin(lat2) - \
sin(lat1)*cos(lat2)*cos(dLon)
return (degrees(atan2(y, x))) % 360
def humanizeBearing(bearing):
"convert a bearing in degrees to a human readable version"
#symbols = ['N','NE','E','SE','S','SW','W','NW']
symbols = ['N','NNE','NE','ENE','E','ESE','SE','SSE','S','SSW','SW','WSW','W','WNW','NW','NNW']
step = 360.0 / len(symbols)
for i in range(len(symbols)):
if isAngleWithin(i*step, bearing, step/2):
return symbols[i]
| bsd-3-clause | 593,282,141,699,682,600 | 28.230159 | 97 | 0.663046 | false |
PolicyStat/selenium-old | py/selenium/webdriver/common/exceptions.py | 1 | 2133 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions that may happen in all the webdriver code."""
class WebDriverException(Exception):
def __init__(self, msg=None):
Exception.__init__(self, msg)
class ErrorInResponseException(WebDriverException):
"""An error has occurred on the server side.
This may happen when communicating with the firefox extension
or the remote driver server."""
def __init__(self, response, msg):
WebDriverException.__init__(self, msg)
self.response = response
class InvalidSwitchToTargetException(WebDriverException):
"""The frame or window target to be switched doesn't exist."""
pass
class NoSuchFrameException(InvalidSwitchToTargetException):
pass
class NoSuchWindowException(InvalidSwitchToTargetException):
pass
class NoSuchElementException(WebDriverException):
"""find_element_by_* can't find the element."""
pass
class NoSuchAttributeException(WebDriverException):
"""find_element_by_* can't find the element."""
pass
class StaleElementReferenceException(WebDriverException):
pass
class ElementNotVisibleException(WebDriverException):
pass
class InvalidElementStateException(WebDriverException):
pass
class ElementNotSelectableException(WebDriverException):
pass
class InvalidCookieDomainException(WebDriverException):
pass
class UnableToSetCookieException(WebDriverException):
pass
class RemoteDriverServerException(WebDriverException):
pass
class TimeoutException(WebDriverException):
pass
| apache-2.0 | -8,961,527,223,932,074,000 | 29.471429 | 74 | 0.763244 | false |
te-je/fargo | test/test_fargo.py | 1 | 3982 | import pytest
from dulwich.repo import Repo
from fargo import find_and_replace
suits = [u'\u2660', u'\u2665', u'\u2666', u'\u2663']
ranks = list(str(i) for i in range(2, 11)) + ['J', 'Q', 'K', 'A']
deck_text = u"This here is a deck of cards man:\n\n{}".format(
"\n".join(''.join((r, s)) for s in suits for r in ranks)
)
@pytest.fixture
def repo(tmpdir):
test_file = tmpdir.join('test_file')
test_file.write(
u"For here we have\nA test file\nwith only useless stuff\n"
)
# Add some unicode data
for encoding in ('utf8', 'utf16'):
deck_file = tmpdir.join('deck').join(encoding)
deck_file.write_text(deck_text, encoding=encoding, ensure=True)
repo = Repo.init(tmpdir.strpath)
repo.stage([b'test_file', b'deck/utf8', b'deck/utf16'])
repo.do_commit(b'initial',
committer=b'Te-je Rodgers <[email protected]>')
return repo
@pytest.mark.parametrize('interactive', [True, False])
@pytest.mark.parametrize('encoding', ['utf8', 'utf16'])
@pytest.mark.parametrize('search,sub', [
(u'\u2660', u'S'),
(u'\u2665', u'\u2661'),
(u'\u2666', u'of Diamonds')
])
def test_replace_unicode_cards(mocker, repo, tmpdir, interactive, encoding,
search, sub):
mocker.patch('fargo.click.prompt').return_value = 'yes'
find_and_replace(search, sub, repo=str(tmpdir), interactive=interactive)
deck_file = tmpdir.join('deck').join(encoding)
new_deck_text = deck_file.read_text(encoding=encoding)
assert deck_text.replace(search, sub) == new_deck_text
@pytest.mark.parametrize('interactive', [True, False])
@pytest.mark.parametrize('enc', ['utf8', 'utf16'])
@pytest.mark.parametrize('search,sub', [
(u'([2-9]|10|J|Q|K|A)(\u2660|\u2665|\u2666|\u2663)', u'\\2\\1'),
(u'(?P<rank>[2-9]|10|J|Q|K|A)(?P<suit>\u2660|\u2665|\u2666|\u2663)',
u'\\g<suit>\\g<rank>')
])
def test_regex_replace_unicode_cards(mocker, repo, tmpdir, enc, interactive,
search, sub):
mocker.patch('fargo.click.prompt').return_value = 'yes'
find_and_replace(
search, sub,
use_regex=True, repo=str(tmpdir), interactive=interactive
)
deck_file = tmpdir.join('deck').join(enc)
new_deck_text = deck_file.read_text(encoding=enc)
expected_deck_text = u"This here is a deck of cards man:\n\n{}".format(
"\n".join(''.join((s, r)) for s in suits for r in ranks)
)
assert new_deck_text == expected_deck_text
@pytest.mark.parametrize('encoding', ['utf8', 'utf16'])
@pytest.mark.parametrize('search,sub', [
(u'\u2660', 'S'),
(u'\u2665', '\u2661'),
(u'\u2666', 'of Diamonds')
])
def test_replace_unicode_cards_reject_interactive(
mocker, repo, tmpdir, encoding, search, sub):
mocker.patch('fargo.click.prompt').return_value = 'no'
find_and_replace(search, sub, repo=str(tmpdir), interactive=True)
deck_file = tmpdir.join('deck').join(encoding)
new_deck_text = deck_file.read_text(encoding=encoding)
assert deck_text.replace(search, sub) != new_deck_text
def test_interactive_specific_items(mocker, repo, tmpdir):
# Replace only the first and second items
mocker.patch('fargo.click.prompt').return_value = '0 1'
find_and_replace('e', 'E', repo=str(tmpdir), interactive=True)
test_file = tmpdir.join('test_file')
new_text = test_file.read()
expected_text = u"For hErE we have\nA tEst filE\nwith only usElEss stuff\n"
assert new_text == expected_text
@pytest.mark.parametrize('search,sub', [
(u'\u2660', 'S'),
(u'\u2665', '\u2661'),
(u'\u2666', 'of Diamonds')
])
def test_chardet_threshold(mocker, repo, tmpdir, search, sub):
find_and_replace(search, sub, repo=str(tmpdir),
chardet_threshold=1, interactive=False)
deck_file = tmpdir.join('deck').join('utf8')
new_deck_text = deck_file.read_text(encoding='utf8')
assert deck_text.replace(search, sub) != new_deck_text
| mit | -5,697,366,596,718,920,000 | 33.327586 | 79 | 0.632848 | false |
kubernetes-client/python | kubernetes/client/models/v1alpha1_audit_sink.py | 1 | 6649 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1alpha1AuditSink(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1alpha1AuditSinkSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1AuditSink - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1alpha1AuditSink. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1alpha1AuditSink. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1alpha1AuditSink.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1alpha1AuditSink. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1alpha1AuditSink. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1alpha1AuditSink. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1alpha1AuditSink.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1alpha1AuditSink. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1alpha1AuditSink. # noqa: E501
:return: The metadata of this V1alpha1AuditSink. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1alpha1AuditSink.
:param metadata: The metadata of this V1alpha1AuditSink. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1alpha1AuditSink. # noqa: E501
:return: The spec of this V1alpha1AuditSink. # noqa: E501
:rtype: V1alpha1AuditSinkSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1alpha1AuditSink.
:param spec: The spec of this V1alpha1AuditSink. # noqa: E501
:type: V1alpha1AuditSinkSpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1AuditSink):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1AuditSink):
return True
return self.to_dict() != other.to_dict()
| apache-2.0 | 5,829,677,061,528,971,000 | 31.915842 | 312 | 0.611671 | false |
jmdejong/Asciifarm | asciifarm/client/display.py | 1 | 5695 |
import os
from ratuil.layout import Layout
from ratuil.bufferedscreen import BufferedScreen as Screen
#from ratuil.screen import Screen
from ratuil.textstyle import TextStyle
from asciifarm.common.utils import get
from .listselector import ListSelector
SIDEWIDTH = 20
ALPHABET = "!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
class Display:
def __init__(self, charMap):
self.characters = {}
def parseSprite(sprite):
if isinstance(sprite, str):
return (sprite, None, None)
char = get(sprite, 0, " ")
fg = get(sprite, 1)
bg = get(sprite, 2)
return (char, fg, bg)
for name, sprite in charMap["mapping"].items():
vals = parseSprite(sprite)
if vals:
self.characters[name] = vals
for name, colours in charMap.get("writable", {}).items():
fg = get(colours, 0)
bg = get(colours, 1)
for i in range(min(len(ALPHABET), len(charMap.get("alphabet", [])))):
self.characters[name + '-' + ALPHABET[i]] = (charMap["alphabet"][i], fg, bg)
self.defaultChar = parseSprite(charMap.get("default", "?"))
self.messageColours = charMap.get("msgcolours", {})
fname = os.path.join(os.path.dirname(__file__), "layout.xml")
self.layout = Layout.from_xml_file(fname)
self.layout.get("field").set_char_size(charMap.get("charwidth", 1))
self.screen = Screen()
self.screen.clear()
self.layout.set_target(self.screen)
self.layout.update()
# temporary, until these have a better place
self.inventory = ListSelector(self.getWidget("inventory"))
self.inventory._debug_name = "inventory"
self.equipment = ListSelector(self.getWidget("equipment"))
self.equipment._debug_name = "equipment"
self.ground = ListSelector(self.getWidget("ground"))
self.ground._debug_name = "ground"
self.switch = ListSelector(self.getWidget("switchtitles"))
self.switch._debug_name = "switch"
self.switch.setItems(["inventory", "equipment", "ground"])
self.menus = {
"inventory": self.inventory,
"equipment": self.equipment,
"ground": self.ground
}
self.layout.get("switch").select(0)
def getWidget(self, name):
return self.layout.get(name)
def resizeField(self, size):
self.getWidget("field").set_size(*size)
self.getWidget("fieldbackground").change()
def drawFieldCells(self, cells):
field = self.getWidget("field")
for cell in cells:
(x, y), spriteNames = cell
if not len(spriteNames):
char, fg, bg = self.getChar(' ')
else:
char, fg, bg = self.getChar(spriteNames[0])
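                # the topmost sprite supplies the glyph and foreground; scan down the stack for the first sprite that defines a background colour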
for spriteName in spriteNames[1:]:
if bg is not None:
break
_char, _fg, bg = self.getChar(spriteName)
field.change_cell(x, y, char, TextStyle(fg, bg))
def setFieldCenter(self, pos):
self.getWidget("field").set_center(*pos)
def setHealth(self, health, maxHealth):
if health is None:
health = 0
if maxHealth is None:
maxHealth = 0
self.getWidget("health").set_total(maxHealth)
self.getWidget("health").set_filled(health)
self.getWidget("healthtitle").format({"filled": health, "total":maxHealth})
def showInfo(self, infostring):
self.getWidget("info").set_text(infostring)
def selectMenu(self, *args, **kwargs):
self.switch.select(*args, **kwargs)
self.layout.get("switch").select(self.getSelectedMenu())
def getSelectedMenu(self):
return self.switch.getSelectedItem()
def getSelectedItem(self, menu=None):
return self._getMenu(menu).getSelected()
def selectItem(self, menu=None, *args, **kwargs):
self._getMenu(menu).select(*args, **kwargs)
def _getMenu(self, name=None):
if name is None:
name = self.getSelectedMenu()
name = name.casefold()
return self.menus[name]
def setInventory(self, items):
self.inventory.setItems(items)
def setEquipment(self, slots):
self.equipment.setItems([
slot + ": " + (item if item else "")
for slot, item in slots
])
def setGround(self, items):
self.ground.setItems(items)
def addMessage(self, message, msgtype=None):
if msgtype is not None:
style = TextStyle(*self.messageColours.get(msgtype, (7,0)))
else:
style = None
self.getWidget("msg").add_message(message, style)
def log(self, message):
self.addMessage(str(message))
def scrollBack(self, amount, relative=True):
self.getWidget("msg").scroll(amount, relative)
def setInputString(self, string, cursor):
self.getWidget("textinput").set_text(string, cursor)
def update(self):
self.layout.update()
self.screen.update()
def getChar(self, sprite):
"""This returns the character belonging to some spritename. This does not read a character"""
return self.characters.get(sprite, self.defaultChar)
def update_size(self):
self.screen.reset()
| gpl-3.0 | 3,941,003,261,231,911,400 | 31.729885 | 109 | 0.569096 | false |