repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
martyngigg/pyqt-msvc | doc/sphinx/conf.py | 1 | 6425 | # -*- coding: utf-8 -*-
#
# PyQt documentation build configuration file, created by
# sphinx-quickstart on Sat May 30 14:28:55 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyQt'
copyright = u'2015 Riverbank Computing Limited'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.11.4'
# The full version, including alpha/beta/rc tags.
release = '4.11.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
#exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "PyQt 4.11.4 Reference Guide"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'static/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'static/logo_tn.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyQtdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PyQt.tex', u'PyQt Documentation',
u'Riverbank Computing Limited', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
def setup(app):
""" Define roles specific to PyQt. """
pass
| gpl-3.0 | 1,007,111,443,806,572,400 | 31.125 | 80 | 0.711595 | false |
rananda/cfme_tests | cfme/middleware/provider/hawkular.py | 1 | 6288 | import re
from cfme.common import TopologyMixin, TimelinesMixin
from . import MiddlewareProvider
from utils.appliance import Navigatable
from utils.db import cfmedb
from utils.varmeth import variable
from . import _get_providers_page, _db_select_query
from . import download, MiddlewareBase, auth_btn, mon_btn
from utils.appliance.implementations.ui import navigate_to
from mgmtsystem.hawkular import Hawkular
@MiddlewareProvider.add_provider_type
class HawkularProvider(MiddlewareBase, TopologyMixin, TimelinesMixin, MiddlewareProvider):
"""
HawkularProvider class holds provider data. Used to perform actions on hawkular provider page
Args:
name: Name of the provider
hostname: Hostname/IP of the provider
port: http/https port of hawkular provider
credentials: see Credential inner class.
key: The CFME key of the provider in the yaml.
db_id: database row id of provider
Usage:
myprov = HawkularProvider(name='foo',
hostname='localhost',
port=8080,
credentials=Provider.Credential(principal='admin', secret='foobar')))
myprov.create()
myprov.num_deployment(method="ui")
"""
STATS_TO_MATCH = MiddlewareProvider.STATS_TO_MATCH +\
['num_server', 'num_domain', 'num_deployment', 'num_datasource', 'num_messaging']
property_tuples = MiddlewareProvider.property_tuples +\
[('name', 'Name'), ('hostname', 'Host Name'), ('port', 'Port'), ('provider_type', 'Type')]
type_name = "hawkular"
mgmt_class = Hawkular
def __init__(self, name=None, hostname=None, port=None, credentials=None, key=None,
appliance=None, **kwargs):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.hostname = hostname
self.port = port
self.provider_type = 'Hawkular'
if not credentials:
credentials = {}
self.credentials = credentials
self.key = key
self.db_id = kwargs['db_id'] if 'db_id' in kwargs else None
def _form_mapping(self, create=None, **kwargs):
return {'name_text': kwargs.get('name'),
'type_select': create and 'Hawkular',
'hostname_text': kwargs.get('hostname'),
'port_text': kwargs.get('port')}
@variable(alias='db')
def num_deployment(self):
return self._num_db_generic('middleware_deployments')
@num_deployment.variant('ui')
def num_deployment_ui(self, reload_data=True):
self.load_details(refresh=reload_data)
return int(self.get_detail("Relationships", "Middleware Deployments"))
@variable(alias='db')
def num_server(self):
return self._num_db_generic('middleware_servers')
@num_server.variant('ui')
def num_server_ui(self, reload_data=True):
self.load_details(refresh=reload_data)
return int(self.get_detail("Relationships", "Middleware Servers"))
@variable(alias='db')
def num_server_group(self):
res = cfmedb().engine.execute(
"SELECT count(*) "
"FROM ext_management_systems, middleware_domains, middleware_server_groups "
"WHERE middleware_domains.ems_id=ext_management_systems.id "
"AND middleware_domains.id=middleware_server_groups.domain_id "
"AND ext_management_systems.name='{0}'".format(self.name))
return int(res.first()[0])
@variable(alias='db')
def num_datasource(self):
return self._num_db_generic('middleware_datasources')
@num_datasource.variant('ui')
def num_datasource_ui(self, reload_data=True):
self.load_details(refresh=reload_data)
return int(self.get_detail("Relationships", "Middleware Datasources"))
@variable(alias='db')
def num_domain(self):
return self._num_db_generic('middleware_domains')
@num_domain.variant('ui')
def num_domain_ui(self, reload_data=True):
self.load_details(refresh=reload_data)
return int(self.get_detail("Relationships", "Middleware Domains"))
@variable(alias='db')
def num_messaging(self):
return self._num_db_generic('middleware_messagings')
@num_messaging.variant('ui')
def num_messaging_ui(self, reload_data=True):
self.load_details(refresh=reload_data)
return int(self.get_detail("Relationships", "Middleware Messagings"))
@variable(alias='ui')
def is_refreshed(self, reload_data=True):
self.load_details(refresh=reload_data)
if re.match('Success.*Minute.*Ago', self.get_detail("Status", "Last Refresh")):
return True
else:
return False
@is_refreshed.variant('db')
def is_refreshed_db(self):
ems = cfmedb()['ext_management_systems']
dates = cfmedb().session.query(ems.created_on,
ems.updated_on).filter(ems.name == self.name).first()
return dates.updated_on > dates.created_on
@classmethod
def download(cls, extension):
_get_providers_page()
download(extension)
def load_details(self, refresh=False):
"""Call super class `load_details` and load `db_id` if not set"""
MiddlewareProvider.load_details(self, refresh=refresh)
if not self.db_id or refresh:
tmp_provider = _db_select_query(
name=self.name, type='ManageIQ::Providers::Hawkular::MiddlewareManager').first()
self.db_id = tmp_provider.id
def load_topology_page(self):
navigate_to(self, 'TopologyFromDetails')
def recheck_auth_status(self):
self.load_details(refresh=True)
auth_btn("Re-check Authentication Status")
def load_timelines_page(self):
self.load_details()
mon_btn("Timelines")
@staticmethod
def from_config(prov_config, prov_key):
credentials_key = prov_config['credentials']
credentials = HawkularProvider.process_credential_yaml_key(credentials_key)
return HawkularProvider(
name=prov_config['name'],
key=prov_key,
hostname=prov_config['hostname'],
port=prov_config['port'],
credentials={'default': credentials})
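# Illustrative from_config() call (hypothetical yaml-derived values, not part of
# the original module):
#   HawkularProvider.from_config(
#       {'name': 'hawk01', 'hostname': 'hawkular.example.com', 'port': 8080,
#        'credentials': 'hawkular_creds'}, 'hawk01')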
| gpl-2.0 | -1,534,870,332,385,972,000 | 37.109091 | 98 | 0.637087 | false |
jimmy201602/webterminal | permission/forms.py | 1 | 4283 | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.models import Permission as AuthPermission
from permission.models import Permission
from django.contrib.contenttypes.models import ContentType
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Field
from django.db import models
from django.forms.widgets import CheckboxSelectMultiple
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
class RegisterForm(forms.Form):
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-2'
self.helper.field_class = 'col-md-8'
self.helper.layout = Layout(*[Div(field, css_class='form-group')
for field in ['user', 'newpassword1', 'newpassword2', 'email']])
self.instance = False
if 'instance' in kwargs.keys():
kwargs.pop('instance')
self.instance = True
super(RegisterForm, self).__init__(*args, **kwargs)
user = forms.CharField(
required=True,
label=_(u"user name"),
error_messages={'required': _(u'Please input a valid user.')},
max_length=100,
widget=forms.TextInput(
attrs={
'class': u"form-control",
}
)
)
newpassword1 = forms.CharField(
required=True,
label=_(u"your password"),
error_messages={'required': _(u'Please input your password')},
widget=forms.PasswordInput(
attrs={
'placeholder': _(u"new password"),
'class': u"form-control",
}
)
)
newpassword2 = forms.CharField(
required=True,
label=_(u"verify your password"),
error_messages={'required': _(u'please input your password again')},
widget=forms.PasswordInput(
attrs={
'placeholder': _(u"verify your password"),
'class': u"form-control",
}
)
)
email = forms.EmailField(
required=True,
label=_(u"email"),
error_messages={'required': _(
u'Please input a valid email address.')},
widget=forms.EmailInput(
attrs={
'class': u"form-control",
}
)
)
def clean(self):
if not self.is_valid():
            raise forms.ValidationError({'user': _(u"every field is required")})
elif self.cleaned_data['newpassword1'] != self.cleaned_data['newpassword2']:
            raise forms.ValidationError({'newpassword1': _(
                u"your passwords don't match"), 'newpassword2': _(u"your passwords don't match")})
elif self.cleaned_data['user']:
if not self.instance:
if User.objects.filter(username=self.cleaned_data['user']):
raise forms.ValidationError(
{'user': _(u"User name has been registered!")})
cleaned_data = super(RegisterForm, self).clean()
return cleaned_data
class CustomModelMultipleChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
return force_text(_(obj.name))
class PermissionForm(forms.ModelForm):
permissions = CustomModelMultipleChoiceField(queryset=AuthPermission.objects.
filter(content_type__app_label__in=[
'common', 'permission'], codename__contains='can_'),
widget=forms.CheckboxSelectMultiple())
def __init__(self, *args, **kwargs):
super(PermissionForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-2'
self.helper.field_class = 'col-md-8'
self.helper.layout = Layout(*[Div(field, css_class='form-group')
for field in ['user', 'permissions', 'groups']])
class Meta:
model = Permission
fields = ['user', 'permissions', 'groups']
| gpl-3.0 | 4,408,388,683,405,748,700 | 37.585586 | 108 | 0.573663 | false |
haeihaiehaei/Python-Projects | vmware/op5_vcsa_plugin/check_vcsa.py | 1 | 4239 | #!/usr/bin/python
"""
Op5 check to get the health of the VCenter Appliance via REST API.
Copyright 2017 Martin Persson
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
author = 'Martin Persson'
url = 'https://github.com/haeihaiehaei/Python-Projects/blob/master/vmware/op5_vcsa_plugin/check_vcsa.py'
version = '0.1'
try:
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import json
import sys
import argparse
import base64
except ImportError:
print "Error: missing one of the libraries (requests, json, sys, argparse, base64)"
sys.exit()
# Disable the unverified HTTPS warnings. We are not running certificates.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Handle our arguments here.
parser = argparse.ArgumentParser(
description=__doc__,
epilog='Developed by %s - For more information see: "%s"'
% (author, url))
parser.add_argument('-u', '--username', dest='username', required=True, help='Username, ex administrator')
parser.add_argument('-p', '--password', dest='password', required=True, help='Password for the user')
parser.add_argument('-d', '--domain', dest='domain', required=True, help='domain name for the vcenter')
parser.add_argument('-U', '--url', dest='url', required=True, help='url to the vcenter')
parser.add_argument('-c', '--check', dest='check', required=True, help='what we are checking; the following are available: database-storage, load, mem, storage')
parser.add_argument('-v', '--version', action='version', version='%(prog)s (version 0.16)')
args = parser.parse_args()
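# Example invocation (hypothetical host and credentials); the request URL is
# built as https://<url>.<domain>, e.g. https://vcenter01.example.com:
#   ./check_vcsa.py -u administrator -p secret -d example.com -U vcenter01 -c mem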
def login():
credentials = str(args.username) + ":" + str(args.password)
# To send the authentication header we need to convert it to Base64.
b64credentials = "Basic" + " " + base64.b64encode(credentials)
url = "https://" + str(args.url) + "." + str(args.domain) + "/rest/com/vmware/cis/session"
payload = ""
headers = {
'content-type': "application/json",
'authorization': b64credentials,
}
# Set the session_id to a global variable so we can use it later.
global session_id
session = requests.request("POST", url, data=payload, headers=headers, verify=False)
session_id = json.loads(session.text)['value']
def health_check():
url = "https://" + str(args.url) + "." + str(args.domain) + "/rest/appliance/health/" + str(args.check)
headers = {
'vmware-api-session-id': "%s" % session_id
}
response = requests.request("GET", url, headers=headers, verify=False)
value = json.loads(response.text)['value']
if value == 'green':
print('OK')
logout()
sys.exit(0)
elif value == 'yellow':
print('Warning')
sys.exit(1)
elif value == 'orange':
print('Warning')
sys.exit(1)
elif value == 'red':
print('Critical.')
logout()
sys.exit(2)
def logout():
url = "https://" + str(args.url) + "." + str(args.domain) + "/rest/com/vmware/cis/session"
headers = {
'vmware-api-session-id': "%s" % session_id
}
logout_value = requests.request("DELETE", url, headers=headers, verify=False)
print(logout_value.text)
login()
health_check()
logout()
| gpl-3.0 | -1,029,411,036,243,976,400 | 38.25 | 460 | 0.689314 | false |
QinerTech/QinerApps | openerp/addons/website_blog/models/website_blog.py | 2 | 12097 | # -*- coding: utf-8 -*-
from datetime import datetime
import lxml
import random
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.addons.website.models.website import slug
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools.translate import html_translate
class Blog(osv.Model):
_name = 'blog.blog'
_description = 'Blogs'
_inherit = ['mail.thread', 'website.seo.metadata']
_order = 'name'
_columns = {
'name': fields.char('Blog Name', required=True, translate=True),
'subtitle': fields.char('Blog Subtitle', translate=True),
}
def all_tags(self, cr, uid, ids, min_limit=1, context=None):
req = """
SELECT
p.blog_id, count(*), r.blog_tag_id
FROM
blog_post_blog_tag_rel r
join blog_post p on r.blog_post_id=p.id
WHERE
p.blog_id in %s
GROUP BY
p.blog_id,
r.blog_tag_id
ORDER BY
count(*) DESC
"""
cr.execute(req, [tuple(ids)])
tag_by_blog = {i: [] for i in ids}
for blog_id, freq, tag_id in cr.fetchall():
if freq >= min_limit:
tag_by_blog[blog_id].append(tag_id)
tag_obj = self.pool['blog.tag']
for blog_id in tag_by_blog:
tag_by_blog[blog_id] = tag_obj.browse(cr, uid, tag_by_blog[blog_id], context=context)
return tag_by_blog
class BlogTag(osv.Model):
_name = 'blog.tag'
_description = 'Blog Tag'
_inherit = ['website.seo.metadata']
_order = 'name'
_columns = {
'name': fields.char('Name', required=True),
'post_ids': fields.many2many(
'blog.post', string='Posts',
),
}
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists !"),
]
class BlogPost(osv.Model):
_name = "blog.post"
_description = "Blog Post"
_inherit = ['mail.thread', 'website.seo.metadata', 'website.published.mixin']
_order = 'id DESC'
_mail_post_access = 'read'
def _website_url(self, cr, uid, ids, field_name, arg, context=None):
res = super(BlogPost, self)._website_url(cr, uid, ids, field_name, arg, context=context)
for blog_post in self.browse(cr, uid, ids, context=context):
res[blog_post.id] = "/blog/%s/post/%s" % (slug(blog_post.blog_id), slug(blog_post))
return res
def _compute_ranking(self, cr, uid, ids, name, arg, context=None):
res = {}
for blog_post in self.browse(cr, uid, ids, context=context):
age = datetime.now() - datetime.strptime(blog_post.create_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
res[blog_post.id] = blog_post.visits * (0.5+random.random()) / max(3, age.days)
return res
def _default_content(self, cr, uid, context=None):
return ''' <div class="container">
<section class="mt16 mb16">
<p class="o_default_snippet_text">''' + _("Start writing here...") + '''</p>
</section>
</div> '''
_columns = {
'name': fields.char('Title', required=True, translate=True),
'subtitle': fields.char('Sub Title', translate=True),
'author_id': fields.many2one('res.partner', 'Author'),
'cover_properties': fields.text('Cover Properties'),
'blog_id': fields.many2one(
'blog.blog', 'Blog',
required=True, ondelete='cascade',
),
'tag_ids': fields.many2many(
'blog.tag', string='Tags',
),
'content': fields.html('Content', translate=html_translate, sanitize=False),
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', '&', ('model', '=', self._name), ('message_type', '=', 'comment'), ('path', '=', False)
],
string='Website Messages',
help="Website communication history",
),
# creation / update stuff
'create_date': fields.datetime(
'Created on',
select=True, readonly=True,
),
'create_uid': fields.many2one(
'res.users', 'Author',
select=True, readonly=True,
),
'write_date': fields.datetime(
'Last Modified on',
select=True, readonly=True,
),
'write_uid': fields.many2one(
'res.users', 'Last Contributor',
select=True, readonly=True,
),
'author_avatar': fields.related(
'author_id', 'image_small',
string="Avatar", type="binary"),
'visits': fields.integer('No of Views'),
'ranking': fields.function(_compute_ranking, string='Ranking', type='float'),
}
_defaults = {
'name': '',
'content': _default_content,
'cover_properties': '{"background-image": "none", "background-color": "oe_none", "opacity": "0.6", "resize_class": ""}',
'author_id': lambda self, cr, uid, ctx=None: self.pool['res.users'].browse(cr, uid, uid, context=ctx).partner_id.id,
}
def html_tag_nodes(self, html, attribute=None, tags=None, context=None):
""" Processing of html content to tag paragraphs and set them an unique
ID.
:return result: (html, mappin), where html is the updated html with ID
and mapping is a list of (old_ID, new_ID), where old_ID
is None is the paragraph is a new one. """
existing_attributes = []
mapping = []
if not html:
return html, mapping
if tags is None:
tags = ['p']
if attribute is None:
attribute = 'data-unique-id'
# form a tree
root = lxml.html.fragment_fromstring(html, create_parent='div')
if not len(root) and root.text is None and root.tail is None:
return html, mapping
# check all nodes, replace :
# - img src -> check URL
# - a href -> check URL
for node in root.iter():
if node.tag not in tags:
continue
ancestor_tags = [parent.tag for parent in node.iterancestors()]
old_attribute = node.get(attribute)
new_attribute = old_attribute
if not new_attribute or (old_attribute in existing_attributes):
if ancestor_tags:
ancestor_tags.pop()
counter = random.randint(10000, 99999)
ancestor_tags.append('counter_%s' % counter)
new_attribute = '/'.join(reversed(ancestor_tags))
node.set(attribute, new_attribute)
existing_attributes.append(new_attribute)
mapping.append((old_attribute, new_attribute))
html = lxml.html.tostring(root, pretty_print=False, method='html')
        # this is ugly, but lxml/etree tostring wants to put everything in a 'div' that breaks the editor -> remove that
if html.startswith('<div>') and html.endswith('</div>'):
html = html[5:-6]
return html, mapping
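    # Illustrative behaviour (hypothetical counter value): called with
    # html='<p>a</p>', this returns
    # ('<p data-unique-id="counter_12345">a</p>', [(None, 'counter_12345')]).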
def _postproces_content(self, cr, uid, id, content=None, context=None):
if content is None:
content = self.browse(cr, uid, id, context=context).content
if content is False:
return content
content, mapping = self.html_tag_nodes(content, attribute='data-chatter-id', tags=['p'], context=context)
if id: # not creating
existing = [x[0] for x in mapping if x[0]]
msg_ids = self.pool['mail.message'].search(cr, SUPERUSER_ID, [
('res_id', '=', id),
('model', '=', self._name),
('path', 'not in', existing),
('path', '!=', False)
], context=context)
self.pool['mail.message'].unlink(cr, SUPERUSER_ID, msg_ids, context=context)
return content
def _check_for_publication(self, cr, uid, ids, vals, context=None):
if vals.get('website_published'):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
for post in self.browse(cr, uid, ids, context=context):
post.blog_id.message_post(
body='<p>%(post_publication)s <a href="%(base_url)s/blog/%(blog_slug)s/post/%(post_slug)s">%(post_link)s</a></p>' % {
'post_publication': _('A new post %s has been published on the %s blog.') % (post.name, post.blog_id.name),
'post_link': _('Click here to access the post.'),
'base_url': base_url,
'blog_slug': slug(post.blog_id),
'post_slug': slug(post),
},
subtype='website_blog.mt_blog_blog_published')
return True
return False
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if 'content' in vals:
vals['content'] = self._postproces_content(cr, uid, None, vals['content'], context=context)
create_context = dict(context, mail_create_nolog=True)
post_id = super(BlogPost, self).create(cr, uid, vals, context=create_context)
self._check_for_publication(cr, uid, [post_id], vals, context=context)
return post_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if 'content' in vals:
vals['content'] = self._postproces_content(cr, uid, ids[0], vals['content'], context=context)
result = super(BlogPost, self).write(cr, uid, ids, vals, context)
self._check_for_publication(cr, uid, ids, vals, context=context)
return result
def get_access_action(self, cr, uid, ids, context=None):
""" Override method that generated the link to access the document. Instead
of the classic form view, redirect to the post on the website directly """
post = self.browse(cr, uid, ids[0], context=context)
return {
'type': 'ir.actions.act_url',
'url': '/blog/%s/post/%s' % (post.blog_id.id, post.id),
'target': 'self',
            'res_id': post.id,
}
def _notification_get_recipient_groups(self, cr, uid, ids, message, recipients, context=None):
""" Override to set the access button: everyone can see an access button
on their notification email. It will lead on the website view of the
post. """
res = super(BlogPost, self)._notification_get_recipient_groups(cr, uid, ids, message, recipients, context=context)
access_action = self._notification_link_helper('view', model=message.model, res_id=message.res_id)
for category, data in res.iteritems():
res[category]['button_access'] = {'url': access_action, 'title': _('View Blog Post')}
return res
class Website(osv.Model):
_inherit = "website"
def page_search_dependencies(self, cr, uid, view_id, context=None):
dep = super(Website, self).page_search_dependencies(cr, uid, view_id, context=context)
post_obj = self.pool.get('blog.post')
view = self.pool.get('ir.ui.view').browse(cr, uid, view_id, context=context)
name = view.key.replace("website.", "")
fullname = "website.%s" % name
dom = [
'|', ('content', 'ilike', '/page/%s' % name), ('content', 'ilike', '/page/%s' % fullname)
]
posts = post_obj.search(cr, uid, dom, context=context)
if posts:
page_key = _('Blog Post')
dep[page_key] = []
for p in post_obj.browse(cr, uid, posts, context=context):
dep[page_key].append({
'text': _('Blog Post <b>%s</b> seems to have a link to this page !') % p.name,
'link': p.website_url
})
return dep
| gpl-3.0 | 4,718,703,543,692,162,000 | 39.868243 | 137 | 0.55427 | false |
nhuntwalker/rational_whimsy | rational_whimsy/blog/models.py | 1 | 2123 | """The Blog Post model."""
from django.db import models
from django.dispatch import receiver
from django.db.models.signals import post_save
from redactor.fields import RedactorField
from taggit.managers import TaggableManager
# Create your models here.
PUBLICATION_STATUS = (
("published", "Published"),
("draft", "Draft"),
("private", "Private")
)
class PostManager(models.Manager):
"""Retrieve all the published posts in reverse date order."""
def get_queryset(self):
"""Alter the queryset returned."""
return super(
PostManager,
self
).get_queryset().filter(status="published").order_by("-published_date")
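    # e.g. Post.published.all() yields only published posts, newest first, while
    # Post.objects.all() still returns every post (illustrative note).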
class Post(models.Model):
"""The model for an individual blog post."""
title = models.CharField(name="title", max_length=255)
cover_img = models.ImageField(upload_to="post_covers", default="post_covers/stock-cover.jpg")
body = RedactorField(verbose_name="body")
created = models.DateTimeField(name="created", auto_now_add=True)
published_date = models.DateTimeField(
name="published_date",
blank=True,
null=True
)
modified = models.DateTimeField(name="modified", auto_now=True)
slug = models.SlugField(max_length=255, unique=True)
status = models.CharField(
name="status", choices=PUBLICATION_STATUS,
default="draft", max_length=20)
featured = models.BooleanField(default=False)
objects = models.Manager()
published = PostManager()
tags = TaggableManager()
def __str__(self):
"""The string representation of the object."""
return self.title
@receiver(post_save, sender=Post)
def unfeature_posts(sender, **kwargs):
"""Reset feature status when saved post is featured.
When a post is saved (either added or edited), if it's checked as being
featured then make every/any other featured post unfeatured.
"""
if kwargs["instance"].featured:
other_posts = Post.objects.exclude(pk=kwargs["instance"].pk)
for post in other_posts:
post.featured = False
post.save()
| mit | -6,685,609,270,705,033,000 | 30.220588 | 97 | 0.666039 | false |
home-assistant/home-assistant | tests/components/stream/test_recorder.py | 1 | 9139 | """The tests for hls streams."""
from __future__ import annotations
import asyncio
from collections import deque
from datetime import timedelta
from io import BytesIO
import logging
import os
import threading
from unittest.mock import patch
import async_timeout
import av
import pytest
from homeassistant.components.stream import create_stream
from homeassistant.components.stream.core import Segment
from homeassistant.components.stream.fmp4utils import get_init_and_moof_data
from homeassistant.components.stream.recorder import recorder_save_worker
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.stream.common import generate_h264_video
TEST_TIMEOUT = 7.0 # Lower than 9s home assistant timeout
MAX_ABORT_SEGMENTS = 20 # Abort test to avoid looping forever
class SaveRecordWorkerSync:
"""
Test fixture to manage RecordOutput thread for recorder_save_worker.
This is used to assert that the worker is started and stopped cleanly
to avoid thread leaks in tests.
"""
def __init__(self):
"""Initialize SaveRecordWorkerSync."""
self.reset()
self._segments = None
self._save_thread = None
def recorder_save_worker(self, file_out: str, segments: deque[Segment]):
"""Mock method for patch."""
logging.debug("recorder_save_worker thread started")
assert self._save_thread is None
self._segments = segments
self._save_thread = threading.current_thread()
self._save_event.set()
async def get_segments(self):
"""Return the recorded video segments."""
with async_timeout.timeout(TEST_TIMEOUT):
await self._save_event.wait()
return self._segments
async def join(self):
"""Verify save worker was invoked and block on shutdown."""
with async_timeout.timeout(TEST_TIMEOUT):
await self._save_event.wait()
self._save_thread.join(timeout=TEST_TIMEOUT)
assert not self._save_thread.is_alive()
def reset(self):
"""Reset callback state for reuse in tests."""
self._save_thread = None
self._save_event = asyncio.Event()
@pytest.fixture()
def record_worker_sync(hass):
"""Patch recorder_save_worker for clean thread shutdown for test."""
sync = SaveRecordWorkerSync()
with patch(
"homeassistant.components.stream.recorder.recorder_save_worker",
side_effect=sync.recorder_save_worker,
autospec=True,
):
yield sync
async def test_record_stream(hass, hass_client, record_worker_sync):
"""
Test record stream.
Tests full integration with the stream component, and captures the
stream worker and save worker to allow for clean shutdown of background
threads. The actual save logic is tested in test_recorder_save below.
"""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo track
source = generate_h264_video()
stream = create_stream(hass, source)
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path")
# After stream decoding finishes, the record worker thread starts
segments = await record_worker_sync.get_segments()
assert len(segments) >= 1
# Verify that the save worker was invoked, then block until its
# thread completes and is shutdown completely to avoid thread leaks.
await record_worker_sync.join()
stream.stop()
async def test_record_lookback(
hass, hass_client, stream_worker_sync, record_worker_sync
):
"""Exercise record with loopback."""
await async_setup_component(hass, "stream", {"stream": {}})
source = generate_h264_video()
stream = create_stream(hass, source)
# Start an HLS feed to enable lookback
stream.add_provider("hls")
stream.start()
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path", lookback=4)
# This test does not need recorder cleanup since it is not fully exercised
stream.stop()
async def test_recorder_timeout(hass, hass_client, stream_worker_sync):
"""
Test recorder timeout.
Mocks out the cleanup to assert that it is invoked after a timeout.
This test does not start the recorder save thread.
"""
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
with patch("homeassistant.components.stream.IdleTimer.fire") as mock_timeout:
# Setup demo track
source = generate_h264_video()
stream = create_stream(hass, source)
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path")
recorder = stream.add_provider("recorder")
await recorder.recv()
# Wait a minute
future = dt_util.utcnow() + timedelta(minutes=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert mock_timeout.called
stream_worker_sync.resume()
stream.stop()
await hass.async_block_till_done()
await hass.async_block_till_done()
async def test_record_path_not_allowed(hass, hass_client):
"""Test where the output path is not allowed by home assistant configuration."""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo track
source = generate_h264_video()
stream = create_stream(hass, source)
with patch.object(
hass.config, "is_allowed_path", return_value=False
), pytest.raises(HomeAssistantError):
await stream.async_record("/example/path")
async def test_recorder_save(tmpdir):
"""Test recorder save."""
# Setup
source = generate_h264_video()
filename = f"{tmpdir}/test.mp4"
# Run
recorder_save_worker(
filename, [Segment(1, *get_init_and_moof_data(source.getbuffer()), 4)]
)
# Assert
assert os.path.exists(filename)
async def test_recorder_discontinuity(tmpdir):
"""Test recorder save across a discontinuity."""
# Setup
source = generate_h264_video()
filename = f"{tmpdir}/test.mp4"
# Run
init, moof_data = get_init_and_moof_data(source.getbuffer())
recorder_save_worker(
filename,
[
Segment(1, init, moof_data, 4, 0),
Segment(2, init, moof_data, 4, 1),
],
)
# Assert
assert os.path.exists(filename)
async def test_recorder_no_segments(tmpdir):
"""Test recorder behavior with a stream failure which causes no segments."""
# Setup
filename = f"{tmpdir}/test.mp4"
# Run
recorder_save_worker("unused-file", [])
# Assert
assert not os.path.exists(filename)
async def test_record_stream_audio(
hass, hass_client, stream_worker_sync, record_worker_sync
):
"""
Test treatment of different audio inputs.
Record stream output should have an audio channel when input has
a valid codec and audio packets and no audio channel otherwise.
"""
await async_setup_component(hass, "stream", {"stream": {}})
for a_codec, expected_audio_streams in (
("aac", 1), # aac is a valid mp4 codec
("pcm_mulaw", 0), # G.711 is not a valid mp4 codec
("empty", 0), # audio stream with no packets
(None, 0), # no audio stream
):
record_worker_sync.reset()
stream_worker_sync.pause()
# Setup demo track
source = generate_h264_video(
container_format="mov", audio_codec=a_codec
) # mov can store PCM
stream = create_stream(hass, source)
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path")
recorder = stream.add_provider("recorder")
while True:
segment = await recorder.recv()
if not segment:
break
last_segment = segment
stream_worker_sync.resume()
result = av.open(
BytesIO(last_segment.init + last_segment.moof_data), "r", format="mp4"
)
assert len(result.streams.audio) == expected_audio_streams
result.close()
stream.stop()
await hass.async_block_till_done()
# Verify that the save worker was invoked, then block until its
# thread completes and is shutdown completely to avoid thread leaks.
await record_worker_sync.join()
async def test_recorder_log(hass, caplog):
"""Test starting a stream to record logs the url without username and password."""
await async_setup_component(hass, "stream", {"stream": {}})
stream = create_stream(hass, "https://abcd:[email protected]")
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path")
assert "https://abcd:[email protected]" not in caplog.text
assert "https://****:****@foo.bar" in caplog.text
| apache-2.0 | 2,988,359,849,612,386,000 | 31.066667 | 86 | 0.661998 | false |
nihilus/epanos | pyc_fmtstr_parser/printf_parse.py | 1 | 12979 | # ported from gnulib rev be7d73709d2b3bceb987f1be00a049bb7021bf87
#
# Copyright (C) 2014, Mark Laws.
# Copyright (C) 1999, 2002-2003, 2005-2007, 2009-2014 Free Software
# Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
import ctypes
from flufl.enum import Enum
sizeof = ctypes.sizeof
Arg_type = Enum('Arg_type', [str(x.strip()) for x in '''
TYPE_NONE
TYPE_SCHAR
TYPE_UCHAR
TYPE_SHORT
TYPE_USHORT
TYPE_INT
TYPE_UINT
TYPE_LONGINT
TYPE_ULONGINT
TYPE_LONGLONGINT
TYPE_ULONGLONGINT
TYPE_DOUBLE
TYPE_LONGDOUBLE
TYPE_CHAR
TYPE_WIDE_CHAR
TYPE_STRING
TYPE_WIDE_STRING
TYPE_POINTER
TYPE_COUNT_SCHAR_POINTER
TYPE_COUNT_SHORT_POINTER
TYPE_COUNT_INT_POINTER
TYPE_COUNT_LONGINT_POINTER
TYPE_COUNT_LONGLONGINT_POINTER
'''.splitlines() if x != ''])
FLAG_GROUP = 1 # ' flag
FLAG_LEFT = 2 # - flag
FLAG_SHOWSIGN = 4 # + flag
FLAG_SPACE = 8 # space flag
FLAG_ALT = 16 # # flag
FLAG_ZERO = 32 # 0 flag
# arg_index value indicating that no argument is consumed.
ARG_NONE = ~0
class Argument(object):
__slots__ = ['type', 'data']
class Arguments(object):
__slots__ = ['count', 'arg']
def __init__(self):
self.count = 0
self.arg = []
class Directive(object):
'''A parsed directive.'''
__slots__ = ['dir_start', 'dir_end', 'flags', 'width_start', 'width_end',
'width_arg_index', 'precision_start', 'precision_end',
'precision_arg_index', 'conversion', 'arg_index']
# conversion: d i o u x X f F e E g G a A c s p n U % but not C S
def __init__(self):
self.flags = 0
self.width_start = None
self.width_end = None
self.width_arg_index = ARG_NONE
self.precision_start = None
self.precision_end = None
self.precision_arg_index = ARG_NONE
self.arg_index = ARG_NONE
class Directives(object):
'''A parsed format string.'''
__slots__ = ['count', 'dir', 'max_width_length', 'max_precision_length']
def __init__(self):
self.count = 0
self.dir = []
def REGISTER_ARG(a, index, type):
n = index
while a.count <= n:
try:
a.arg[a.count]
except IndexError:
a.arg.append(Argument())
a.arg[a.count].type = Arg_type.TYPE_NONE
a.count += 1
if a.arg[n].type == Arg_type.TYPE_NONE:
a.arg[n].type = type
elif a.arg[n].type != type:
raise ValueError('ambiguous type for positional argument')
def conv_signed(c, flags):
# If 'long long' exists and is larger than 'long':
if flags >= 16 or flags & 4:
return c, Arg_type.TYPE_LONGLONGINT
else:
# If 'long long' exists and is the same as 'long', we parse "lld" into
# TYPE_LONGINT.
if flags >= 8:
type = Arg_type.TYPE_LONGINT
elif flags & 2:
type = Arg_type.TYPE_SCHAR
elif flags & 1:
type = Arg_type.TYPE_SHORT
else:
type = Arg_type.TYPE_INT
return c, type
def conv_unsigned(c, flags):
# If 'long long' exists and is larger than 'long':
if flags >= 16 or flags & 4:
return c, Arg_type.TYPE_ULONGLONGINT
else:
# If 'unsigned long long' exists and is the same as 'unsigned long', we
# parse "llu" into TYPE_ULONGINT.
if flags >= 8:
type = Arg_type.TYPE_ULONGINT
elif flags & 2:
type = Arg_type.TYPE_UCHAR
elif flags & 1:
type = Arg_type.TYPE_USHORT
else:
type = Arg_type.TYPE_UINT
return c, type
def conv_float(c, flags):
if flags >= 16 or flags & 4:
return c, Arg_type.TYPE_LONGDOUBLE
else:
return c, Arg_type.TYPE_DOUBLE
def conv_char(c, flags):
if flags >= 8:
return c, Arg_type.TYPE_WIDE_CHAR
else:
return c, Arg_type.TYPE_CHAR
def conv_widechar(c, flags):
c = 'c'
return c, Arg_type.TYPE_WIDE_CHAR
def conv_string(c, flags):
if flags >= 8:
return c, Arg_type.TYPE_WIDE_STRING
else:
return c, Arg_type.TYPE_STRING
def conv_widestring(c, flags):
c = 's'
return c, Arg_type.TYPE_WIDE_STRING
def conv_pointer(c, flags):
return c, Arg_type.TYPE_POINTER
def conv_intpointer(c, flags):
# If 'long long' exists and is larger than 'long':
if flags >= 16 or flags & 4:
return c, Arg_type.TYPE_COUNT_LONGLONGINT_POINTER
else:
# If 'long long' exists and is the same as 'long', we parse "lln" into
# TYPE_COUNT_LONGINT_POINTER.
if flags >= 8:
type = Arg_type.TYPE_COUNT_LONGINT_POINTER
elif flags & 2:
type = Arg_type.TYPE_COUNT_SCHAR_POINTER
elif flags & 1:
type = Arg_type.TYPE_COUNT_SHORT_POINTER
else:
type = Arg_type.TYPE_COUNT_INT_POINTER
return c, type
def conv_none(c, flags):
return c, Arg_type.TYPE_NONE
_conv_char = {
'd': conv_signed,
'i': conv_signed,
'o': conv_unsigned,
'u': conv_unsigned,
'x': conv_unsigned,
'X': conv_unsigned,
'f': conv_float,
'F': conv_float,
'e': conv_float,
'E': conv_float,
'g': conv_float,
'G': conv_float,
'a': conv_float,
'A': conv_float,
'c': conv_char,
'C': conv_widechar,
's': conv_string,
'S': conv_widestring,
'p': conv_pointer,
'n': conv_intpointer,
'%': conv_none
}
def printf_parse(fmt):
'''Parses the format string. Fills in the number N of directives, and fills
in directives[0], ..., directives[N-1], and sets directives[N].dir_start to
the end of the format string. Also fills in the arg_type fields of the
arguments and the needed count of arguments.'''
cp = 0 # index into format string
arg_posn = 0 # number of regular arguments consumed
max_width_length = 0
max_precision_length = 0
d = Directives()
a = Arguments()
while True:
try:
c = fmt[cp]
except IndexError:
break
cp += 1
if c == '%':
arg_index = ARG_NONE
d.dir.append(Directive())
dp = d.dir[d.count]
dp.dir_start = cp - 1
# Test for positional argument.
if fmt[cp].isdigit():
np = cp
while fmt[np].isdigit():
np += 1
if fmt[np] == '$':
n = 0
np = cp
while fmt[np].isdigit():
n = n * 10 + (ord(fmt[np]) - ord('0'))
np += 1
if n == 0:
raise ValueError('positional argument 0')
arg_index = n - 1
cp = np + 1
# Read the flags.
while True:
if fmt[cp] == '\'':
dp.flags |= FLAG_GROUP
cp += 1
elif fmt[cp] == '-':
dp.flags |= FLAG_LEFT
cp += 1
elif fmt[cp] == '+':
dp.flags |= FLAG_SHOWSIGN
cp += 1
elif fmt[cp] == ' ':
dp.flags |= FLAG_SPACE
cp += 1
elif fmt[cp] == '#':
dp.flags |= FLAG_ALT
cp += 1
elif fmt[cp] == '0':
dp.flags |= FLAG_ZERO
cp += 1
else:
break
# Parse the field width.
if fmt[cp] == '*':
dp.width_start = cp
cp += 1
dp.width_end = cp
if max_width_length < 1:
max_width_length = 1
# Test for positional argument.
if fmt[cp].isdigit():
np = cp
while fmt[np].isdigit():
np += 1
if fmt[np] == '$':
n = 0
np = cp
while fmt[np].isdigit():
n = n * 10 + (ord(fmt[np]) - ord('0'))
np += 1
if n == 0:
raise ValueError('positional argument 0')
dp.width_arg_index = n - 1
cp = np + 1
if dp.width_arg_index == ARG_NONE:
dp.width_arg_index = arg_posn
arg_posn += 1
REGISTER_ARG(a, dp.width_arg_index, Arg_type.TYPE_INT)
elif fmt[cp].isdigit():
dp.width_start = cp
while fmt[cp].isdigit():
cp += 1
dp.width_end = cp
width_length = dp.width_end - dp.width_start
if max_width_length < width_length:
max_width_length = width_length
# Parse the precision.
if fmt[cp] == '.':
cp += 1
if fmt[cp] == '*':
dp.precision_start = cp - 1
cp += 1
dp.precision_end = cp
if max_precision_length < 2:
max_precision_length = 2
# Test for positional argument.
if fmt[cp].isdigit():
np = cp
while fmt[np].isdigit():
np += 1
if fmt[np] == '$':
n = 0
np = cp
while fmt[np].isdigit():
n = n * 10 + (ord(fmt[np]) - ord('0'))
np += 1
if n == 0:
raise ValueError('positional argument 0')
dp.precision_arg_index = n - 1
cp = np + 1
if dp.precision_arg_index == ARG_NONE:
dp.precision_arg_index = arg_posn
arg_posn += 1
REGISTER_ARG(a, dp.precision_arg_index, Arg_type.TYPE_INT)
else:
dp.precision_start = cp - 1
while fmt[cp].isdigit():
cp += 1
dp.precision_end = cp
precision_length = dp.precision_end - dp.precision_start
if max_precision_length < precision_length:
max_precision_length = precision_length
# Parse argument type/size specifiers.
flags = 0
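            # 'flags' encodes the length modifiers seen so far: 1 = 'h' (short),
            # +2 for a second 'h' ('hh' -> char), 4 = 'L' (long double),
            # 8 = 'l' (long), +8 again for 'll' (long long); the conv_* helpers
            # above decode this value.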
while True:
if fmt[cp] == 'h':
flags |= (1 << (flags & 1))
cp += 1
elif fmt[cp] == 'L':
flags |= 4
cp += 1
elif fmt[cp] == 'l':
flags += 8
cp += 1
elif fmt[cp] == 'j':
raise ValueError("don't know how to handle intmax_t")
elif fmt[cp] == 'z':
if sizeof(ctypes.c_size_t) > sizeof(ctypes.c_long):
# size_t = long long
flags += 16
elif sizeof(ctypes.c_size_t) > sizeof(ctypes.c_int):
# size_t = long
flags += 8
cp += 1
elif fmt[cp] == 't':
raise ValueError("don't know how to handle ptrdiff_t")
else:
break
# Read the conversion character.
c = fmt[cp]
cp += 1
try:
c, type = _conv_char[c](c, flags)
except KeyError:
raise ValueError('bad conversion character: %%%s' % c)
if type != Arg_type.TYPE_NONE:
dp.arg_index = arg_index
if dp.arg_index == ARG_NONE:
dp.arg_index = arg_posn
arg_posn += 1
REGISTER_ARG(a, dp.arg_index, type)
dp.conversion = c
dp.dir_end = cp
d.count += 1
d.dir.append(Directive())
d.dir[d.count].dir_start = cp
d.max_width_length = max_width_length
d.max_precision_length = max_precision_length
return d, a
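# Minimal illustrative self-check (not part of the original gnulib port): parsing
# "%d:%5.2f" should produce two directives and two arguments (int, double).
if __name__ == '__main__':
    demo_dirs, demo_args = printf_parse('%d:%5.2f')
    assert demo_dirs.count == 2
    assert [x.conversion for x in demo_dirs.dir[:demo_dirs.count]] == ['d', 'f']
    assert [x.type for x in demo_args.arg] == [Arg_type.TYPE_INT, Arg_type.TYPE_DOUBLE]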
| mit | -4,507,147,994,739,678,700 | 28.974596 | 80 | 0.47677 | false |
ktarrant/options_csv | journal/trades/migrations/0001_initial.py | 1 | 1392 | # Generated by Django 2.1 on 2018-08-28 05:10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Leg',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('symbol', models.CharField(max_length=16)),
('exec_date', models.DateTimeField(verbose_name='date executed')),
('buy_or_sell', models.CharField(choices=[('buy', 'Buy'), ('sell', 'Sell')], default='buy', max_length=4)),
('open_or_close', models.CharField(choices=[('open', 'Open'), ('close', 'Close')], default='open', max_length=5)),
('instrument', models.CharField(choices=[('call', 'Call'), ('put', 'Put'), ('stock', 'Stock'), ('fut', 'Futures')], default='call', max_length=5)),
('quantity', models.PositiveIntegerField()),
('execution_price', models.FloatField()),
('execution_fees', models.FloatField(default=0)),
('expiration_date', models.DateTimeField(null=True, verbose_name='expiration')),
('margin', models.FloatField(null=True)),
('underlying_price', models.FloatField(null=True)),
],
),
]
| mit | 1,929,704,377,205,315,600 | 43.903226 | 163 | 0.558908 | false |
rschnapka/partner-contact | base_partner_merge/validate_email.py | 1 | 4573 | # -*- coding: utf-8 -*-
# RFC 2822 - style email validation for Python
# (c) 2012 Syrus Akbary <[email protected]>
# Extended from (c) 2011 Noel Bush <[email protected]>
# for support of mx and user check
# This code is made available to you under the GNU LGPL v3.
#
# This module provides a single method, valid_email_address(),
# which returns True or False to indicate whether a given address
# is valid according to the 'addr-spec' part of the specification
# given in RFC 2822. Ideally, we would like to find this
# in some other library, already thoroughly tested and well-
# maintained. The standard Python library email.utils
# contains a parse_addr() function, but it is not sufficient
# to detect many malformed addresses.
#
# This implementation aims to be faithful to the RFC, with the
# exception of a circular definition (see comments below), and
# with the omission of the pattern components marked as "obsolete".
import re
import smtplib
try:
import DNS
ServerError = DNS.ServerError
except:
DNS = None
class ServerError(Exception):
pass
# All we are really doing is comparing the input string to one
# gigantic regular expression. But building that regexp, and
# ensuring its correctness, is made much easier by assembling it
# from the "tokens" defined by the RFC. Each of these tokens is
# tested in the accompanying unit test file.
#
# The section of RFC 2822 from which each pattern component is
# derived is given in an accompanying comment.
#
# (To make things simple, every string below is given as 'raw',
# even when it's not strictly necessary. This way we don't forget
# when it is necessary.)
#
WSP = r'[ \t]'
CRLF = r'(?:\r\n)'
NO_WS_CTL = r'\x01-\x08\x0b\x0c\x0f-\x1f\x7f'
QUOTED_PAIR = r'(?:\\.)'
FWS = r'(?:(?:{0}*{1})?{0}+)'.format(WSP, CRLF)
CTEXT = r'[{0}\x21-\x27\x2a-\x5b\x5d-\x7e]'.format(NO_WS_CTL)
CCONTENT = r'(?:{0}|{1})'.format(CTEXT, QUOTED_PAIR)
COMMENT = r'\((?:{0}?{1})*{0}?\)'.format(FWS, CCONTENT)
CFWS = r'(?:{0}?{1})*(?:{0}?{1}|{0})'.format(FWS, COMMENT)
ATEXT = r'[\w!#$%&\'\*\+\-/=\?\^`\{\|\}~]'
ATOM = r'{0}?{1}+{0}?'.format(CFWS, ATEXT)
DOT_ATOM_TEXT = r'{0}+(?:\.{0}+)*'.format(ATEXT)
DOT_ATOM = r'{0}?{1}{0}?'.format(CFWS, DOT_ATOM_TEXT)
QTEXT = r'[{0}\x21\x23-\x5b\x5d-\x7e]'.format(NO_WS_CTL)
QCONTENT = r'(?:{0}|{1})'.format(QTEXT, QUOTED_PAIR)
QUOTED_STRING = r'{0}?"(?:{1}?{2})*{1}?"{0}?'.format(CFWS, FWS, QCONTENT)
LOCAL_PART = r'(?:{0}|{1})'.format(DOT_ATOM, QUOTED_STRING)
DTEXT = r'[{0}\x21-\x5a\x5e-\x7e]'.format(NO_WS_CTL)
DCONTENT = r'(?:{0}|{1})'.format(DTEXT, QUOTED_PAIR)
DOMAIN_LITERAL = r'{0}?\[(?:{1}?{2})*{1}?\]{0}?'.format(CFWS, FWS, DCONTENT)
DOMAIN = r'(?:{0}|{1})'.format(DOT_ATOM, DOMAIN_LITERAL)
ADDR_SPEC = r'{0}@{1}'.format(LOCAL_PART, DOMAIN)
VALID_ADDRESS_REGEXP = '^' + ADDR_SPEC + '$'
def validate_email(email, check_mx=False, verify=False):
"""Indicate whether the given string is a valid email address
according to the 'addr-spec' portion of RFC 2822 (see section
3.4.1). Parts of the spec that are marked obsolete are *not*
included in this test, and certain arcane constructions that
depend on circular definitions in the spec may not pass, but in
general this should correctly identify any email address likely
to be in use as of 2011."""
try:
assert re.match(VALID_ADDRESS_REGEXP, email) is not None
check_mx |= verify
if check_mx:
if not DNS:
raise Exception('For check the mx records or check if the '
'email exists you must have installed pyDNS '
'python package')
DNS.DiscoverNameServers()
hostname = email[email.find('@') + 1:]
mx_hosts = DNS.mxlookup(hostname)
for mx in mx_hosts:
try:
smtp = smtplib.SMTP()
smtp.connect(mx[1])
if not verify:
return True
status, _ = smtp.helo()
if status != 250:
continue
smtp.mail('')
status, _ = smtp.rcpt(email)
if status != 250:
return False
break
# Server not permits verify user
except smtplib.SMTPServerDisconnected:
break
except smtplib.SMTPConnectError:
continue
except (AssertionError, ServerError):
return False
return True
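# Minimal illustrative self-check (not part of the original module); syntax-only
# validation, so it does not need the optional pyDNS dependency:
if __name__ == '__main__':
    assert validate_email('[email protected]')
    assert not validate_email('not-an-email')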
| agpl-3.0 | 1,122,090,450,398,727,800 | 40.572727 | 77 | 0.605292 | false |
eblume/tf-idf | tfidf/preprocess.py | 1 | 9539 | #!/usr/bin/env python3
"""Pre processing step for text.
Example:
pp = Preprocesses()
"""
from __future__ import absolute_import, with_statement
import re
from collections import namedtuple
from cachetools import LRUCache, cached # python2 support
from nltk.stem import SnowballStemmer
from six.moves.html_parser import HTMLParser # python2 support
from stop_words import get_stop_words
from .dockeyword import Keyword
unescape = HTMLParser().unescape
def handle_unicode(text):
"""Needed for the description fields."""
if re.search(r'\\+((u([0-9]|[a-z]|[A-Z]){4}))', text):
text = text.encode('utf-8').decode('unicode-escape')
text = re.sub(r'\\n', '\n', text)
text = re.sub(r'\\t', '\t', text)
return text
def handle_html_unquote(text):
"""Detect if there are HTML encoded characters, then decode them."""
if re.search(r'(&#?x?)([A-Z]|[a-z]|[0-9]){2,10};', text):
text = unescape(text)
return text
def handle_mac_quotes(text):
"""Handle the unfortunate non-ascii quotes OSX inserts."""
text = text.replace('“', '"').replace('”', '"')\
.replace('‘', "'").replace('’', "'")
return text
def handle_text_break_dash(text):
"""Convert text break dashes into semicolons to simplify things.
Example:
"She loved icecream- mint chip especially"
"She loved icecream - mint chip especially"
both convert to
"She loved icecream; mint chip especially"
However,
"The 27-year-old could eat icecream any day"
will not be changed.
"""
return re.sub(r'\s+-\s*|\s*-\s+', ';', text)
def clean_text(raw_text):
"""Strip text of non useful characters."""
# Must strip HTML tags out first!
text = re.sub('<[^<]+?>', '', raw_text)
text = handle_unicode(text)
text = handle_html_unquote(text)
text = handle_mac_quotes(text)
text = handle_text_break_dash(text)
text = text.lower()
regex_subs = ['\t\n\r', '\s+', '&']
for regex_sub in regex_subs:
text = re.sub(regex_sub, ' ', text)
return text
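# Illustrative result: clean_text('<p>Ice cream- mint chip</p>') == 'ice cream;mint chip'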
class Preprocessor(object):
"""Prep the text for TF-IDF calculations.
Fixes some unicode problems, handles HTML character encoding,
and removes HTML tags.
Strips some non alphanumeric characters, but keeps ngram boundary
markers (eg, period (',') and semi-colon (';'))
If a stopwords file is provided, it will remove stopwords.
Example:
>>> processor = Preprocessor('english_stopwords.txt')
>>> processor.clean('He was an interesting fellow.')
"was interesting fellow."
"""
stopwords = set()
contractions = r"(n't|'s|'re)$"
negative_gram_breaks = r'[^:;!^,\?\.\[|\]\(|\)"`]+'
supported_languages = (
'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'hungarian',
'italian', 'kazakh', 'norwegian', 'porter', 'portuguese', 'romanian',
'russian', 'spanish', 'swedish', 'turkish'
)
def __init__(self, language=None, gramsize=1, all_ngrams=True,
stopwords_file=None, stemmer=None):
"""Preprocessor must be initalized for use if using stopwords.
stopwords_file (filename): contains stopwords, one per line
stemmer (function): takes in a word and returns the stemmed version
gramsize (int): maximum word size for ngrams
all_ngrams (bool):
if true, all possible ngrams of length "gramsize" and smaller will
be examined. If false, only ngrams of _exactly_ length "gramsize"
will be run.
negative_gram_breaks (regex):
if a word ends with one of these characters, an
ngram may not cross that. Expressed as a _negative_ regex.
Example:
in the sentence "Although he saw the car, he ran across the street"
"car he" may not be a bi-gram
stopwords_file (filename):
Provide a list of stopwords. If used in addition to "language", the
provided stopwords file overrides the default.
stemmer (function):
A function that takes in a single argument (str) and returns a string
as the stemmed word. Overrides the default behavior if specified.
Default None:
            Use the NLTK snowball stemmer for the specified language. If
language is not found, no stemming will take place.
"""
if language:
assert language in self.supported_languages
if language in SnowballStemmer.languages:
sb_stemmer = SnowballStemmer(language)
self.__stemmer = sb_stemmer.stem
else:
self.__stemmer = lambda x: x # no change to word
self.stopwords = get_stop_words(language)
if stopwords_file:
self._load_stopwords(stopwords_file)
if stemmer:
self.__stemmer = stemmer
self.__gramsize = gramsize
self.__all_ngrams = all_ngrams
@property
def gramsize(self):
"""Number of words in the ngram."""
return self.__gramsize
@property
def all_ngrams(self):
"""True if ngrams of size "gramsize" or smaller will be generated.
False if only ngrams of _exactly_ size "gramsize" are generated.
"""
return self.__all_ngrams
def _load_stopwords(self, filename):
with open(filename) as f:
words = []
for line in f:
words.append(line.strip())
self.stopwords = set(words)
def handle_stopwords(self, text):
"""Remove stop words from the text."""
out = []
for word in text.split(' '):
# Remove common contractions for stopwords when checking list
check_me = re.sub(self.contractions, '', word)
if check_me in self.stopwords:
continue
out.append(word)
return ' '.join(out)
def normalize_term(self, text):
"""Clean first cleans the text characters, then removes the stopwords.
Assumes the input is already the number of words you want for the ngram.
"""
text = clean_text(text)
text = self.handle_stopwords(text)
return self.stem_term(text)
@cached(LRUCache(maxsize=10000))
def _stem(self, word):
"""The stem cache is used to cache up to 10,000 stemmed words.
This substantially speeds up the word stemming on larger documents.
"""
return self.__stemmer(word)
def stem_term(self, term):
"""Apply the standard word procesing (eg stemming). Returns a stemmed ngram."""
return ' '.join([self._stem(x) for x in term.split(' ')])
def yield_keywords(self, raw_text, document=None):
"""Yield keyword objects as mono, di, tri... *-grams.
Use this as an iterator.
Will not create ngrams across logical sentence breaks.
Example:
s = "Although he saw the car, he ran across the street"
the valid bigrams for the sentences are:
['Although he', 'saw the', 'he saw', 'the car',
'he ran', 'across the', 'ran across', 'the street']
"car he" is not a valid bi-gram
This will also stem words when applicable.
Example:
s = "All the cars were honking their horns."
['all', 'the', 'car', 'were', 'honk', 'their', 'horn']
"""
gramlist = range(1, self.gramsize + 1) if self.all_ngrams else [self.gramsize]
for sentence in positional_splitter(self.negative_gram_breaks, raw_text):
words = [x for x in positional_splitter(r'\S+', sentence.text)]
# Remove all stopwords
words_no_stopwords = []
for w in words:
# Remove common contractions for stopwords when checking list
check_me = re.sub(self.contractions, '', w.text)
if check_me not in self.stopwords:
words_no_stopwords.append(w)
# Make the ngrams
for gramsize in gramlist:
# You need to try as many offsets as chunk size
for offset in range(0, gramsize): # number of words offest
data = words_no_stopwords[offset:]
text_in_chunks = [data[pos:pos + gramsize]
for pos in range(0, len(data), gramsize)
if len(data[pos:pos + gramsize]) == gramsize]
for word_list in text_in_chunks:
word_text = ' '.join([self.stem_term(w.text) for w in word_list])
word_global_start = sentence.start + word_list[0].start
word_global_end = sentence.start + word_list[-1].end
yield Keyword(word_text, document=document,
start=word_global_start, end=word_global_end)
        return  # PEP 479: don't raise StopIteration inside a generator
PositionalWord = namedtuple('PositionalWord', ['text', 'start', 'end'])
def positional_splitter(regex, text):
r"""Yield sentence chunks (as defined by the regex) as well as their location.
NOTE: the regex needs to be an "inverse match"
Example:
To split on whitespace, you match:
r'\S+' <-- "a chain of anything that's NOT whitespace"
"""
for res in re.finditer(regex, text):
yield PositionalWord(res.group(0), res.start(), res.end())
    return  # PEP 479: don't raise StopIteration inside a generator
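# Illustrative example (not part of the original module): splitting on runs of
# non-whitespace keeps each token together with its character offsets, e.g.
#     list(positional_splitter(r'\S+', 'ab cd'))
#     -> [PositionalWord(text='ab', start=0, end=2),
#         PositionalWord(text='cd', start=3, end=5)]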
| mit | 2,284,034,132,267,696,400 | 35.94186 | 89 | 0.58934 | false |
Drakulix/knex | tests/api/test_security.py | 1 | 3177 | import requests
import os
class TestPOST(object):
def test_main_page(self, flask_api_url):
response = requests.get(flask_api_url + '/')
assert response.status_code == 404
def test_login_fake_user(self, flask_api_url):
session = requests.Session()
response = session.post(flask_api_url + '/api/users/login',
data=dict(email='user1', password='password'))
assert response.status_code == 403
def test_login_successful(self, flask_api_url):
session = requests.Session()
response = session.post(flask_api_url + '/api/users/login',
data=dict(email='[email protected]', password="admin"))
assert response.status_code == 200
def test_login_wrong_password(self, flask_api_url):
session = requests.Session()
response = session.post(flask_api_url + '/api/users/login',
data=dict(email='admin', password='a'))
assert response.status_code == 403
def test_logout(self, flask_api_url, session):
session = requests.Session()
session.post(flask_api_url + '/api/users/login',
data=dict(email='[email protected]', password="user"))
response = session.get(flask_api_url + '/api/users/logout')
assert response.status_code == 200
def test_access_login_required_logged(self, pytestconfig, flask_api_url):
test_manifest = os.path.join(
str(pytestconfig.rootdir),
'tests',
'testmanifests',
'validexample0.json5'
)
with open(test_manifest, 'r', encoding='utf-8') as tf:
data = str(tf.read().replace('\n', ''))
response = requests.post(flask_api_url + "/api/projects",
data=data.encode('utf-8'),
headers={'Content-Type': 'application/json5'})
assert response.status_code == 403
def test_access_login_required_not_logged(self, pytestconfig, flask_api_url):
requests.get(flask_api_url + '/api/users/logout')
test_manifest = os.path.join(
str(pytestconfig.rootdir),
'tests',
'testmanifests',
'validexample0.json5'
)
with open(test_manifest, 'r', encoding='utf-8') as tf:
data = str(tf.read().replace('\n', ''))
response = requests.post(flask_api_url + "/api/projects",
data=data.encode('utf-8'),
headers={'Content-Type': 'application/json5'})
assert response.status_code == 403
def test_get_user(self, session, enter_default_user_users, flask_api_url):
response = enter_default_user_users
assert response.status_code == 200
response = session.get(flask_api_url + '/api/users/' + '[email protected]')
assert response.status_code == 200
def test_user_nonexistent(self, flask_api_url):
response = requests.get(flask_api_url + '/api/users/',
data=dict(email='[email protected]'))
assert response.status_code == 404
| mit | 3,377,210,112,738,261,500 | 41.932432 | 84 | 0.569405 | false |
industrydive/fileflow | fileflow/operators/dive_python_operator.py | 1 | 1096 | """
.. module:: operators.dive_operator
:synopsis: DivePythonOperator for use with TaskRunner
.. moduleauthor:: Laura Lorenz <[email protected]>
.. moduleauthor:: Miriam Sexton <[email protected]>
"""
from airflow.operators import PythonOperator
from .dive_operator import DiveOperator
class DivePythonOperator(DiveOperator, PythonOperator):
"""
Python operator that can send along data dependencies to its callable.
Generates the callable by initializing its python object and calling its method.
"""
def __init__(self, python_object, python_method="run", *args, **kwargs):
self.python_object = python_object
self.python_method = python_method
kwargs['python_callable'] = None
super(DivePythonOperator, self).__init__(*args, **kwargs)
def pre_execute(self, context):
context.update(self.op_kwargs)
context.update({"data_dependencies": self.data_dependencies})
instantiated_object = self.python_object(context)
self.python_callable = getattr(instantiated_object, self.python_method)
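# Illustrative usage (not part of the original module; the task class, task id
# and DAG wiring below are hypothetical):
#
#     class WordCounter(object):
#         def __init__(self, context):
#             self.context = context
#         def run(self):
#             pass  # read context["data_dependencies"], do the work
#
#     count_words = DivePythonOperator(task_id='count_words',
#                                      python_object=WordCounter,
#                                      python_method='run',
#                                      data_dependencies={'text': 'upstream_task_id'},
#                                      dag=dag)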
| apache-2.0 | -2,527,563,072,624,418,000 | 34.354839 | 84 | 0.711679 | false |
awni/tensorflow | tensorflow/contrib/skflow/python/skflow/tests/test_multioutput.py | 1 | 1502 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from sklearn import datasets
from sklearn.metrics import accuracy_score, mean_squared_error
import tensorflow as tf
from tensorflow.contrib.skflow.python import skflow
class MultiOutputTest(tf.test.TestCase):
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
regressor = skflow.TensorFlowLinearRegressor(learning_rate=0.01)
regressor.fit(X, y)
score = mean_squared_error(regressor.predict(X), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -3,187,376,389,461,075,500 | 33.930233 | 78 | 0.709055 | false |
magcius/sweettooth | sweettooth/extensions/migrations/0008_new_icon_default.py | 1 | 6118 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
new_default = orm.Extension._meta.get_field_by_name('icon')[0].default
for ext in orm.Extension.objects.filter(icon=""):
ext.icon = new_default
ext.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'extensions.extension': {
'Meta': {'object_name': 'Extension'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'icon': ('django.db.models.fields.files.ImageField', [], {'default': "'/static/images/plugin.png'", 'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'screenshot': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'extensions.extensionversion': {
'Meta': {'unique_together': "(('extension', 'version'),)", 'object_name': 'ExtensionVersion'},
'extension': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['extensions.Extension']"}),
'extra_json_fields': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shell_versions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['extensions.ShellVersion']", 'symmetrical': 'False'}),
'source': ('django.db.models.fields.files.FileField', [], {'max_length': '223'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'extensions.shellversion': {
'Meta': {'object_name': 'ShellVersion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'major': ('django.db.models.fields.PositiveIntegerField', [], {}),
'minor': ('django.db.models.fields.PositiveIntegerField', [], {}),
'point': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['extensions']
| agpl-3.0 | -7,303,733,742,660,065,000 | 67.741573 | 182 | 0.556064 | false |
snorfalorpagus/pytest-qt | tests/test_logging.py | 1 | 12411 | import datetime
import pytest
from pytestqt.qt_compat import qDebug, qWarning, qCritical, QtDebugMsg, \
QtWarningMsg, QtCriticalMsg, QT_API
@pytest.mark.parametrize('test_succeeds', [True, False])
@pytest.mark.parametrize('qt_log', [True, False])
def test_basic_logging(testdir, test_succeeds, qt_log):
"""
Test Qt logging capture output.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makepyfile(
"""
import sys
from pytestqt.qt_compat import qDebug, qWarning, qCritical, \
qInstallMessageHandler, qInstallMsgHandler
def to_unicode(s):
return s.decode('utf-8', 'replace') if isinstance(s, bytes) else s
if qInstallMessageHandler:
def print_msg(msg_type, context, message):
sys.stderr.write(to_unicode(message) + '\\n')
qInstallMessageHandler(print_msg)
else:
def print_msg(msg_type, message):
sys.stderr.write(to_unicode(message) + '\\n')
qInstallMsgHandler(print_msg)
def test_types():
qDebug('this is a DEBUG message')
qWarning('this is a WARNING message')
qCritical('this is a CRITICAL message')
assert {0}
""".format(test_succeeds)
)
res = testdir.runpytest(*(['--no-qt-log'] if not qt_log else []))
if test_succeeds:
assert 'Captured Qt messages' not in res.stdout.str()
assert 'Captured stderr call' not in res.stdout.str()
else:
if qt_log:
res.stdout.fnmatch_lines([
'*-- Captured Qt messages --*',
'*QtDebugMsg: this is a DEBUG message*',
'*QtWarningMsg: this is a WARNING message*',
'*QtCriticalMsg: this is a CRITICAL message*',
])
else:
res.stdout.fnmatch_lines([
'*-- Captured stderr call --*',
'this is a DEBUG message*',
'this is a WARNING message*',
'this is a CRITICAL message*',
])
def test_qtlog_fixture(qtlog):
"""
Test qtlog fixture.
"""
qDebug('this is a DEBUG message')
qWarning('this is a WARNING message')
qCritical('this is a CRITICAL message')
records = [(m.type, m.message.strip()) for m in qtlog.records]
assert records == [
(QtDebugMsg, 'this is a DEBUG message'),
(QtWarningMsg, 'this is a WARNING message'),
(QtCriticalMsg, 'this is a CRITICAL message'),
]
# `records` attribute is read-only
with pytest.raises(AttributeError):
qtlog.records = []
def test_fixture_with_logging_disabled(testdir):
"""
Test that qtlog fixture doesn't capture anything if logging is disabled
in the command line.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makepyfile(
"""
from pytestqt.qt_compat import qWarning
def test_types(qtlog):
qWarning('message')
assert qtlog.records == []
"""
)
res = testdir.runpytest('--no-qt-log')
res.stdout.fnmatch_lines('*1 passed*')
@pytest.mark.parametrize('use_context_manager', [True, False])
def test_disable_qtlog_context_manager(testdir, use_context_manager):
"""
Test qtlog.disabled() context manager.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = CRITICAL
"""
)
if use_context_manager:
code = 'with qtlog.disabled():'
else:
code = 'if 1:'
testdir.makepyfile(
"""
from pytestqt.qt_compat import qCritical
def test_1(qtlog):
{code}
qCritical('message')
""".format(code=code)
)
res = testdir.inline_run()
passed = 1 if use_context_manager else 0
res.assertoutcome(passed=passed, failed=int(not passed))
@pytest.mark.parametrize('use_mark', [True, False])
def test_disable_qtlog_mark(testdir, use_mark):
"""
Test mark which disables logging capture for a test.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = CRITICAL
"""
)
mark = '@pytest.mark.no_qt_log' if use_mark else ''
testdir.makepyfile(
"""
from pytestqt.qt_compat import qCritical
import pytest
{mark}
def test_1():
qCritical('message')
""".format(mark=mark)
)
res = testdir.inline_run()
passed = 1 if use_mark else 0
res.assertoutcome(passed=passed, failed=int(not passed))
def test_logging_formatting(testdir):
"""
Test custom formatting for logging messages.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makepyfile(
"""
from pytestqt.qt_compat import qWarning
def test_types():
qWarning('this is a WARNING message')
assert 0
"""
)
f = '{rec.type_name} {rec.log_type_name} {rec.when:%Y-%m-%d}: {rec.message}'
res = testdir.runpytest('--qt-log-format={0}'.format(f))
today = '{0:%Y-%m-%d}'.format(datetime.datetime.now())
res.stdout.fnmatch_lines([
'*-- Captured Qt messages --*',
'QtWarningMsg WARNING {0}: this is a WARNING message*'.format(today),
])
@pytest.mark.parametrize('level, expect_passes',
[('DEBUG', 1), ('WARNING', 2), ('CRITICAL', 3),
('NO', 4)],
)
def test_logging_fails_tests(testdir, level, expect_passes):
"""
Test qt_log_level_fail ini option.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = {level}
""".format(level=level)
)
testdir.makepyfile(
"""
from pytestqt.qt_compat import qWarning, qCritical, qDebug
def test_1():
qDebug('this is a DEBUG message')
def test_2():
qWarning('this is a WARNING message')
def test_3():
qCritical('this is a CRITICAL message')
def test_4():
assert 1
"""
)
res = testdir.runpytest()
lines = []
if level != 'NO':
lines.extend([
'*Failure: Qt messages with level {0} or above emitted*'.format(
level.upper()),
'*-- Captured Qt messages --*',
])
lines.append('*{0} passed*'.format(expect_passes))
res.stdout.fnmatch_lines(lines)
def test_logging_fails_tests_mark(testdir):
"""
Test mark overrides what's configured in the ini file.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = CRITICAL
"""
)
testdir.makepyfile(
"""
from pytestqt.qt_compat import qWarning, qCritical, qDebug
import pytest
@pytest.mark.qt_log_level_fail('WARNING')
def test_1():
qWarning('message')
"""
)
res = testdir.inline_run()
res.assertoutcome(failed=1)
def test_logging_fails_ignore(testdir):
"""
Test qt_log_ignore config option.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = CRITICAL
qt_log_ignore =
WM_DESTROY.*sent
WM_PAINT not handled
"""
)
testdir.makepyfile(
"""
from pytestqt.qt_compat import qWarning, qCritical
import pytest
def test1():
qCritical('a critical message')
def test2():
qCritical('WM_DESTROY was sent')
def test3():
qCritical('WM_DESTROY was sent')
assert 0
def test4():
qCritical('WM_PAINT not handled')
qCritical('another critical message')
"""
)
res = testdir.runpytest()
lines = [
# test1 fails because it has emitted a CRITICAL message and that message
# does not match any regex in qt_log_ignore
'*_ test1 _*',
'*Failure: Qt messages with level CRITICAL or above emitted*',
'*QtCriticalMsg: a critical message*',
# test2 succeeds because its message matches qt_log_ignore
# test3 fails because of an assert, but the ignored message should
# still appear in the failure message
'*_ test3 _*',
'*AssertionError*',
'*QtCriticalMsg: WM_DESTROY was sent*(IGNORED)*',
# test4 fails because one message is ignored but the other isn't
'*_ test4 _*',
'*Failure: Qt messages with level CRITICAL or above emitted*',
'*QtCriticalMsg: WM_PAINT not handled*(IGNORED)*',
'*QtCriticalMsg: another critical message*',
# summary
'*3 failed, 1 passed*',
]
res.stdout.fnmatch_lines(lines)
@pytest.mark.parametrize('mark_regex', ['WM_DESTROY.*sent', 'no-match', None])
def test_logging_fails_ignore_mark(testdir, mark_regex):
"""
Test qt_log_ignore mark overrides config option.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = CRITICAL
"""
)
if mark_regex:
mark = '@pytest.mark.qt_log_ignore("{0}")'.format(mark_regex)
else:
mark = ''
testdir.makepyfile(
"""
from pytestqt.qt_compat import qWarning, qCritical
import pytest
{mark}
def test1():
qCritical('WM_DESTROY was sent')
""".format(mark=mark)
)
res = testdir.inline_run()
passed = 1 if mark_regex == 'WM_DESTROY.*sent' else 0
res.assertoutcome(passed=passed, failed=int(not passed))
@pytest.mark.parametrize('apply_mark', [True, False])
def test_logging_fails_ignore_mark_multiple(testdir, apply_mark):
"""
Make sure qt_log_ignore mark supports multiple arguments.
:type testdir: _pytest.pytester.TmpTestdir
"""
if apply_mark:
mark = '@pytest.mark.qt_log_ignore("WM_DESTROY", "WM_PAINT")'
else:
mark = ''
testdir.makepyfile(
"""
from pytestqt.qt_compat import qWarning, qCritical
import pytest
@pytest.mark.qt_log_level_fail('CRITICAL')
{mark}
def test1():
qCritical('WM_PAINT was sent')
""".format(mark=mark)
)
res = testdir.inline_run()
passed = 1 if apply_mark else 0
res.assertoutcome(passed=passed, failed=int(not passed))
def test_lineno_failure(testdir):
"""
Test that tests when failing because log messages were emitted report
the correct line number.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = WARNING
"""
)
testdir.makepyfile(
"""
from pytestqt.qt_compat import qWarning
def test_foo():
assert foo() == 10
def foo():
qWarning('this is a WARNING message')
return 10
"""
)
res = testdir.runpytest()
if QT_API == 'pyqt5':
res.stdout.fnmatch_lines([
'*test_lineno_failure.py:2: Failure*',
'*test_lineno_failure.py:foo:5:*',
' QtWarningMsg: this is a WARNING message',
])
else:
res.stdout.fnmatch_lines('*test_lineno_failure.py:2: Failure*')
@pytest.mark.skipif(QT_API != 'pyqt5',
reason='Context information only available in PyQt5')
def test_context_none(testdir):
"""
Sometimes PyQt5 will emit a context with some/all attributes set as None
instead of appropriate file, function and line number.
Test that when this happens the plugin doesn't break.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makepyfile(
"""
from pytestqt.qt_compat import QtWarningMsg
def test_foo(request):
log_capture = request.node.qt_log_capture
context = log_capture._Context(None, None, None)
log_capture._handle_with_context(QtWarningMsg,
context, "WARNING message")
assert 0
"""
)
res = testdir.runpytest()
res.stdout.fnmatch_lines([
'*Failure*',
'*None:None:None:*',
])
| lgpl-3.0 | -3,890,830,085,458,875,400 | 27.93007 | 80 | 0.571106 | false |
mattboyer/sqbrite | src/record.py | 1 | 5656 | # MIT License
#
# Copyright (c) 2017 Matt Boyer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pdb
from . import _LOGGER
from .field import (Field, MalformedField)
from .utils import (Varint, IndexDict)
class MalformedRecord(Exception):
pass
class Record(object):
column_types = {
0: (0, "NULL"),
1: (1, "8-bit twos-complement integer"),
2: (2, "big-endian 16-bit twos-complement integer"),
3: (3, "big-endian 24-bit twos-complement integer"),
4: (4, "big-endian 32-bit twos-complement integer"),
5: (6, "big-endian 48-bit twos-complement integer"),
6: (8, "big-endian 64-bit twos-complement integer"),
7: (8, "Floating point"),
8: (0, "Integer 0"),
9: (0, "Integer 1"),
}
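    # Illustrative examples (not part of the original source): serial types 12
    # and above encode BLOB/TEXT payload lengths directly, e.g. type 13 is a
    # 0-byte TEXT ((13 - 13) // 2), type 25 a 6-byte TEXT ((25 - 13) // 2) and
    # type 18 a 3-byte BLOB ((18 - 12) // 2), matching the computation in
    # _parse() below.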
def __init__(self, record_bytes):
self._bytes = record_bytes
self._header_bytes = None
self._fields = IndexDict()
self._parse()
def __bytes__(self):
return self._bytes
@property
def header(self):
return self._header_bytes
@property
def fields(self):
return self._fields
def truncate(self, new_length):
self._bytes = self._bytes[:new_length]
self._parse()
def _parse(self):
header_offset = 0
header_length_varint = Varint(
# A varint is encoded on *at most* 9 bytes
bytes(self)[header_offset:9 + header_offset]
)
# Let's keep track of how many bytes of the Record header (including
# the header length itself) we've succesfully parsed
parsed_header_bytes = len(header_length_varint)
if len(bytes(self)) < int(header_length_varint):
raise MalformedRecord(
"Not enough bytes to fully read the record header!"
)
header_offset += len(header_length_varint)
self._header_bytes = bytes(self)[:int(header_length_varint)]
col_idx = 0
field_offset = int(header_length_varint)
while header_offset < int(header_length_varint):
serial_type_varint = Varint(
bytes(self)[header_offset:9 + header_offset]
)
serial_type = int(serial_type_varint)
col_length = None
try:
col_length, _ = self.column_types[serial_type]
except KeyError:
if serial_type >= 13 and (1 == serial_type % 2):
col_length = (serial_type - 13) // 2
elif serial_type >= 12 and (0 == serial_type % 2):
col_length = (serial_type - 12) // 2
else:
raise ValueError(
"Unknown serial type {}".format(serial_type)
)
try:
field_obj = Field(
col_idx,
serial_type,
bytes(self)[field_offset:field_offset + col_length]
)
except MalformedField as ex:
_LOGGER.warning(
"Caught %r while instantiating field %d (%d)",
ex, col_idx, serial_type
)
raise MalformedRecord
except Exception as ex:
_LOGGER.warning(
"Caught %r while instantiating field %d (%d)",
ex, col_idx, serial_type
)
pdb.set_trace()
raise
self._fields[col_idx] = field_obj
col_idx += 1
field_offset += col_length
parsed_header_bytes += len(serial_type_varint)
header_offset += len(serial_type_varint)
if field_offset > len(bytes(self)):
raise MalformedRecord
# assert(parsed_header_bytes == int(header_length_varint))
def print_fields(self, table=None):
for field_idx in self._fields:
field_obj = self._fields[field_idx]
if not table or table.columns is None:
_LOGGER.info(
"\tField %d (%d bytes), type %d: %s",
field_obj.index,
len(field_obj),
field_obj.serial_type,
field_obj.value
)
else:
_LOGGER.info(
"\t%s: %s",
table.columns[field_obj.index],
field_obj.value
)
def __repr__(self):
return '<Record {} fields, {} bytes, header: {} bytes>'.format(
len(self._fields), len(bytes(self)), len(self.header)
)
| mit | 4,934,288,116,914,393,000 | 33.487805 | 79 | 0.554455 | false |
flgiordano/netcash | +/google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/tools/appengine_rpc_test_util.py | 1 | 6895 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing code that uses appengine_rpc's *RpcServer."""
import logging
import StringIO
import urllib2
from googlecloudsdk.third_party.appengine.tools.appengine_rpc import AbstractRpcServer
from googlecloudsdk.third_party.appengine.tools.appengine_rpc import HttpRpcServer
class TestRpcServerMixin(object):
"""Provides a mocked-out version of HttpRpcServer for testing purposes."""
def set_strict(self, strict=True):
"""Enables strict mode."""
self.opener.set_strict(strict)
def _GetOpener(self):
"""Returns a MockOpener.
Returns:
A MockOpener object.
"""
return TestRpcServerMixin.MockOpener()
class MockResponse(object):
"""A mocked out response object for testing purposes."""
def __init__(self, body, code=200, headers=None):
"""Creates a new MockResponse.
Args:
body: The text of the body to return.
code: The response code (default 200).
headers: An optional header dictionary.
"""
self.fp = StringIO.StringIO(body)
self.code = code
self.headers = headers
self.msg = ""
if self.headers is None:
self.headers = {}
def info(self):
return self.headers
def read(self, length=-1):
"""Reads from the response body.
Args:
length: The number of bytes to read.
Returns:
The body of the response.
"""
return self.fp.read(length)
def readline(self):
"""Reads a line from the response body.
Returns:
A line of text from the response body.
"""
return self.fp.readline()
def close(self):
"""Closes the response stream."""
self.fp.close()
class MockOpener(object):
"""A mocked-out OpenerDirector for testing purposes."""
def __init__(self):
"""Creates a new MockOpener."""
self.requests = []
self.responses = {}
self.ordered_responses = {}
self.cookie = None
self.strict = False
def set_strict(self, strict=True):
"""Enables strict mode."""
self.strict = strict
def open(self, request):
"""Logs the request and returns a MockResponse object."""
full_url = request.get_full_url()
if "?" in full_url:
url = full_url[:full_url.find("?")]
else:
url = full_url
if (url != "https://www.google.com/accounts/ClientLogin"
and not url.endswith("_ah/login")):
assert "X-appcfg-api-version" in request.headers
assert "User-agent" in request.headers
request_data = (full_url, bool(request.data))
self.requests.append(request_data)
      if self.cookie:
        request.headers["Cookie"] = self.cookie
# Use ordered responses in preference to specific response to generic 200.
if url in self.ordered_responses:
logging.debug("Using ordered pre-canned response for: %s" % full_url)
response = self.ordered_responses[url].pop(0)(request)
if not self.ordered_responses[url]:
self.ordered_responses.pop(url)
elif url in self.responses:
logging.debug("Using pre-canned response for: %s" % full_url)
response = self.responses[url](request)
elif self.strict:
raise Exception('No response found for url: %s (%s)' % (url, full_url))
else:
logging.debug("Using generic blank response for: %s" % full_url)
response = TestRpcServerMixin.MockResponse("")
if "Set-Cookie" in response.headers:
self.cookie = response.headers["Set-Cookie"]
# Handle error status codes in the same way as the appengine_rpc openers.
# urllib2 will raise HTTPError for non-2XX status codes, per RFC 2616.
if not (200 <= response.code < 300):
code, msg, hdrs = response.code, response.msg, response.info()
fp = StringIO.StringIO(response.read())
raise urllib2.HTTPError(url=url, code=code, msg=None, hdrs=hdrs, fp=fp)
return response
def AddResponse(self, url, response_func):
"""Calls the provided function when the provided URL is requested.
The provided function should accept a request object and return a
response object.
Args:
url: The URL to trigger on.
response_func: The function to call when the url is requested.
"""
self.responses[url] = response_func
def AddOrderedResponse(self, url, response_func):
"""Calls the provided function when the provided URL is requested.
The provided functions should accept a request object and return a
response object. This response will be added after previously given
responses if they exist.
Args:
url: The URL to trigger on.
response_func: The function to call when the url is requested.
"""
if url not in self.ordered_responses:
self.ordered_responses[url] = []
self.ordered_responses[url].append(response_func)
def AddOrderedResponses(self, url, response_funcs):
"""Calls the provided function when the provided URL is requested.
The provided functions should accept a request object and return a
response object. Each response will be called once.
Args:
url: The URL to trigger on.
response_funcs: A list of response functions.
"""
self.ordered_responses[url] = response_funcs
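  # Illustrative usage (not part of the original module; the URL and response
  # body are hypothetical):
  #     opener = TestRpcServerMixin.MockOpener()
  #     opener.AddResponse(
  #         "http://testserver/api/version",
  #         lambda request: TestRpcServerMixin.MockResponse("ok", code=200))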
class TestRpcServer(TestRpcServerMixin, AbstractRpcServer):
pass
class TestHttpRpcServer(TestRpcServerMixin, HttpRpcServer):
pass
class UrlLibRequestResponseStub(object):
def __init__(self, headers=None):
self.headers = {}
if headers:
self.headers = headers
def add_header(self, header, value):
# Note that this does not preserve header order.
# If that's a problem for your tests, add some functionality :)
self.headers[header] = value
class UrlLibRequestStub(UrlLibRequestResponseStub):
pass
class UrlLibResponseStub(UrlLibRequestResponseStub, StringIO.StringIO):
def __init__(self, body, headers, url, code, msg):
UrlLibRequestResponseStub.__init__(self, headers)
if body:
StringIO.StringIO.__init__(self, body)
else:
StringIO.StringIO.__init__(self, "")
self.url = url
self.code = code
self.msg = msg
| bsd-3-clause | -7,914,726,820,681,663,000 | 30.921296 | 86 | 0.663234 | false |
soulweaver91/batchpatch | batchpatch.py | 1 | 27396 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Soulweaver'
import argparse
import os
import re
import time
import shutil
import colorama
import subprocess
import unicodedata
import gettext
import zipfile
import zlib
from datetime import datetime
from dateutil import tz
from logger import LogLevel, Logger
class BatchPatch:
PROG_NAME = 'BatchPatch'
PROG_VERSION = '0.3'
PROG_URL = 'https://github.com/soulweaver91/batchpatch'
LOCALE_CATALOG = 'batchpatch'
CRC_BUFFER_SIZE = 65536
logger = None
script_options = {
'script_lang': 'en_US',
'script_name': 'apply'
}
patch_options = {
'filename_pattern': None
}
archive_options = {
'create_zip': False,
'zip_name': 'patch'
}
log_level = LogLevel.notice
xdelta_location = ''
locale_dir = ''
def __init__(self):
colorama.init()
self.xdelta_location = os.path.join(BatchPatch.get_install_path(), 'xdelta3.exe')
self.locale_dir = os.path.join(BatchPatch.get_install_path(), 'i18n')
def print_welcome(self):
# Print this even on the highest levels, but not on silent, and without the log prefix
if self.log_level != LogLevel.silent:
print('{} version {}'.format(self.PROG_NAME, self.PROG_VERSION))
def get_version(self):
return self.PROG_VERSION
def get_name(self):
return self.PROG_NAME
def switch_languages(self, lang):
try:
gettext.translation('batchpatch', self.locale_dir, languages=[lang, 'en_US']).install()
except OSError as e:
self.logger.log('Selecting language {} failed: {}'.format(lang, e.strerror), LogLevel.error)
def run(self):
parser = argparse.ArgumentParser(
description="Generates distribution ready patches for anime batch releases."
)
parser.add_argument(
'-o', '--old',
action='store',
help='The path to the folder with the old files. Required.',
required=True,
metavar='directory'
)
parser.add_argument(
'-n', '--new',
action='store',
help='The path to the folder with the new files. Required.',
required=True,
metavar='directory'
)
parser.add_argument(
'-t', '--target',
action='store',
help='The path where the output should be written to. If not specified, '
'a new date stamped subfolder will be written under the current '
'working directory.',
default=self.get_default_output_folder(),
metavar='directory'
)
parser.add_argument(
'-l', '--loglevel',
action='store',
help='The desired verbosity level. Any messages with the same or higher '
'level than the chosen one will be displayed. '
'Available values: debug (most verbose), notice, warning, error, silent '
'(least verbose, does not print anything). Default: notice.',
choices=[e.name for e in LogLevel],
default='notice',
metavar='level'
)
parser.add_argument(
'-x', '--xdelta',
action='store',
help='An alternative location for the xdelta3 executable to search instead of '
'the same directory as the script.',
default=self.xdelta_location,
metavar='path'
)
parser.add_argument(
'-z', '--zip',
action='store_true',
help='Create a ZIP file out of the created patch files.'
)
parser.add_argument(
'-c', '--check-crc',
action='store_true',
help='Verify CRC values of source and target files, if present.'
)
parser.add_argument(
'--zip-name',
action='store',
help='The filename to save the ZIP with. Only meaningful if -z was set.',
default='patch.zip',
metavar='path'
)
parser.add_argument(
'--script-lang',
action='store',
help='The language to use in the generated script.',
default='en_US',
choices=[d for d in os.listdir(self.locale_dir) if os.path.isdir(os.path.join(self.locale_dir, d))],
metavar='lang_code'
)
parser.add_argument(
'--script-name',
action='store',
help='The filename to use for the generated script, without the extension. \'apply\' by default.',
default='apply',
metavar='name'
)
parser.add_argument(
'--patch-pattern',
action='store',
help='The filename to use for the patch files. Consult README.md for available variables.',
default='{name}{specifier_items[0]}_{ep}_v{v_old}v{v_new}.vcdiff',
metavar='name'
)
parser.add_argument(
'-v', '--version',
action='version',
version="{} version {}".format(self.PROG_NAME, self.PROG_VERSION)
)
args = parser.parse_args()
self.log_level = LogLevel[args.loglevel]
self.logger = Logger(self.log_level)
self.script_options['script_lang'] = args.script_lang
self.script_options['script_name'] = args.script_name
self.patch_options['filename_pattern'] = args.patch_pattern
self.archive_options['zip_name'] = args.zip_name
if args.xdelta is not None:
self.xdelta_location = args.xdelta
self.logger.log('Custom xdelta location \'{}\' read from the command line.'.format(args.xdelta),
LogLevel.debug)
self.print_welcome()
self.check_prerequisites(args)
file_pairs = self.identify_file_pairs_by_name(args.old, args.new)
if len(file_pairs) > 0:
# Sort in alphabetical order for nicer output all around
file_pairs.sort(key=lambda item: item[0])
if args.check_crc:
errors = self.check_crcs(file_pairs)
if len(errors) > 0:
self.logger.log('One or more CRC values did not match, cannot proceed.', LogLevel.error)
return
self.generate_patches(file_pairs, args.target)
self.generate_win_script(file_pairs, args.target)
self.copy_executable(args.target)
if args.zip:
self.create_archive(file_pairs, args.target)
self.logger.log('Done.', LogLevel.notice)
else:
self.logger.log('No files to generate patches for.', LogLevel.notice)
def check_prerequisites(self, args):
self.logger.log('Checking prerequisites.', LogLevel.debug)
for p in ('old', 'new', 'target'):
self.logger.log('Verifying existence of {} directory.'.format(p), LogLevel.debug)
try:
path = getattr(args, p)
except AttributeError:
self.logger.log('Expected parameter \'{}\' was missing!'.format(p), LogLevel.error)
exit()
if not os.path.isdir(path):
if p != 'target':
self.logger.log('{} is not a valid path!'.format(path), LogLevel.error)
exit()
else:
if os.path.exists(path):
self.logger.log('\'{}\' exists and is not a directory!'.format(path), LogLevel.error)
exit()
else:
self.logger.log('Creating output directory \'{}\'.'.format(path), LogLevel.notice)
try:
os.makedirs(path)
except OSError as e:
                        self.logger.log('Error while creating directory \'{}\': {}'.format(path, e.strerror),
LogLevel.error)
exit()
else:
self.logger.log('\'{}\' was found.'.format(path), LogLevel.debug)
self.logger.log('Verifying a xdelta executable is found from the specified location.', LogLevel.debug)
if not os.path.exists(self.xdelta_location) or not os.path.isfile(self.xdelta_location):
self.logger.log('The xdelta3 executable could not be found at \'{}\'!'.format(self.xdelta_location),
LogLevel.error)
self.logger.log('Please download correct version for your system from the xdelta site or', LogLevel.error)
self.logger.log('compile it yourself, and then add it to the same directory as this script', LogLevel.error)
self.logger.log('under the name xdelta3.exe.', LogLevel.error)
exit()
if not os.access(self.xdelta_location, os.X_OK):
self.logger.log('The xdelta3 executable at \'{}\' doesn\'t have execution permissions!'.format(
self.xdelta_location), LogLevel.error
)
exit()
self.logger.log('Prerequisites OK.', LogLevel.debug)
def check_crcs(self, file_pairs):
errors = []
for pair in file_pairs:
for file in [pair[5], pair[6]]:
if file["crc"] is None:
continue
self.logger.log('Calculating CRC for {}...'.format(os.path.basename(file["filename"])), LogLevel.notice)
with open(file["filename"], 'rb') as f:
buffer = f.read(self.CRC_BUFFER_SIZE)
intermediate = 0
while len(buffer) > 0:
intermediate = zlib.crc32(buffer, intermediate)
buffer = f.read(self.CRC_BUFFER_SIZE)
crc = format(intermediate & 0xFFFFFFFF, '08x')
self.logger.log('CRC is {}, filename says {}.'.format(crc, file["crc"]), LogLevel.notice)
if crc.lower() != file["crc"].lower():
self.logger.log('CRCs don\'t match!', LogLevel.error)
errors.append(file["filename"])
return errors
def generate_patches(self, file_pairs, target_dir):
self.logger.log('Generating patches for {} file pairs.'.format(str(len(file_pairs))), LogLevel.debug)
for pair in file_pairs:
self.logger.log('Creating patch: {} -> {}'.format(pair[0], pair[1]), LogLevel.notice)
effective_source = pair[0]
effective_target = pair[1]
temp_source_name = None
temp_target_name = None
if not pair[4]:
temp_source_name = '~' + os.path.basename(pair[2]) + '.src'
temp_target_name = '~' + os.path.basename(pair[2]) + '.dst'
self.logger.log(('Filename is not safe for xdelta on Windows. Copying files to temporary '
'names {} and {}.').format(temp_source_name, temp_target_name), LogLevel.notice)
shutil.copyfile(pair[0], temp_source_name)
shutil.copyfile(pair[1], temp_target_name)
effective_source = temp_source_name
effective_target = temp_target_name
cmd = [
self.xdelta_location,
'-e', # Create patch
'-9', # Use maximum compression
'-s', # Read from file
effective_source, # Old file
effective_target, # New file
os.path.join(target_dir, pair[2]) # Patch destination
]
if self.log_level.numval <= LogLevel.notice.numval:
# Pass verbose flag to xdelta if using a relatively verbose logging level
cmd.insert(2, '-v')
elif self.log_level.numval == LogLevel.silent.numval:
# Pass quiet flag if using the silent logging level
cmd.insert(2, '-q')
try:
self.logger.log('Starting subprocess, command line: {}'.format(" ".join(cmd)), LogLevel.debug)
ret = subprocess.call(cmd)
if ret != 0:
self.logger.log('xdelta returned a non-zero return value {}! '
'This probably means something went wrong.'.format(str(ret)), LogLevel.warning)
if not pair[4]:
                self.logger.log('Removing temporary files {} and {}.'.format(temp_source_name, temp_target_name),
LogLevel.notice)
os.unlink(temp_source_name)
os.unlink(temp_target_name)
except (OSError, IOError) as e:
self.logger.log('Starting the subprocess failed! ' + e.strerror, LogLevel.warning)
def generate_win_script(self, file_pairs, target_dir):
self.switch_languages(self.script_options['script_lang'])
fh = open(os.path.join(target_dir, self.script_options['script_name'] + '.cmd'),
mode='w', newline='\r\n', encoding='utf-8')
self.logger.log('Generating Windows update script.'.format(str(len(file_pairs))), LogLevel.debug)
fh.write('@echo off\n\n')
fh.write('REM Generated by {} version {}\n'.format(self.PROG_NAME, self.PROG_VERSION))
fh.write('REM on {}\n'.format(datetime.now(tz.tzlocal()).strftime("%Y-%m-%d %H:%M:%S %z (%Z)")))
fh.write('REM {}\n\n'.format(self.PROG_URL))
fh.write('setlocal\n')
fh.write('for /f "tokens=2 delims=:." %%x in (\'chcp\') do set cp=%%x\n')
fh.write('chcp 65001 > NUL\n')
fh.write('set pnum=0\n')
fh.write('set nnum=0\n')
fh.write('set fnum=0\n\n')
fh.write('IF NOT EXIST "{}" (\n'.format(os.path.basename(self.xdelta_location)))
fh.write(' echo {msg}\n'.format(
msg=_('The xdelta executable was not found! It is required for this script to work!'))
)
fh.write(' pause\n')
fh.write(' exit /b 1\n')
fh.write(')\n\n')
for pair in file_pairs:
if pair[4]:
fh.write(
(
'IF EXIST "{old}" (\n' +
' IF NOT EXIST "{new}" (\n' +
' echo {msg}\n'.format(msg=_('Patching {old_esc}...')) +
' set /a pnum+=1\n' +
' "{xdelta}" -d -v -s "{old}" "{patch}" "{new}" || (\n' +
' echo {msg}\n'.format(msg=_('Patching {old_esc} failed!')) +
' set /a pnum-=1\n' +
' set /a fnum+=1\n' +
' )\n' +
' ) ELSE (\n' +
' echo {msg}\n'.format(msg=_('{new_esc} already exists, skipping...')) +
' set /a nnum+=1\n' +
' )\n' +
') ELSE (\n' +
' echo {msg}\n'.format(msg=_('{old_esc} not present in folder, skipping...')) +
' set /a nnum+=1\n' +
')\n'
).format(
old=os.path.basename(pair[0]),
new=os.path.basename(pair[1]),
patch=os.path.basename(pair[2]),
old_esc=self.cmd_escape(os.path.basename(pair[0])),
new_esc=self.cmd_escape(os.path.basename(pair[1])),
xdelta=os.path.basename(self.xdelta_location)
)
)
else:
fh.write(
(
'IF EXIST "{old}" (\n' +
' IF NOT EXIST "{new}" (\n' +
' echo {msg}\n'.format(msg=_('Patching {old_esc}...')) +
' set /a pnum+=1\n' +
' REM xdelta unicode incompatibility workaround\n' +
' copy "{old}" "{intermediate_old}" > NUL\n' +
' "{xdelta}" -d -v -s "{intermediate_old}" "{patch}" "{intermediate_new}" || (\n' +
' echo {msg}\n'.format(msg=_('Patching {old_esc} failed!')) +
' set /a pnum-=1\n' +
' set /a fnum+=1\n' +
' )\n' +
' REM xdelta unicode incompatibility workaround\n' +
' move "{intermediate_new}" "{new}" > NUL\n' +
' del "{intermediate_old}" > NUL\n' +
' ) ELSE (\n' +
' echo {msg}\n'.format(msg=_('{new_esc} already exists, skipping...')) +
' set /a nnum+=1\n' +
' )\n' +
') ELSE (\n' +
' echo {msg}\n'.format(msg=_('{old_esc} not present in folder, skipping...')) +
' set /a nnum+=1\n' +
')\n'
).format(
old=os.path.basename(pair[0]),
new=os.path.basename(pair[1]),
intermediate_old=('~' + os.path.basename(pair[2]) + '.src'),
intermediate_new=('~' + os.path.basename(pair[2]) + '.dst'),
patch=os.path.basename(pair[2]),
old_esc=self.cmd_escape(os.path.basename(pair[0])),
new_esc=self.cmd_escape(os.path.basename(pair[1])),
xdelta=os.path.basename(self.xdelta_location)
)
)
fh.write('echo {msg}\n'.format(msg=_('Finished, with %pnum% files patched, %nnum% skipped and %fnum% failed.')))
fh.write('pause\n')
fh.write('chcp %cp% > NUL\n')
fh.close()
self.switch_languages('en_US')
def copy_executable(self, target_dir):
self.logger.log('Copying xdelta to the target folder {}.'.format(target_dir), LogLevel.debug)
shutil.copy(os.path.join(os.getcwd(), self.xdelta_location),
os.path.join(target_dir, os.path.basename(self.xdelta_location)))
def create_archive(self, file_pairs, target_dir):
zip_path = os.path.join(target_dir, self.archive_options['zip_name'])
self.logger.log('Creating a ZIP archive of the patch to \'{}\'.'.format(zip_path), LogLevel.debug)
zipped = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
for pair in file_pairs:
self.logger.log('Writing: {}...'.format(pair[2]), LogLevel.debug)
zipped.write(os.path.join(target_dir, pair[2]), pair[2])
self.logger.log('Writing the patch script...', LogLevel.debug)
zipped.write(os.path.join(target_dir, self.script_options['script_name'] + '.cmd'),
self.script_options['script_name'] + '.cmd')
self.logger.log('Writing the executable...', LogLevel.debug)
zipped.write(os.path.join(target_dir, os.path.basename(self.xdelta_location)),
os.path.basename(self.xdelta_location))
zipped.close()
def identify_file_pairs_by_name(self, old_dir, new_dir):
self.logger.log('Identifying potential file pairs for patching.', LogLevel.debug)
old_files = os.listdir(str(old_dir))
new_files = os.listdir(str(new_dir))
filemap = {}
for file in [self.create_file_entity(f, old_dir) for f in old_files]:
if file is not None:
self.logger.log('Found potential source file: {}'.format(file['filename']), LogLevel.debug)
self.logger.log(' Group {}, series {}, type {} {}, episode {}, version {}'.format(
file['group'],
file['name'],
file['specifier'],
file['ext'],
file['ep'],
file['ver']
), LogLevel.debug)
key = file.get('key')
if key in filemap:
filemap[key][0].append(file)
else:
filemap[key] = ([file], [])
for file in [self.create_file_entity(f, new_dir) for f in new_files]:
if file is not None:
key = file.get('key')
if key in filemap:
self.logger.log('Found potential target file: {}'.format(file['filename']), LogLevel.debug)
self.logger.log(' Group {}, series {}, type {} {}, episode {}, version {}'.format(
file['group'],
file['name'],
file['specifier'],
file['ext'],
file['ep'],
file['ver']
), LogLevel.debug)
filemap[key][1].append(file)
else:
# There were no matching files in the old directory, so this won't be a candidate for patching.
self.logger.log('Ignoring target file with no equivalent source: {}'.format(file['filename']),
LogLevel.debug)
# Let's prune those source files that were found that have no target equivalents.
item_cnt = len(filemap)
filemap = {k: v for (k, v) in filemap.items() if len(v[1]) >= 1}
if len(filemap) < item_cnt:
diff = item_cnt - len(filemap)
self.logger.log('Dropped {} source candidate{} with no equivalent targets.'.format(
str(diff), '' if diff == 1 else 's'), LogLevel.debug)
resolved_relations = []
for key, group in filemap.items():
highest_source = max(group[0], key=lambda x: x['ver'])
highest_target = max(group[1], key=lambda x: x['ver'])
if highest_source['ver'] == highest_target['ver']:
self.logger.log('Source and target versions of {} are both {}, ignoring the group.'.format(
key, highest_target['ver']
), LogLevel.debug)
continue
patch_name = self.get_patch_name(highest_source, highest_target)
# TODO: refactor, these are too complex and confusing to be tuples anymore
resolved_relations.append((highest_source['filename'], highest_target['filename'], patch_name,
highest_target['key'],
self.is_name_windows_safe(os.path.basename(highest_source['filename'])) and
self.is_name_windows_safe(os.path.basename(highest_target['filename'])),
highest_source,
highest_target))
self.logger.log('Queued: {} -> {}, patch name: {}'.format(
highest_source['filename'], highest_target['filename'], patch_name
), LogLevel.debug)
return resolved_relations
@staticmethod
def cmd_escape(s):
return re.sub(r'([\[\]\(\)^<>|])', r'^\1', s)
def get_patch_name(self, source, target):
try:
return self.patch_options['filename_pattern'].format(
raw_group=source['group'],
raw_name=source['name'],
raw_ep=source['ep'],
raw_specifier=source['specifier'],
raw_ext=source['ext'],
group=BatchPatch.neutralize_str(source['group']),
name=BatchPatch.neutralize_str(source['name']),
ep=BatchPatch.neutralize_str(source['ep']),
specifier=BatchPatch.neutralize_str(source['specifier']),
specifier_items=[BatchPatch.neutralize_str(s) for s in (
source['specifier'].split() if len(source['specifier']) > 0 else ['']
)],
type=BatchPatch.neutralize_str(source['specifier'] + source['ext']),
ext=BatchPatch.neutralize_str(source['ext']),
v_old=source['ver'],
v_new=target['ver'],
hash_old=source['crc'],
hash_new=target['crc']
)
except KeyError as e:
self.logger.log('Invalid variable {} in patch name pattern!'.format(e.args[0]), LogLevel.error)
exit()
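    # Illustrative example (not part of the original source): with the default
    # pattern '{name}{specifier_items[0]}_{ep}_v{v_old}v{v_new}.vcdiff', a pair
    # like "Show - 03 (720p).mkv" going from v1 to v2 would produce the patch
    # name "show720p_03_v1v2.vcdiff".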
@staticmethod
def create_file_entity(filename, basedir):
matcher = re.compile('(?#1. Group shortname)(?:\[([^\]]+?)\] )?'
'(?#2. Main name)(.+?)'
'(?#3. Episode specifier)(?: - ([a-zA-Z]*\d*))?'
'(?#4. Version specifier)(?:v(\d*))?'
'(?#5. Other specifiers)(?: \(([^\)]*)\))?'
'(?#6. CRC hash)(?: \[([0-9a-fA-F]{8})\])?'
'(?# Eat all extension-looking parts except the last one)(?:\..+)?'
'\.'
'(?# Do not match torrents)(?!torrent$)'
'(?#7. Get the file extension)([^\.]+)$')
match = matcher.match(filename)
if match:
path = os.path.join(basedir, match.group(0))
ver = match.group(4)
if ver is None:
ver = 1
specifier = match.group(5)
if specifier is None:
specifier = ''
return {
"key": "/".join([match.group(x) for x in [1, 2, 3, 5, 7] if isinstance(match.group(x), str)]),
"ver": int(ver),
"group": match.group(1),
"name": match.group(2),
"ep": match.group(3),
"specifier": specifier,
"crc": match.group(6),
"ext": match.group(7),
"filename": path
}
else:
return None
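    # Illustrative example (not part of the original source): a filename such as
    # "[Group] Show Name - 03v2 (720p) [ABCD1234].mkv" is parsed into group
    # "Group", name "Show Name", episode "03", version 2, specifier "720p",
    # CRC "ABCD1234" and extension "mkv".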
@staticmethod
def get_default_output_folder():
return os.path.join(os.getcwd(), 'batch-' + time.strftime('%Y-%m-%d-%H-%M'))
@staticmethod
def neutralize_str(name):
s = unicodedata.normalize('NFKD', name)
s = u"".join([c for c in s if not unicodedata.combining(c)])
return re.sub(r'[^a-z0-9_-]', '_', s.casefold())
unsafe_windows_filenames = [
'CON', 'PRN', 'AUX', 'NUL', 'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9'
]
@staticmethod
def is_name_windows_safe(name):
""" Verifies if the filename can be passed to xdelta on Windows. The user's codepage can be whatever,
so only accept a subset of the 7-bit ASCII as safe values.
"""
return name not in BatchPatch.unsafe_windows_filenames and \
name == re.sub(r'[^ !#$%&()+,\-.0-9;=@A-Z\[\]^_`a-z{}]', r'', name)
@staticmethod
def get_install_path():
return os.path.dirname(os.path.realpath(__file__))
if __name__ == "__main__":
gettext.install('batchpatch', os.path.join(BatchPatch.get_install_path(), 'i18n'))
prog = BatchPatch()
prog.run()
| mit | -1,857,567,898,504,627,200 | 42.974318 | 120 | 0.501643 | false |
dionisos2/CI | src/centres_of_interest_manager.py | 1 | 14888 | """
See CentresOfInterestManager class
"""
import re
import mylib.checking as checking
from mylib.string_op import replace_special_char
from mylib.notifier import Notifier
from xml.dom import minidom
from lxml import etree
from centre_of_interest import CentreOfInterest
def identity(x):
return x
class CentresOfInterestManager:
"""
    Class that permits creating/loading lists of CIs (centres of interest)
    and exporting them in different formats.
"""
def __init__(self, list_of_ci=None, notifier=None):
assert not(list_of_ci) or\
checking.is_all_instance(list_of_ci, CentreOfInterest)
self.ci_dtd = "ci.dtd"
self.ci_graph_dtd = "ci_graph.dtd"
# Templates html tags
self.html_start_list = "<ul>\n"
self.html_end_list = "</ul>\n"
self.html_date = "<h2>{date}</h2>\n"
self.html_item = '<li><a href="{url}">{name}</a></li>\n'
# Templates graphviz tags
self.dot_start_graph = "digraph CI {\n" +\
" node [fontcolor=red, fontsize=8];\n"
self.dot_end_graph = "}"
self.dot_official_item = ' "{name_official}"[URL="{url}", style=filled, fillcolor="0 0 0"];\n'
self.dot_unofficial_item = ' "{name_unofficial}"[URL="{url}", style=filled, fillcolor="0 0 0"];\n'
self.dot_without_url_item = ' "{name_without_url}"[style=filled, fillcolor="0 0 0"];\n'
self.dot_item_child = ' "{name_official}"->"{child}";\n'
if notifier is not None:
assert isinstance(notifier, Notifier)
self._only_official = False
self._notifier = notifier
if list_of_ci is None:
self._list_of_ci = []
else:
self._list_of_ci = list_of_ci
def notify(self, text):
"""
notify something to the user (use the Notifier object)
"""
if self._notifier is not None:
self._notifier.notify(text)
def __iter__(self):
for centre_of_interest in self._list_of_ci:
yield centre_of_interest
def __len__(self):
return len(self._list_of_ci)
@property
def list_of_ci(self):
""" get the list of ci managed """
return self._list_of_ci
def append(self, centre_of_interest):
""" add a new centre of interest to be managed """
assert isinstance(centre_of_interest, CentreOfInterest)
self._list_of_ci.append(centre_of_interest)
def __str__(self):
tmp = ""
for centre_of_interest in self._list_of_ci:
tmp += str(centre_of_interest)
return tmp
def find(self, ci_name):
""" find a centre of interest by name """
assert isinstance(ci_name, str)
for centre_of_interest in self:
if centre_of_interest.name == ci_name:
return centre_of_interest
return None
def verify_xml(self, xml_file_path, dtd_file_path):
with open(dtd_file_path, 'r', encoding='utf-8') as dtd_file:
with open(xml_file_path, 'r', encoding='utf-8') as xml_file:
dtd = etree.DTD(dtd_file)
root = etree.parse(xml_file)
if not dtd.validate(root):
                    raise IOError('Not valid according to "' + dtd_file_path +
'"\n' +
str(dtd.error_log.filter_from_errors()[0]))
def delete_unwanted_ci(self):
if self._only_official:
self._list_of_ci = [ci for ci in self._list_of_ci if ci.official]
for ci in self._list_of_ci:
ci.children = [child for child in ci.children if child.official]
def load_xml(self, xml_file, only_official=False, with_link=True):
""" load all the centres of interest from a xml file """
self.notify('load xml_file "' + xml_file + '"')
self.verify_xml(xml_file, self.ci_dtd)
self._list_of_ci = []
self._only_official = only_official
doc = minidom.parse(xml_file)
for ci_node in doc.documentElement.getElementsByTagName("CI"):
name = self._get_element(ci_node, "name")
if with_link:
                # url is None if the <url> tag is empty
url = self._get_element(ci_node, "url")
else:
url = ''
date = self._get_element(ci_node, "date")
official = self._get_element(ci_node, "official")
centre_of_interest = CentreOfInterest(name, url, date)
centre_of_interest.official = official
self.append(centre_of_interest)
def load_children(self, ci_graph_file):
"""
Make the link between the centres of interest and their children
"""
self.verify_xml(ci_graph_file, self.ci_graph_dtd)
doc = minidom.parse(ci_graph_file)
for ci_node in doc.documentElement.getElementsByTagName("CI"):
ci_name = ci_node.getElementsByTagName("name")[0].firstChild.nodeValue
centre_of_interest = self.find(ci_name)
if centre_of_interest is None:
raise ValueError('"' + ci_name + '" found in "' +
ci_graph_file + '" doesn\'t exist in ci.xml')
children_node = ci_node.getElementsByTagName("children")[0]
child_nodes = children_node.getElementsByTagName("child")
for child in child_nodes:
if child.firstChild is None:
raise ValueError("void child balise in '" + ci_name + "'")
else:
child_name = child.firstChild.nodeValue
child_ci = self.find(child_name)
if child_ci is not None:
centre_of_interest.add_child(child_ci)
else:
raise ValueError("try to add the child : '" +
child_name +
"' to '" +
ci_name +
"' but the child was not found")
@classmethod
def _get_element(cls, ci_node, element):
"""
Get the element 'element', of the centre of interest node 'ci_node'
"""
node = ci_node.getElementsByTagName(element)[0]
if node.firstChild is None:
return None
else:
return node.firstChild.nodeValue
def sorted_by_name(self, translate=None):
"""
Return the list of CI sorted by name.
:param translate: a function used to translate the CI name,
translate(ci_name)=ci_name_translated
:type translate: function
"""
if translate is not None:
return sorted(self._list_of_ci, key=lambda ci: translate(ci.name))
else:
return sorted(self._list_of_ci, key=lambda ci: ci.name)
def sorted_by_date(self, translate=None):
"""
Return the list of CI sorted by date.
:param translate: a function used to translate the CI name,
translate(ci_name)=ci_name_translated
:type translate: function
"""
if translate is None:
translate = identity
def get_date_name(centre_of_interest):
""" return a couple (ci_date, ci_name), to sort the list """
if centre_of_interest.date is not None:
return (centre_of_interest.date,
translate(centre_of_interest.name))
else:
return ("", translate(centre_of_interest.name))
return sorted(self._list_of_ci, key=get_date_name)
def load_template_dot(self, dot_file_path):
self.notify('load dot template file "' + dot_file_path + '"')
def get_match(match, message):
if not match:
raise IOError(message)
else:
return match.group(1)
with open(dot_file_path, 'r', encoding='utf-8') as dot_file:
template = dot_file.read()
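            # The template is sliced on its "// ..." marker comments: the part before
            # "// official ci start" is the graph header, each start/end pair yields a
            # per-CI item template, and the tail after "// child end" closes the graph.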
start_graph = re.search(r'^(.*)// official ci start',
template,
re.DOTALL)
self.dot_start_graph = get_match(start_graph,
"Incorrect dot template, can’t find start")
end_graph = re.search(r'// child end(.*)$', template, re.DOTALL)
self.dot_end_graph = get_match(end_graph, "Incorrect dot template, can’t find end")
official_item = re.search(r'// official ci start(.*)// official ci end',
template,
re.DOTALL)
self.dot_official_item = get_match(official_item,
"Incorrect dot template, can’t find official ci item")
unofficial_item = re.search(r'// unofficial ci start(.*)// unofficial ci end',
template,
re.DOTALL)
self.dot_unofficial_item = get_match(unofficial_item,
"Incorrect dot template, can’t find unofficial ci item")
without_url_item = re.search(r'// without_url start(.*)// without_url end',
template,
re.DOTALL)
self.dot_without_url_item = get_match(without_url_item,
"Incorrect dot template, can’t find without url ci item")
item_child = re.search(r'// child start(.*)// child end',
template,
re.DOTALL)
self.dot_item_child = get_match(item_child,
"Incorrect dot template, can’t find child ci item")
def load_template_html(self, html_file_path):
self.notify('load html template file "' + html_file_path + '"')
with open(html_file_path, 'r', encoding='utf-8') as html_file:
template = html_file.read()
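            # Same idea as the dot template: the HTML template is sliced on its
            # "<!-- date -->" / "<!-- item -->" marker comments into a header, a date
            # heading template, an item template and a footer.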
start_list = re.search(r'^(.*)<!-- date -->', template, re.DOTALL)
if not start_list:
raise IOError("Incorrect html template, can’t find start")
else:
self.html_start_list = start_list.group(1)
end_list = re.search(r'<!-- /item -->(.*)$', template, re.DOTALL)
if not end_list:
raise IOError("Incorrect html template, can’t find end")
else:
self.html_end_list = end_list.group(1)
date = re.search(r'<!-- date -->(.*)<!-- /date -->',
template,
re.DOTALL)
if not date:
raise IOError("Incorrect html template, can’t find date")
else:
self.html_date = date.group(1)
item = re.search(r'<!-- item -->(.*)<!-- /item -->',
template,
re.DOTALL)
if not item:
raise IOError("Incorrect html template, can’t find item")
else:
self.html_item = item.group(1)
def to_html_list(self, order="by_name", translate=None):
"""
Export the sorted list of CI to html.
:param order: choose "by_name" to sort by name and "by_date" to sort by date
:param translate: a function used to translate the CI name,
translate(ci_name)=ci_name_translated
:type order: str
:type translate: function
:return: return a string corresponding of the html page
"""
self.delete_unwanted_ci()
if translate is None:
translate = identity
string = self.html_start_list
if order == "by_name":
sorted_list_of_ci = self.sorted_by_name(translate)
elif order == "by_date":
sorted_list_of_ci = self.sorted_by_date(translate)
else:
raise ValueError("order should be 'by_name', or 'by_date'. '" +
order +
"' given.")
if (order == "by_date")and(len(sorted_list_of_ci) > 0):
date = sorted_list_of_ci[0].date
if date is not None:
str_date = date
else:
str_date = "unknown"
string += self.html_date.replace('{date}', str_date)
for centre_of_interest in sorted_list_of_ci:
if (order == "by_date")and(centre_of_interest.date != date):
date = centre_of_interest.date
if date is not None:
str_date = date
else:
str_date = "unknown"
string += self.html_date.replace('{date}', str_date)
if centre_of_interest.url is not None:
item = self.html_item.replace('{url}', centre_of_interest.url)
item = item.replace('{name}',
translate(centre_of_interest.name))
string += item
string += self.html_end_list
return string
def to_graphviz(self, ci_graph_file, translate=None):
"""
Export the sorted list of CI to a graphviz dot format.
:param translate: a function used to translate the CI name,
translate(ci_name)=ci_name_translated
:type translate: function
:return: return a string corresponding of the dot file
"""
self.load_children(ci_graph_file)
self.delete_unwanted_ci()
if translate is None:
translate = identity
string = self.dot_start_graph
for centre_of_interest in self:
if centre_of_interest.url is None or centre_of_interest.url == '':
dot_template = self.dot_without_url_item
else:
if centre_of_interest.official:
dot_template = self.dot_official_item
else:
dot_template = self.dot_unofficial_item
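            # Fill the chosen item template: the {name...} placeholder takes the
            # translated CI name, {url} takes the CI url, and one extra edge line is
            # emitted per child below.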
item_name = translate(centre_of_interest.name)
item = re.sub(r'{name.*?}', item_name, dot_template)
if centre_of_interest.url is not None:
item = re.sub(r'{url}', centre_of_interest.url, item)
string += item
for child in centre_of_interest.children:
item_child = re.sub(r'{name.*?}', item_name,
self.dot_item_child)
item_child = re.sub(r'{child}', translate(child.name),
item_child)
string += item_child
string += self.dot_end_graph
return replace_special_char(string)
| gpl-2.0 | -8,393,297,476,626,185,000 | 37.921466 | 107 | 0.52159 | false |
skorokithakis/django-fancy-cache | fancy_tests/tests/test_views.py | 1 | 8032 | import unittest
import re
from nose.tools import eq_, ok_
from django.test.client import RequestFactory
from django.core.cache import cache
from fancy_cache.memory import find_urls
from . import views
class TestViews(unittest.TestCase):
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_render_home1(self):
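        # The view embeds a random string in its response; if two successive calls
        # return the same string, the second response was served from the cache.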
request = self.factory.get('/anything')
response = views.home(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_1 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
# do it again
response = views.home(request)
eq_(response.status_code, 200)
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
eq_(random_string_1, random_string_2)
def test_render_home2(self):
authenticated = RequestFactory(AUTH_USER='peter')
request = self.factory.get('/2')
response = views.home2(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_1 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
# do it again
response = views.home2(request)
eq_(response.status_code, 200)
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
eq_(random_string_1, random_string_2)
# do it again, but with a hint to disable cache
request = authenticated.get('/2')
response = views.home2(request)
eq_(response.status_code, 200)
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
ok_(random_string_1 != random_string_2)
def test_render_home3(self):
request = self.factory.get('/anything')
response = views.home3(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_1 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
ok_('In your HTML' in response.content.decode("utf8"))
extra_random_1 = re.findall('In your HTML:(\w+)', response.content.decode("utf8"))[0]
response = views.home3(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
extra_random_2 = re.findall('In your HTML:(\w+)', response.content.decode("utf8"))[0]
ok_('In your HTML' in response.content.decode("utf8"))
eq_(random_string_1, random_string_2)
# the post_process_response is only called once
eq_(extra_random_1, extra_random_2)
def test_render_home3_no_cache(self):
factory = RequestFactory(AUTH_USER='peter')
request = factory.get('/3')
response = views.home3(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
ok_('In your HTML' not in response.content.decode("utf8"))
def test_render_home4(self):
request = self.factory.get('/4')
response = views.home4(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_1 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
ok_('In your HTML' in response.content.decode("utf8"))
extra_random_1 = re.findall('In your HTML:(\w+)', response.content.decode("utf8"))[0]
response = views.home4(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
extra_random_2 = re.findall('In your HTML:(\w+)', response.content.decode("utf8"))[0]
ok_('In your HTML' in response.content.decode("utf8"))
eq_(random_string_1, random_string_2)
# the post_process_response is now called every time
ok_(extra_random_1 != extra_random_2)
def test_render_home5(self):
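        # home5 keys its cache on the "foo" query parameter only: changing "foo"
        # busts the cache, while extra parameters such as "other" are ignored.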
request = self.factory.get('/4', {'foo': 'bar'})
response = views.home5(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_1 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
request = self.factory.get('/4', {'foo': 'baz'})
response = views.home5(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
ok_(random_string_1 != random_string_2)
request = self.factory.get('/4', {'foo': 'baz', 'other': 'junk'})
response = views.home5(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_3 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
eq_(random_string_2, random_string_3)
def test_render_home5bis(self):
request = self.factory.get('/4', {'foo': 'bar'})
response = views.home5bis(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_1 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
request = self.factory.get('/4', {'foo': 'baz'})
response = views.home5bis(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
ok_(random_string_1 != random_string_2)
request = self.factory.get('/4', {'foo': 'baz', 'bar': 'foo'})
response = views.home5bis(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_3 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
eq_(random_string_2, random_string_3)
def test_remember_stats_all_urls(self):
request = self.factory.get('/anything')
response = views.home6(request)
eq_(response.status_code, 200)
# now ask the memory thing
match, = find_urls(urls=['/anything'])
eq_(match[0], '/anything')
eq_(match[2]['hits'], 0)
eq_(match[2]['misses'], 1)
# second time
response = views.home6(request)
eq_(response.status_code, 200)
match, = find_urls(urls=['/anything'])
eq_(match[0], '/anything')
eq_(match[2]['hits'], 1)
eq_(match[2]['misses'], 1)
def test_remember_stats_all_urls_looong_url(self):
request = self.factory.get(
'/something/really/long/to/start/with/right/here/since/this/will/'
'test/that/things/work/with/long/urls/too',
{
'line1': 'Bad luck, wind been blowing at my back',
'line2': "I was born to bring trouble to wherever I'm at",
'line3': "Got the number thirteen, tattooed on my neck",
'line4': "When the ink starts to itch, ",
'line5': "then the black will turn to red",
}
)
response = views.home6(request)
eq_(response.status_code, 200)
# now ask the memory thing
match, = find_urls()
ok_(match[0].startswith('/something/really'))
eq_(match[2]['hits'], 0)
eq_(match[2]['misses'], 1)
# second time
response = views.home6(request)
eq_(response.status_code, 200)
match, = find_urls([])
ok_(match[0].startswith('/something/really'))
eq_(match[2]['hits'], 1)
eq_(match[2]['misses'], 1)
| bsd-3-clause | 6,376,716,639,639,333,000 | 40.833333 | 93 | 0.593252 | false |
ProteinDF/QCLObot | tests/test_amberobject.py | 1 | 1267 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import cProfile
from pstats import Stats
from qclobot.amberobject import AmberObject
from qclobot.utils import get_model
import proteindf_bridge as bridge
class TestAmberObject(unittest.TestCase):
def setUp(self):
pass
#self.pr = cProfile.Profile()
#self.pr.enable()
pdb = bridge.Pdb("./data/sample/4tut.addH.pdb")
models = pdb.get_atomgroup()
self.model = get_model(models)
def tearDown(self):
#p = Stats (self.pr)
#p.strip_dirs()
#p.sort_stats ('cumtime')
#p.print_stats()
pass
def test_opt(self):
self.amber_obj = AmberObject("test_amber_opt")
self.amber_obj.model = self.model
self.amber_obj.opt()
def test_md(self):
self.amber_obj = AmberObject("test_amber_md")
self.amber_obj.model = self.model
self.amber_obj.md(steps=100, dt=0.002)
def test_suite():
"""
builds the test suite.
"""
def _suite(test_class):
return unittest.makeSuite(test_class)
suite = unittest.TestSuite()
suite.addTests(_suite(TestAmberObject))
return suite
if __name__ == '__main__':
unittest.main(defaultTest = 'test_suite')
| gpl-3.0 | 2,612,895,859,873,137,000 | 23.365385 | 55 | 0.616417 | false |
sanja7s/SR_Twitter | src_general/explain_FORMATION_DELETION_REL.py | 1 | 6415 | #!/usr/bin/env python
# a bar plot with errorbars
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Polygon
from pylab import *
width = 0.28 # the width of the bars
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 14}
matplotlib.rc('font', **font)
# plot with various axes scales
plt.figure(1)
fig = gcf()
def plot_bars_FORMATION_STRONG_REL(PersistingMeans, PersistingStd, Means, Std, PERSreal, PERSstd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(321)
ax = plt.subplot2grid((1,2),(0, 0))
#rects1 = ax.bar(ind-0.2, PersistingMeans, width, color='c', yerr=PersistingStd, align='center')
#rects2 = ax.bar(ind+0.2, Means, width, color='cyan', yerr=Std, align='center')
rects1 = ax.bar(ind-width, PersistingMeans, width, color='darkred', \
align='center', yerr=PersistingStd, linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects2 = ax.bar(ind, Means, width, color='lightcoral', \
yerr=Std, align='center', linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects3 = ax.bar(ind+width, PERSreal, width, color='r',\
yerr=PERSstd, align='center',linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
ax.legend((rects1[0], rects2[0], rects3[0]), \
('Formed and persisting', \
'Formed and non-persisting', 'Persisting average'),\
frameon=False)
# add some text for labels, title and axes ticks
#ax.set_title('Relative status (strong contacts)')
ax.set_xticks(ind )
ax.set_xticklabels(('Before', 'At formation', 'After'))
ax.set_ylim([-0.5, 5])
ax.set_yticks((0,5))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.2f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
return plt
N = 3
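# Each group of three bars compares relative status before, at, and after the event
# (link formation in the left subplot, link decommission in the right one).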
##########################################################################
# NON PERSISTING LINKS
# STRONG contacts REL
formationDeletionMeans = (1.12747979427, 1.56808719079, 1.62160176341)
formationDeletionStd = (1.35650452374, 1.71205560699, 1.83913259462)
# PERSISTING LINKS
# STRONG contacts REL
formationNodeletionMeans = (0.964889222681, 1.44874202028, 1.68794592565)
formationNodeletionStd = (1.30256068643, 1.64860382968, 1.94388833634)
SRMeans = (0.856632, 0.906697, 0.995124, 1.010403, 1.031534)
SRStd = (1.114944, 1.194131, 1.283704, 1.245234, 1.317081)
SRMeansS = (0.96007799999999988,0.96007799999999988,0.96007799999999988)
SRStdS = (1.2310188,1.2310188,1.2310188)
plt1 = plot_bars_FORMATION_STRONG_REL(formationNodeletionMeans, formationNodeletionStd,\
formationDeletionMeans, formationDeletionStd, SRMeansS, SRStdS)
def plot_bars_DELETION_STRONG_REL(PersistingMeans, PersistingStd, Means, Std, PERSreal, PERSstd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(321)
ax = plt.subplot2grid((1,2),(0, 1))
#rects1 = ax.bar(ind-0.2, PersistingMeans, width, color='c', yerr=PersistingStd, align='center')
#rects2 = ax.bar(ind+0.2, Means, width, color='cyan', yerr=Std, align='center')
rects1 = ax.bar(ind-width, PersistingMeans, width, color='c', \
align='center', yerr=PersistingStd, linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects2 = ax.bar(ind, Means, width, color='cyan', \
yerr=Std, align='center', linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects3 = ax.bar(ind+width, PERSreal, width, color='r',\
yerr=PERSstd, align='center',linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
ax.legend((rects1[0], rects2[0], rects3[0]), \
('Persisting decommissioned', \
'Non-persisting decommissioned', 'Persisting average'),\
loc='best',frameon=False)
# add some text for labels, title and axes ticks
#ax.set_title('Relative status (strong contacts)')
ax.set_xticks(ind )
ax.set_xticklabels(('Before', 'At decommission', 'After'))
ax.set_ylim([-0.5, 5])
ax.set_yticks((0,5))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.2f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
return plt
##########################################################################
# NON PERSISTING LINKS
# STRONG contacts REL
#deletionFormationMeans = (1.35860783095, 1.40335612181, 1.38222498446)
#deletionFormationStd = (1.39698763227, 1.515042018, 1.6001731639)
deletionFormationMeans = (1.21614009307, 1.58645603723, 1.613397012)
deletionFormationStd = (1.39228801763, 1.73298601092, 1.84822380219)
# PERSISTING LINKS
#deletionNoformationMeans = (1.16101995042, 1.52591193484, 1.54066816196)
#deletionNoformationStd = (1.36105887603, 1.69996084625, 1.80123581372)
deletionNoformationMeans = (1.09195402299, 1.16457680251, 1.09717868339)
deletionNoformationStd = (1.25857893939, 1.33146910699, 1.31900439894)
SRMeans = (0.856632, 0.906697, 0.995124, 1.010403, 1.031534)
SRStd = (1.114944, 1.194131, 1.283704, 1.245234, 1.317081)
SRMeansS = (0.96007799999999988,0.96007799999999988,0.96007799999999988)
SRStdS = (1.2310188,1.2310188,1.2310188)
plt1 = plot_bars_DELETION_STRONG_REL(deletionNoformationMeans, deletionNoformationStd,\
deletionFormationMeans, deletionFormationStd, SRMeansS, SRStdS)
##########################################################################
plt.tight_layout()
fig = plt.gcf()
fig.set_size_inches(12.4,4.5)
plt.tight_layout()
#plt.figtext(0.20, 0.49, 'Relative status of the pair: weak contacts')
#plt.figtext(0.27, 0.973, 'Relative status of the pair: strong contacts')
fig.suptitle('Relative status (strong contacts)', verticalalignment='center', horizontalalignment='center', size = 16)
#fig.suptitle('Sum including weak contacts', verticalalignment='center', y=0.5, horizontalalignment='center', size = 16)
plt.savefig("/home/sscepano/Projects7s/Twitter-workspace/DATA/General/explain_FORMATION_DELETION_REL.eps", dpi=710)
| mit | -5,203,250,858,654,813,000 | 33.304813 | 120 | 0.677631 | false |
DavidCPhillips/spfjs | configure.py | 1 | 26384 | #!/usr/bin/env python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Use of this source code is governed by The MIT License.
# See the LICENSE file for details.
"""Script that generates the build.ninja file for SPF."""
__author__ = '[email protected] (Alex Nicksay)'
import errno
import distutils.version
import glob
import os
import shutil
import subprocess
import sys
import urllib
import zipfile
def check_requirements():
# Closure Compiler after v20131014 requires Java 7.
required_java = distutils.version.LooseVersion('1.7')
try:
cmd = subprocess.Popen(['java', '-version'],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
except OSError:
print('ERROR: Unable to find java.')
print('Please install java and try again.')
sys.exit(1)
out = cmd.stdout.readlines()
if len(out) <= 0:
print('ERROR: Unable to get java version.')
print('Please ensure java is properly installed and try again.')
sys.exit(1)
version_string = out[0].split(' ')[-1].strip('\n"')
installed_java = distutils.version.LooseVersion(version_string)
if installed_java < required_java:
print('ERROR: Installed java version "%s" is less than the required "%s".' %
(installed_java, required_java))
print('Please upgrade java and try again.')
sys.exit(1)
def fetch_dependencies():
# Ninja v1.5.1
ninja_dir = 'vendor/ninja'
ninja_syntax = 'vendor/ninja/misc/ninja_syntax.py'
ninja_syntax_url = 'https://github.com/martine/ninja/archive/v1.5.1.zip'
ninja_syntax_zip = 'vendor/ninja/v1.5.1.zip'
ninja_binary = 'vendor/ninja/ninja'
if 'darwin' in sys.platform:
ninja_binary_url = 'https://github.com/martine/ninja/releases/download/v1.5.1/ninja-mac.zip'
ninja_binary_zip = 'vendor/ninja/ninja-mac.zip'
elif 'win' in sys.platform:
ninja_binary_url = 'https://github.com/martine/ninja/releases/download/v1.5.1/ninja-win.zip'
ninja_binary_zip = 'vendor/ninja/ninja-win.zip'
else:
ninja_binary_url = 'https://github.com/martine/ninja/releases/download/v1.5.1/ninja-linux.zip'
ninja_binary_zip = 'vendor/ninja/ninja-linux.zip'
if not os.path.exists(ninja_dir):
try:
os.makedirs(ninja_dir)
except OSError:
print('ERROR: Could not create the Ninja directory.')
print('Please run "mkdir %s" manually and try again.' % ninja_dir)
sys.exit(1)
if not os.path.exists(ninja_syntax_zip):
print('Downloading Ninja syntax...')
try:
urllib.urlretrieve(ninja_syntax_url, ninja_syntax_zip)
except (IOError, urllib.ContentTooShortError):
print('ERROR: Unable to download Ninja syntax zip file.')
print('Please download "%s" to "%s" and try again.' %
(ninja_syntax_url, ninja_syntax_zip))
sys.exit(1)
if not os.path.exists(ninja_binary_zip):
print('Downloading Ninja binary...')
try:
urllib.urlretrieve(ninja_binary_url, ninja_binary_zip)
except (IOError, urllib.ContentTooShortError):
print('ERROR: Unable to download Ninja binary zip file.')
print('Please download "%s" to "%s" and try again.' %
(ninja_binary_url, ninja_binary_zip))
sys.exit(1)
if not os.path.exists(ninja_syntax):
try:
if not os.path.exists(os.path.dirname(ninja_syntax)):
os.makedirs(os.path.dirname(ninja_syntax))
with zipfile.ZipFile(ninja_syntax_zip) as zf:
# ZipFile.extract is simpler, but manually opening and copying
# the file objects enables removing the version prefix.
with zf.open('ninja-1.5.1/misc/ninja_syntax.py') as src:
with open(ninja_syntax, 'w') as out:
shutil.copyfileobj(src, out)
with zf.open('ninja-1.5.1/README') as src:
with open(os.path.join(ninja_dir, 'README'), 'w') as out:
shutil.copyfileobj(src, out)
with zf.open('ninja-1.5.1/COPYING') as src:
with open(os.path.join(ninja_dir, 'COPYING'), 'w') as out:
shutil.copyfileobj(src, out)
except (OSError, IOError, RuntimeError, zipfile.BadZipfile,
zipfile.LargeZipFile):
print('ERROR: Unable to unzip Ninja syntax zip file.')
print('Please delete "%s" and try again.' % ninja_syntax_zip)
if not os.path.exists(ninja_binary):
try:
with zipfile.ZipFile(ninja_binary_zip) as zf:
zf.extract('ninja', ninja_dir)
os.chmod(ninja_binary, 0755)
except (OSError, IOError, RuntimeError, zipfile.BadZipfile,
zipfile.LargeZipFile):
print('ERROR: Unable to unzip Ninja syntax zip file.')
print('Please delete "%s" and try again.' % ninja_syntax_zip)
def find_js_sources():
sources = ['src/client/stub.js']
for root, dirs, files in os.walk('src/client'):
if root.endswith('testing'):
continue
for file in files:
if file.endswith('.js'):
if file.startswith('stub') or file.endswith('test.js'):
continue
sources.append(os.path.join(root, file))
return sources
def find_js_tests():
tests = []
for root, dirs, files in os.walk('src/client'):
for file in files:
if file.endswith('test.js'):
tests.append(os.path.join(root, file))
return tests
def find_demo_sources():
sources = []
for root, dirs, files in os.walk('src/server/demo'):
for file in files:
if os.path.splitext(file)[1]: # Only grab source files.
sources.append(os.path.join(root, file))
return sources
def create_ninja_file():
# Do this during execution to allow downloading the syntax file first.
sys.path.insert(0, 'vendor/ninja/misc')
import ninja_syntax
os.chdir(os.path.dirname(os.path.abspath(__file__)))
buildfile = open('build.ninja', 'w')
return ninja_syntax.Writer(buildfile)
def write_header(ninja):
ninja.comment('Copyright 2014 Google Inc. All rights reserved.')
ninja.newline()
ninja.comment('Use of this source code is governed by The MIT License.')
ninja.comment('See the LICENSE file for details.')
ninja.newline()
ninja.comment('This generated file is used to build SPF.')
ninja.comment('To update, run %s.' % os.path.basename(__file__))
ninja.newline()
def write_variables(ninja):
ninja.variable('builddir', 'build')
ninja.variable('jscompiler_jar', 'vendor/closure-compiler/compiler.jar')
ninja.variable('jslinter_bin', 'vendor/closure-linter/bin/gjslint')
ninja.variable('jslinter_dir', 'vendor/closure-linter')
ninja.variable('jsfixer_bin', 'vendor/closure-linter/bin/fixjsstyle')
ninja.variable('jsfixer_dir', 'vendor/closure-linter')
ninja.variable('license_js', 'src/license.js')
ninja.variable('license', 'cat $license_js')
ninja.variable('preamble', 'true')
ninja.variable('wrapper_js', 'src/wrapper.js')
common_jsflags = [
'--compilation_level ADVANCED_OPTIMIZATIONS',
'--define "COMPILED=true"',
'--manage_closure_dependencies true',
'--process_closure_primitives true',
]
prod_jsflags = common_jsflags + [
'--define "SPF_DEBUG=false"',
'--summary_detail_level 3',
'--warning_level VERBOSE',
'--jscomp_error accessControls',
'--jscomp_error ambiguousFunctionDecl',
'--jscomp_error checkEventfulObjectDisposal',
'--jscomp_error checkRegExp',
'--jscomp_error checkStructDictInheritance',
'--jscomp_error checkTypes',
'--jscomp_error checkVars',
'--jscomp_error const',
'--jscomp_error constantProperty',
'--jscomp_error deprecated',
'--jscomp_error duplicateMessage',
'--jscomp_error es3',
'--jscomp_error es5Strict',
'--jscomp_error externsValidation',
'--jscomp_error fileoverviewTags',
'--jscomp_error globalThis',
'--jscomp_error internetExplorerChecks',
'--jscomp_error invalidCasts',
'--jscomp_error misplacedTypeAnnotation',
'--jscomp_error missingGetCssName',
'--jscomp_error missingProperties',
'--jscomp_error missingProvide',
'--jscomp_error missingRequire',
'--jscomp_error missingReturn',
'--jscomp_error newCheckTypes',
'--jscomp_error nonStandardJsDocs',
'--jscomp_error suspiciousCode',
'--jscomp_error strictModuleDepCheck',
'--jscomp_error typeInvalidation',
'--jscomp_error undefinedNames',
'--jscomp_error undefinedVars',
'--jscomp_error unknownDefines',
'--jscomp_error uselessCode',
'--jscomp_error useOfGoogBase',
'--jscomp_error visibility',
]
debug_jsflags = common_jsflags + [
'--debug true',
'--formatting PRETTY_PRINT',
]
trace_jsflags = common_jsflags + [
'--define "SPF_DEBUG=false"',
'--define "SPF_TRACING=true"',
]
dev_jsflags = [
'--compilation_level WHITESPACE_ONLY',
'--formatting PRETTY_PRINT',
'--manage_closure_dependencies true',
'--closure_entry_point spf.main',
]
main_jsflags = [
'--closure_entry_point spf.main',
'--output_wrapper_file $wrapper_js',
]
bootloader_jsflags = [
'--closure_entry_point spf.bootloader',
'--output_wrapper "(function(){%output%})();"',
]
ninja.variable('prod_jsflags', ' '.join(prod_jsflags))
ninja.variable('debug_jsflags', ' '.join(debug_jsflags))
ninja.variable('trace_jsflags', ' '.join(trace_jsflags))
ninja.variable('dev_jsflags', ' '.join(dev_jsflags))
ninja.variable('main_jsflags', ' '.join(main_jsflags))
ninja.variable('bootloader_jsflags', ' '.join(bootloader_jsflags))
ninja.newline()
def write_rules(ninja):
ninja.newline()
ninja.comment('Build JS files.');
ninja.rule('jscompile',
command='$license > $out '
'&& $preamble >> $out '
'&& java -jar $jscompiler_jar $flags $in >> $out '
'|| (rm $out; false)',
description='jscompile $out')
ninja.newline()
ninja.comment('Lint and fix JS files.')
ninja.rule('jslint',
command='export PYTHONUSERBASE="$jslinter_dir" '
'&& python $jslinter_bin '
' $flags $in',
description='jslint $in')
ninja.rule('jsfix',
command='export PYTHONUSERBASE="$jsfixer_dir" '
'&& python $jsfixer_bin '
' $flags $in',
description='jsfix $in')
ninja.newline()
ninja.comment('Build the build file.')
ninja.rule('configure',
command='python ./configure.py',
generator=True)
ninja.newline()
ninja.comment('Symlink.')
ninja.rule('symlink',
command='ln -sf $prefix$in $out',
description='symlink $prefix$in -> $out')
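  # $prefix is set per target to enough "../" segments to climb from the output
  # file's directory back to the repository root, so the relative symlink resolves.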
ninja.newline()
ninja.comment('Download files.')
ninja.rule('download',
command='curl -L $url -o $out',
generator=True,
description='download $url -> $out')
ninja.newline()
ninja.comment('Unpack files.')
ninja.rule('unzip',
command='unzip -u $flags $in $paths -x $exclude -d $dest',
restat=True,
description='unzip $in -> $dest')
ninja.rule('untgz',
command='tar -xmz -f $in $flags -C $dest --exclude $exclude',
description='untgz $in -> $dest')
ninja.newline()
ninja.comment('Generate test manifest.')
ninja.rule('gen_test_manifest',
command=('echo $in '
'| tr " " "\\n" '
'| sed "s,^,document.write(\'<script src=\\"$prefix,g" '
'| sed "s,$$,\\"></script>\');,g" '
'> $out'),
description='generate $out')
ninja.newline()
ninja.comment('Setup python packages.')
ninja.rule('setup',
command=('cd $dir '
'&& export PYTHONUSERBASE="$$PWD" '
'&& python setup.py -q install --user '
'&& python setup.py -q clean --all'),
generator=True,
description='setup $dir')
def write_targets(ninja):
license_js = '$license_js'
wrapper_js = '$wrapper_js'
ninja.newline()
ninja.comment('Libraries.')
# Closure Compiler v20140625
jscompiler_jar = '$jscompiler_jar' # Globally defined to allow use in rules.
jscompiler_url = 'http://dl.google.com/closure-compiler/compiler-20140625.zip'
jscompiler_zip = 'vendor/closure-compiler/compiler-20140625.zip'
jscompiler_zip_dest = 'vendor/closure-compiler'
jscompiler_zip_outs = [
'vendor/closure-compiler/COPYING',
'vendor/closure-compiler/README.md',
'vendor/closure-compiler/compiler.jar',
]
ninja.build(jscompiler_zip, 'download',
variables=[('url', jscompiler_url)])
ninja.build(jscompiler_zip_outs, 'unzip', jscompiler_zip,
variables=[('dest', jscompiler_zip_dest)])
# Closure Linter v2.3.13
jslinter_bin = '$jslinter_bin' # Globally defined to allow use in rules.
jslinter_url = 'https://closure-linter.googlecode.com/files/closure_linter-2.3.13.tar.gz'
jslinter_tgz = 'vendor/closure-linter/closure_linter-2.3.13.tar.gz'
jslinter_tgz_dest = 'vendor/closure-linter'
jslinter_tgz_outs = [
'vendor/closure-linter/PKG-INFO',
'vendor/closure-linter/setup.py',
'vendor/closure-linter/closure_linter/',
'vendor/closure-linter/closure_linter/requireprovidesorter.py',
'vendor/closure-linter/closure_linter/scopeutil.py',
'vendor/closure-linter/closure_linter/ecmalintrules.py',
'vendor/closure-linter/closure_linter/error_fixer.py',
'vendor/closure-linter/closure_linter/javascripttokenizer.py',
'vendor/closure-linter/closure_linter/runner.py',
'vendor/closure-linter/closure_linter/checkerbase.py',
'vendor/closure-linter/closure_linter/common/',
'vendor/closure-linter/closure_linter/common/simplefileflags.py',
'vendor/closure-linter/closure_linter/common/tokenizer.py',
'vendor/closure-linter/closure_linter/common/error.py',
'vendor/closure-linter/closure_linter/common/erroraccumulator.py',
'vendor/closure-linter/closure_linter/common/htmlutil.py',
'vendor/closure-linter/closure_linter/common/tokens.py',
'vendor/closure-linter/closure_linter/common/lintrunner.py',
'vendor/closure-linter/closure_linter/common/position.py',
'vendor/closure-linter/closure_linter/common/matcher.py',
'vendor/closure-linter/closure_linter/common/__init__.py',
'vendor/closure-linter/closure_linter/common/erroroutput.py',
'vendor/closure-linter/closure_linter/common/errorhandler.py',
'vendor/closure-linter/closure_linter/common/filetestcase.py',
'vendor/closure-linter/closure_linter/javascriptstatetracker.py',
'vendor/closure-linter/closure_linter/__init__.py',
'vendor/closure-linter/closure_linter/statetracker.py',
'vendor/closure-linter/closure_linter/error_check.py',
'vendor/closure-linter/closure_linter/fixjsstyle.py',
'vendor/closure-linter/closure_linter/errorrules.py',
'vendor/closure-linter/closure_linter/errorrecord.py',
'vendor/closure-linter/closure_linter/gjslint.py',
'vendor/closure-linter/closure_linter/checker.py',
'vendor/closure-linter/closure_linter/closurizednamespacesinfo.py',
'vendor/closure-linter/closure_linter/aliaspass.py',
'vendor/closure-linter/closure_linter/ecmametadatapass.py',
'vendor/closure-linter/closure_linter/testutil.py',
'vendor/closure-linter/closure_linter/errors.py',
'vendor/closure-linter/closure_linter/javascripttokens.py',
'vendor/closure-linter/closure_linter/indentation.py',
'vendor/closure-linter/closure_linter/javascriptlintrules.py',
'vendor/closure-linter/closure_linter/tokenutil.py',
'vendor/closure-linter/README',
]
jslinter_setup_outs = [
'vendor/closure-linter/bin/fixjsstyle',
'vendor/closure-linter/bin/gjslint',
'vendor/closure-linter/closure_linter.egg-info/dependency_links.txt',
'vendor/closure-linter/closure_linter.egg-info/entry_points.txt',
'vendor/closure-linter/closure_linter.egg-info/PKG-INFO',
'vendor/closure-linter/closure_linter.egg-info/requires.txt',
'vendor/closure-linter/closure_linter.egg-info/SOURCES.txt',
'vendor/closure-linter/closure_linter.egg-info/top_level.txt',
'vendor/closure-linter/dist/closure_linter-2.3.13-py2.7.egg',
'vendor/closure-linter/lib/python/site-packages/closure_linter-2.3.13-py2.7.egg',
'vendor/closure-linter/lib/python/site-packages/easy-install.pth',
'vendor/closure-linter/lib/python/site-packages/python_gflags-2.0-py2.7.egg',
]
ninja.build(jslinter_tgz, 'download',
variables=[('url', jslinter_url)])
ninja.build(jslinter_tgz_outs, 'untgz', jslinter_tgz,
variables=[('flags', '--strip-components 1'),
('exclude', '"*_test*"'),
('dest', jslinter_tgz_dest)])
ninja.build(jslinter_setup_outs, 'setup', jslinter_tgz_outs,
variables=[('dir', 'vendor/closure-linter')])
# WebPy @73f1119649
webpy_url = 'https://github.com/webpy/webpy/archive/73f1119649ffe54ba26ddaf6a612aaf1dab79b7f.zip'
webpy_zip = 'vendor/webpy/webpy-73f1119649ffe54ba26ddaf6a612aaf1dab79b7f.zip'
webpy_zip_root_dest = 'vendor/webpy'
webpy_zip_root_outs = [
'vendor/webpy/LICENSE.txt',
'vendor/webpy/README.md',
]
webpy_zip_web_dest = 'vendor/webpy/web'
webpy_zip_web_outs = [
'vendor/webpy/web/',
'vendor/webpy/web/__init__.py',
'vendor/webpy/web/application.py',
'vendor/webpy/web/browser.py',
'vendor/webpy/web/db.py',
'vendor/webpy/web/debugerror.py',
'vendor/webpy/web/form.py',
'vendor/webpy/web/http.py',
'vendor/webpy/web/httpserver.py',
'vendor/webpy/web/net.py',
'vendor/webpy/web/python23.py',
'vendor/webpy/web/session.py',
'vendor/webpy/web/template.py',
'vendor/webpy/web/test.py',
'vendor/webpy/web/utils.py',
'vendor/webpy/web/webapi.py',
'vendor/webpy/web/webopenid.py',
'vendor/webpy/web/wsgi.py',
]
webpy_zip_web_contrib_dest = 'vendor/webpy/web/contrib'
webpy_zip_web_contrib_outs = [
'vendor/webpy/web/contrib/',
'vendor/webpy/web/contrib/__init__.py',
'vendor/webpy/web/contrib/template.py',
]
webpy_zip_web_wsgiserver_dest = 'vendor/webpy/web/wsgiserver'
webpy_zip_web_wsgiserver_outs = [
'vendor/webpy/web/wsgiserver/',
'vendor/webpy/web/wsgiserver/LICENSE.txt',
'vendor/webpy/web/wsgiserver/__init__.py',
'vendor/webpy/web/wsgiserver/ssl_builtin.py',
'vendor/webpy/web/wsgiserver/ssl_pyopenssl.py',
]
webpy_zip_outs = (webpy_zip_root_outs + webpy_zip_web_outs +
webpy_zip_web_contrib_outs + webpy_zip_web_wsgiserver_outs)
ninja.build(webpy_zip, 'download',
variables=[('url', webpy_url)])
# Extracting each level individually enables removing the version prefix.
ninja.build(webpy_zip_root_outs, 'unzip', webpy_zip,
variables=[('flags', '-j'),
('paths', '"*LICENSE.txt" "*README.md"'),
('dest', webpy_zip_root_dest)])
ninja.build(webpy_zip_web_outs, 'unzip', webpy_zip,
variables=[('flags', '-j'),
('paths', '"*/web/*"'),
('exclude', '"*/web/contrib/*" "*/web/wsgiserver/*"'),
('dest', webpy_zip_web_dest)])
ninja.build(webpy_zip_web_contrib_outs, 'unzip', webpy_zip,
variables=[('flags', '-j'),
('paths', '"*/web/contrib/*"'),
('dest', webpy_zip_web_contrib_dest)])
ninja.build(webpy_zip_web_wsgiserver_outs, 'unzip', webpy_zip,
variables=[('flags', '-j'),
('paths', '"*/web/wsgiserver/*"'),
('dest', webpy_zip_web_wsgiserver_dest)])
# Jasmine v1.3.1
jasmine_url = 'https://github.com/pivotal/jasmine/raw/ea76a30d85218954625d4685b246218d9ca2dfe1/dist/jasmine-standalone-1.3.1.zip'
jasmine_zip = 'vendor/jasmine/jasmine-standalone-1.3.1.zip'
jasmine_zip_dest = 'vendor/jasmine'
jasmine_zip_outs = [
'vendor/jasmine/MIT.LICENSE',
'vendor/jasmine/jasmine.css',
'vendor/jasmine/jasmine.js',
'vendor/jasmine/jasmine-html.js',
]
ninja.build(jasmine_zip, 'download',
variables=[('url', jasmine_url)])
ninja.build(jasmine_zip_outs, 'unzip', jasmine_zip,
variables=[('flags', '-j'),
('paths', '"lib/*"'),
('dest', jasmine_zip_dest)])
wtf_shim = 'third-party/tracing-framework/shims/wtf-trace-closure.js'
js_srcs = find_js_sources() + [wtf_shim]
ninja.newline()
ninja.comment('Main.')
ninja.build('$builddir/spf.js', 'jscompile', js_srcs,
variables=[('flags', '$prod_jsflags $main_jsflags')],
implicit=[jscompiler_jar, license_js, wrapper_js])
ninja.build('$builddir/spf-debug.js', 'jscompile', js_srcs,
variables=[('flags', '$debug_jsflags $main_jsflags')],
implicit=[jscompiler_jar, license_js, wrapper_js])
ninja.build('$builddir/spf-trace.js', 'jscompile', js_srcs,
variables=[('flags', '$trace_jsflags $main_jsflags'),
('preamble', 'head -n 6 ' + wtf_shim)],
implicit=[jscompiler_jar, license_js, wrapper_js])
ninja.newline()
ninja.comment('Bootloader.')
ninja.build('$builddir/boot.js', 'jscompile', js_srcs,
variables=[('flags', '$prod_jsflags $bootloader_jsflags')],
implicit=[jscompiler_jar, license_js])
ninja.build('$builddir/boot-debug.js', 'jscompile', js_srcs,
variables=[('flags', '$debug_jsflags $bootloader_jsflags')],
implicit=[jscompiler_jar, license_js])
ninja.build('$builddir/boot-trace.js', 'jscompile', js_srcs,
variables=[('flags', '$trace_jsflags $bootloader_jsflags'),
('preamble', 'head -n 6 ' + wtf_shim)],
implicit=[jscompiler_jar, license_js])
ninja.newline()
ninja.comment('Development.')
dev_out = '$builddir/dev-spf-bundle.js'
ninja.build(dev_out, 'jscompile', js_srcs,
variables=[('flags', '$dev_jsflags')],
implicit=[jscompiler_jar, license_js])
ninja.newline()
ninja.comment('Tests.')
js_tests = find_js_tests()
jasmine_test_srcs = jasmine_zip_outs[1:]
jasmine_test_outs = [
'$builddir/test/jasmine.css',
'$builddir/test/jasmine.js',
'$builddir/test/jasmine-html.js',
]
manifest_srcs = [dev_out] + js_tests
manifest_out = '$builddir/test/manifest.js'
phantomjs_run_jasmine_src = 'third-party/phantomjs/examples/run-jasmine.js'
phantomjs_run_jasmine_out = '$builddir/test/run-jasmine.js'
test_outs = jasmine_test_outs + [manifest_out, phantomjs_run_jasmine_out]
runner_src = 'src/client/testing/runner.html'
runner_out = '$builddir/test/runner.html'
for test_src, test_out in zip(jasmine_test_srcs, jasmine_test_outs):
ninja.build(test_out, 'symlink', test_src,
variables=[('prefix', '../' * test_out.count('/'))])
ninja.build(manifest_out, 'gen_test_manifest', manifest_srcs,
variables=[('prefix', '../' * manifest_out.count('/'))])
ninja.build(phantomjs_run_jasmine_out, 'symlink', phantomjs_run_jasmine_src,
variables=[('prefix',
'../' * phantomjs_run_jasmine_out.count('/'))])
ninja.build(runner_out, 'symlink', runner_src,
variables=[('prefix', '../' * runner_out.count('/'))],
implicit=test_outs)
ninja.newline()
ninja.comment('Demo.')
demo_srcs = find_demo_sources()
demo_app_src = 'src/server/demo/app.py'
demo_app_out = '$builddir/demo/app.py'
demo_srcs.remove(demo_app_src)
demo_outs = [s.replace('src/server/', '$builddir/') for s in demo_srcs]
demo_srcs.append('vendor/webpy/web')
demo_outs.append('$builddir/demo/web')
demo_srcs.append(dev_out)
demo_outs.append(dev_out.replace('$builddir/', '$builddir/demo/static/'))
for demo_src, demo_out in zip(demo_srcs, demo_outs):
if demo_src == 'vendor/webpy/web':
implicit_deps = webpy_zip_outs
else:
implicit_deps = None
ninja.build(demo_out, 'symlink', demo_src,
variables=[('prefix', '../' * demo_out.count('/'))],
implicit=implicit_deps)
ninja.build(demo_app_out, 'symlink', demo_app_src,
variables=[('prefix', '../' * demo_app_out.count('/'))],
implicit=demo_outs)
ninja.newline()
ninja.comment('Generate build file.')
# Update the build file if this script or the build syntax changes.
ninja.build('build.ninja', 'configure',
implicit=['./configure.py'])
def write_aliases(ninja):
ninja.newline()
ninja.comment('Tools.')
ninja.build('lint', 'jslint', 'src/client',
variables=[('flags', '--recurse')],
implicit=['$jslinter_bin'])
ninja.build('fix', 'jsfix', 'src/client',
variables=[('flags', '--recurse')],
implicit=['$jsfixer_bin'])
ninja.newline()
ninja.comment('Aliases.')
aliases = [
ninja.build('spf', 'phony',
'$builddir/spf.js'),
ninja.build('spf-debug', 'phony',
'$builddir/spf-debug.js'),
ninja.build('spf-trace', 'phony',
'$builddir/spf-trace.js'),
ninja.build('bootloader', 'phony',
'$builddir/boot.js'),
ninja.build('debug-bootloader', 'phony',
'$builddir/boot-debug.js'),
ninja.build('tracing-bootloader', 'phony',
'$builddir/boot-trace.js'),
ninja.build('tests', 'phony',
'$builddir/test/runner.html'),
ninja.build('demo', 'phony',
'$builddir/demo/app.py'),
]
aliases = [a for outs in aliases for a in outs] # Reduce to a single list.
ninja.build('all', 'phony', aliases)
ninja.newline()
ninja.comment('Default.')
ninja.default('spf')
def main():
check_requirements()
fetch_dependencies()
ninja = create_ninja_file()
write_header(ninja)
write_variables(ninja)
write_rules(ninja)
write_targets(ninja)
write_aliases(ninja)
print('Wrote %s' % ninja.output.name)
if __name__ == '__main__':
main()
| mit | -3,069,535,320,014,338,000 | 39.219512 | 131 | 0.628335 | false |
LittleRichard/luxalert | luxweb/luxweb/spiders/KensingtonSpider.py | 1 | 4229 | import datetime
import re
import scrapy
from nest.storage.luxalert.entity.Apartment import Apartment
from nest.storage.luxalert.entity.ApartmentSnapshot import ApartmentSnapshot
from luxweb.luxweb import HMTL_SPIDER_DATA_TUPLE_KEY
from luxweb.luxweb.ScrapeErrorHandler import ScrapeErrorHandler
from luxweb.luxweb.spiders.AbstractHTMLSpider import AbstractHTMLSpider
class KensingtonSpider(AbstractHTMLSpider):
THE_KENSINGTON_NAME = 'The Kensington'
BUILDING_NAMES = (
THE_KENSINGTON_NAME,
)
# name of the spider, a scrapy-required thing
name = "kensington"
@classmethod
def get_building_names(cls):
return KensingtonSpider.BUILDING_NAMES
def start_requests(self):
# urls to scrape
urls = [
'http://www.kensingtonboston.com/floor-plans/apartments'
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
@ScrapeErrorHandler.wrap_to_raise
def parse(self, response):
buildings_by_name = self.get_buildings_by_name(KensingtonSpider.BUILDING_NAMES)
        # loop through all <div> elements with class="plan_detail"
for plan in response.xpath('//div[@class="plan_detail"]'):
# unit: extract first element in <h3> list
unit = str(plan.xpath('div[@class="plan_info"]/h3/text()').extract_first())
# floor: 2nd character if first character is 0, otherwise first 2 characters
if unit[0] == "0":
floor = str(unit[1])
else:
floor = str(unit[:2])
# bedrooms: if bedroom is a studio update text, else grab first character
if plan.xpath('div[@class="plan_info"]/ul/li[1]/text()').extract_first() == "STUDIO":
bedrooms = int(0)
else:
bedrooms = int(plan.xpath('div[@class="plan_info"]/ul/li[1]/text()').extract_first()[0])
# bathrooms: first character from string
bathrooms_str = plan.xpath('div[@class="plan_info"]/ul/li[2]/text()').extract_first()
bathrooms_str = re.sub(u' BATH.+', u'', bathrooms_str)
bathrooms = float(bathrooms_str)
# sq_ft: remove "SQ. FEET" and ",""
sq_ft = plan.xpath('div[@class="plan_info"]/ul/li[3]/text()').extract_first()
sq_ft = sq_ft.replace("SQ. FEET", "")
sq_ft = sq_ft.replace(",", "")
sq_ft = int(sq_ft)
# price: remove "FROM $" and "/MONTH" and ","
price = plan.xpath('div[@class="plan_info"]/ul/li[4]/text()').extract_first()
price = price.replace("FROM $", "")
price = price.replace("/MONTH", "")
price = price.replace(",", "")
price = float(price)
# availability: from 10th character onwards, change "NOW" to today's date
if plan.xpath('div[@class="plan_info"]/ul/li[5]/text()').extract_first()[10:] == "NOW":
availability = datetime.datetime.utcnow().date()
else:
availability_str = str(plan.xpath('div[@class="plan_info"]/ul/li[5]/text()').extract_first()[10:])
availability = datetime.datetime.strptime(availability_str, '%m/%d/%Y').date()
# floor_plan
floor_plan = str(plan.xpath('div[@class="plan_image desktop_and_tab"]/img/@src').extract_first())
building = buildings_by_name[KensingtonSpider.THE_KENSINGTON_NAME]
apartment = Apartment(
building,
floor,
sq_ft,
bathrooms,
bedrooms,
unit,
)
apartment_snap = ApartmentSnapshot(
apartment,
datetime.datetime.utcnow(),
price,
availability,
floor_plan
)
yield {HMTL_SPIDER_DATA_TUPLE_KEY: (apartment, apartment_snap)}
next_page = response.xpath('//a[@rel="next"]/@href').extract_first()
if (next_page is not None) and (next_page != "javascript:void(0);"):
next_page = response.urljoin(next_page)
yield scrapy.Request(next_page, callback=self.parse)
| gpl-3.0 | 332,383,384,053,203,200 | 36.424779 | 114 | 0.572476 | false |
clovertrail/cloudinit-bis | cloudinit/config/cc_scripts_per_boot.py | 1 | 1760 | # vi: ts=4 expandtab
#
# Copyright (C) 2011 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <[email protected]>
# Author: Juerg Haefliger <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Scripts Per Boot
----------------
**Summary:** run per boot scripts
Any scripts in the ``scripts/per-boot`` directory on the datasource will be run
every time the system boots. Scripts will be run in alphabetical order. This
module does not accept any config keys.
**Internal name:** ``cc_scripts_per_boot``
**Module frequency:** per always
**Supported distros:** all
"""
import os
from cloudinit import util
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
SCRIPT_SUBDIR = 'per-boot'
def handle(name, _cfg, cloud, log, _args):
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
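    # Typically /var/lib/cloud/scripts/per-boot; every executable found there is run.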
try:
util.runparts(runparts_path)
except Exception:
log.warn("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
raise
| gpl-3.0 | 9,048,880,670,074,391,000 | 29.877193 | 79 | 0.696591 | false |
AMOboxTV/AMOBox.LegoBuild | plugin.video.salts/scrapers/onlinedizi_scraper.py | 1 | 4676 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from salts_lib import dom_parser
from salts_lib import kodi
from salts_lib import log_utils
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
import scraper
BASE_URL = 'http://onlinedizi.co'
class OnlineDizi_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'OnlineDizi'
def resolve_link(self, link):
return link
def format_source_label(self, item):
label = '[%s] %s' % (item['quality'], item['host'])
return label
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
page_url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=.25)
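            # The episode page lists subtitle options in a dropdown; follow the
            # "Altyazısız" (no subtitles) option, then two nested iframes, and use a
            # non-redirecting HEAD request to capture the final stream URL.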
fragment = dom_parser.parse_dom(html, 'ul', {'class': 'dropdown-menu'})
if fragment:
match = re.search('''href=['"]([^'"]+)[^>]*>(?:Altyaz.{1,3}s.{1,3}z)<''', fragment[0])
if match:
option_url = urlparse.urljoin(self.base_url, match.group(1))
html = self._http_get(option_url, cache_limit=2)
fragment = dom_parser.parse_dom(html, 'div', {'class': 'video-player'})
if fragment:
iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
if iframe_url:
html = self._http_get(iframe_url[0], cache_limit=.25)
iframe_url = dom_parser.parse_dom(html, 'iframe', {'id': 'ifr'}, ret='src')
if iframe_url:
html = self._http_get(iframe_url[0], allow_redirect=False, method='HEAD', cache_limit=.25)
if html.startswith('http'):
stream_url = html
host = urlparse.urlparse(stream_url).hostname
stream_url += '|User-Agent=%s' % (scraper_utils.get_ua())
quality = QUALITIES.HIGH
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': False}
hosters.append(hoster)
return hosters
def get_url(self, video):
return self._default_get_url(video)
def _get_episode_url(self, show_url, video):
episode_pattern = '''href=['"]([^'"]+-%s-sezon-%s-bolum[^'"]*)''' % (video.season, video.episode)
return self._default_get_episode_url(show_url, video, episode_pattern)
def search(self, video_type, title, year, season=''):
html = self._http_get(self.base_url, cache_limit=48)
results = []
seen_urls = {}
norm_title = scraper_utils.normalize_title(title)
for fragment in dom_parser.parse_dom(html, 'ul', {'class': '[^"]*all-series-list[^"]*'}):
for match in re.finditer('''href=["']([^'"]+)[^>]+>([^<]+)''', fragment):
url, match_title = match.groups()
if url not in seen_urls:
seen_urls[url] = True
if norm_title in scraper_utils.normalize_title(match_title):
result = {'url': scraper_utils.pathify_url(url), 'title': scraper_utils.cleanse_title(match_title), 'year': ''}
results.append(result)
return results
| gpl-2.0 | -8,868,857,414,232,558,000 | 43.533333 | 182 | 0.566938 | false |
hailongqiu/new-deepin-media-player | src/plugins/youku/youku_web_parse.py | 1 | 5006 | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2013 XXX, Inc.
# 2013 红铭曼,王芳
#
# Author: 红铭曼,王芳 <[email protected]>
# Maintainer: 红铭曼,王芳 <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from BeautifulSoup import BeautifulSoup
import urllib2
import re
class YoukuWebParse(object):
def __init__(self):
self.headers = {"Accept":"*/*", "Accept-Language":"zh-CN", "":"",
"User-Agent":"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)",
#"Accept-Encoding":"gzip, deflate",
"Connection":"Keep-Alive"}
def scan_movie_leave(self, addr):
temp_info = None
url = addr
req = urllib2.Request(url=url, headers=self.headers)
data = urllib2.urlopen(req).read()
#
sounp = BeautifulSoup(data)
music_list = sounp.findAll("a", {"class":"btnShow btnplayposi"})
for link in music_list:
addr = link.get("href") # 获取地址.
title = link.get("title") # 获取标题.
temp_info = (addr, title)
return temp_info
def scan_3_leave(self, addr):
url = addr
req = urllib2.Request(url=url, headers=self.headers)
data = urllib2.urlopen(req).read()
#
sounp = BeautifulSoup(data)
p_title_list = sounp.findAll("a",
{"href": re.compile("http://"),
"title" : re.compile("\d"),
"charset" : re.compile("-"),
"target" : re.compile('_')
})
temp_list = []
#print p_title_list
for list_ in p_title_list:
addr_ = list_.get("href")
name_ = list_.get("title")
#print name_, addr_
temp_list.append((addr_, name_))
return temp_list
def parse_web(self, addr, index=1):
page_num = None
all_sum = None
info_list = []
url = addr + "%d.html" % (index)
#print url
#data = urllib2.urlopen(url).read()
req = urllib2.Request(url=url, headers=self.headers)
data = urllib2.urlopen(req).read()
#
sounp = BeautifulSoup(data)
p_title_list = sounp.findAll('li', {"class" : "p_title"})
for link in p_title_list:
a_link = link.a # the 'a' element in <a href = "......" title.....>.
addr = a_link.get("href") # Get the address.
title = a_link.get("title") # Get the title.
#print "addr:", addr, "title:", title
info_list.append((addr, title))
if index == 1:
page_num = len(p_title_list)
#print "link len:", page_num
all_sum_str = sounp.findAll("div", {"class" : "stat"})
all_sum_utf_8 = str(all_sum_str[0].string).replace("条", "")
all_sum = int(str(all_sum_utf_8.split("/")[1].strip()))
#print "总数:", all_sum
return info_list, page_num, all_sum
def get_sum_page(all_sum, page_num):
page_sum = all_sum / page_num
page_mod = all_sum % page_num
if page_mod > 0:
page_sum += 1
return page_sum
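# A minimal worked example of the ceiling-style division above (hypothetical
# numbers): with all_sum = 95 items and page_num = 30 items per page,
# 95 / 30 == 3 with remainder 5, so page_sum becomes 3 + 1 = 4 pages.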
if __name__ == "__main__":
from youku_web import v_olist_dict
v_olist_keys = v_olist_dict.keys()
youku_web_parse = YoukuWebParse()
#youku_web_parse.parse_web("http://www.youku.com/show_page/id_zcc001eb6962411de83b1.html")
#youku_web_parse.parse_web("http://www.youku.com/show_page/id_zcc000b60962411de83b1.html")
#youku_web_parse.parse_web("http://www.youku.com/show_page/id_z84933d227a4911e1b2ac.html")
#youku_web_parse.parse_web("http://www.youku.com/show_page/id_z8820e97ecfeb11e19013.html")
#youku_web_parse.parse_web("http://www.youku.com/show_page/id_z0bb2a948c24311df97c0.html")
info_list, page_num, all_sum = youku_web_parse.parse_web(v_olist_dict["热血"])
'''
info_list, page_num, all_sum = youku_web_parse.parse_web(v_olist_dict["格斗"])
info_list, page_num, all_sum = youku_web_parse.parse_web(v_olist_dict["恋爱"])
print get_sum_page(all_sum, page_num)
print get_sum_page(all_sum, page_num)
'''
for i in range(1, get_sum_page(all_sum, page_num) + 1):
info_list, page_num, all_sum = youku_web_parse.parse_web(v_olist_dict["热血"], i)
for info in info_list:
print info[0], info[1]
| gpl-3.0 | 5,523,252,859,593,766,000 | 36.257576 | 94 | 0.573607 | false |
codenamejason/data_from_pdf | utils.py | 1 | 3856 | # vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2014, Jason Romero
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Utility functions for PDF library.
"""
__author__ = "Jason Romero"
__author_email__ = "[email protected]"
#ENABLE_PSYCO = False
#if ENABLE_PSYCO:
# try:
# import psyco
# except ImportError:
# ENABLE_PSYCO = False
#
#if not ENABLE_PSYCO:
# class psyco:
# def proxy(func):
# return func
# proxy = staticmethod(proxy)
def readUntilWhitespace(stream, maxchars=None):
txt = ""
while True:
tok = stream.read(1)
if tok.isspace() or not tok:
break
txt += tok
if len(txt) == maxchars:
break
return txt
def readNonWhitespace(stream):
tok = ' '
while tok == '\n' or tok == '\r' or tok == ' ' or tok == '\t':
tok = stream.read(1)
return tok
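# Hedged usage sketch for the two stream helpers above (not part of the
# original module; StringIO stands in for a real PDF file object):
#
#   from StringIO import StringIO
#   stream = StringIO("obj 123 endobj")
#   readUntilWhitespace(stream)   # -> "obj"  (stops at the space)
#   readNonWhitespace(stream)     # -> "1"    (skips whitespace, returns next char)
#   readUntilWhitespace(stream)   # -> "23"   (rest of the token)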
class ConvertFunctionsToVirtualList(object):
def __init__(self, lengthFunction, getFunction):
self.lengthFunction = lengthFunction
self.getFunction = getFunction
def __len__(self):
return self.lengthFunction()
def __getitem__(self, index):
if not isinstance(index, int):
raise TypeError, "sequence indices must be integers"
len_self = len(self)
if index < 0:
# support negative indexes
index = len_self + index
if index < 0 or index >= len_self:
raise IndexError, "sequence index out of range"
return self.getFunction(index)
def RC4_encrypt(key, plaintext):
S = [i for i in range(256)]
j = 0
for i in range(256):
j = (j + S[i] + ord(key[i % len(key)])) % 256
S[i], S[j] = S[j], S[i]
i, j = 0, 0
retval = ""
for x in range(len(plaintext)):
i = (i + 1) % 256
j = (j + S[i]) % 256
S[i], S[j] = S[j], S[i]
t = S[(S[i] + S[j]) % 256]
retval += chr(ord(plaintext[x]) ^ t)
return retval
def matrixMultiply(a, b):
return [[sum([float(i)*float(j)
for i, j in zip(row, col)]
) for col in zip(*b)]
for row in a]
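# Illustrative check of matrixMultiply (hypothetical 2x2 values):
#   matrixMultiply([[1, 2], [3, 4]], [[5, 6], [7, 8]])
#   -> [[1*5 + 2*7, 1*6 + 2*8], [3*5 + 4*7, 3*6 + 4*8]]
#   -> [[19.0, 22.0], [43.0, 50.0]]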
class PyPdfError(Exception):
pass
class PdfReadError(PyPdfError):
pass
class PageSizeNotDefinedError(PyPdfError):
pass
if __name__ == "__main__":
# test RC4
out = RC4_encrypt("Key", "Plaintext")
print repr(out)
pt = RC4_encrypt("Key", out)
print repr(pt)
| artistic-2.0 | -7,082,099,426,749,086,000 | 30.606557 | 77 | 0.641079 | false |
MalkIPP/ipp_work | ipp_work/example/tax_rate_by_decile.py | 1 | 1569 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 1 17:07:33 2015
@author: malkaguillot
"""
from ipp_work.utils import survey_simulate, df_weighted_average_grouped
from ipp_work.simulations.ir_marg_rate import varying_survey_simulation
from ipp_work.example.quantiles_of_revimp import make_weighted_deciles_of_variable
import pandas
year = 2009
ind_variables = ['idmen', 'quimen', 'idfoy', 'salaire_imposable', 'salaire_net']
foy_variables = ['irpp', 'decile_rfr', 'weight_foyers', 'idfoy_original', 'rfr']
used_as_input_variables = ['salaire_imposable', 'cho', 'rst', 'age_en_mois', 'smic55']
df_by_entity_key_plural, simulation = survey_simulate(used_as_input_variables, year, ind_variables,
foy_variables = foy_variables)
df_individus = df_by_entity_key_plural['individus']
df_foyers = df_by_entity_key_plural['foyers']
tax_rates = varying_survey_simulation(year = 2009, increment = 10, target = 'irpp', varying = 'rni',
used_as_input_variables = used_as_input_variables)
tax_rates = tax_rates[['idfoy_original', 'marginal_rate', 'average_rate']]
df_foyers = pandas.merge(df_foyers, tax_rates, on = 'idfoy_original')
make_weighted_deciles_of_variable(df_foyers, 'rfr', 'weight_foyers', 100)
Wconcat = df_weighted_average_grouped(
dataframe = df_foyers,
groupe = 'decile_of_rfr',
varlist = [
'marginal_rate', 'average_rate'
],
)
print Wconcat
df_foyers['decile_rfr'].count()
df_foyers['rfr'].describe()
df_foyers['weight_foyers'].describe() | agpl-3.0 | -7,333,223,153,781,606,000 | 37.292683 | 100 | 0.667304 | false |
srznew/heat | heat/engine/resources/openstack/nova/server.py | 1 | 65002 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import uuidutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import function
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.neutron import subnet
from heat.engine.resources import stack_user
from heat.engine import support
from heat.rpc import api as rpc_api
cfg.CONF.import_opt('default_software_config_transport', 'heat.common.config')
cfg.CONF.import_opt('stack_scheduler_hints', 'heat.common.config')
LOG = logging.getLogger(__name__)
class Server(stack_user.StackUser):
PROPERTIES = (
NAME, IMAGE, BLOCK_DEVICE_MAPPING, BLOCK_DEVICE_MAPPING_V2,
FLAVOR, FLAVOR_UPDATE_POLICY, IMAGE_UPDATE_POLICY, KEY_NAME,
ADMIN_USER, AVAILABILITY_ZONE, SECURITY_GROUPS, NETWORKS,
SCHEDULER_HINTS, METADATA, USER_DATA_FORMAT, USER_DATA,
RESERVATION_ID, CONFIG_DRIVE, DISK_CONFIG, PERSONALITY,
ADMIN_PASS, SOFTWARE_CONFIG_TRANSPORT
) = (
'name', 'image', 'block_device_mapping', 'block_device_mapping_v2',
'flavor', 'flavor_update_policy', 'image_update_policy', 'key_name',
'admin_user', 'availability_zone', 'security_groups', 'networks',
'scheduler_hints', 'metadata', 'user_data_format', 'user_data',
'reservation_id', 'config_drive', 'diskConfig', 'personality',
'admin_pass', 'software_config_transport'
)
_BLOCK_DEVICE_MAPPING_KEYS = (
BLOCK_DEVICE_MAPPING_DEVICE_NAME, BLOCK_DEVICE_MAPPING_VOLUME_ID,
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
) = (
'device_name', 'volume_id',
'snapshot_id',
'volume_size',
'delete_on_termination',
)
_BLOCK_DEVICE_MAPPING_V2_KEYS = (
BLOCK_DEVICE_MAPPING_DEVICE_NAME,
BLOCK_DEVICE_MAPPING_VOLUME_ID,
BLOCK_DEVICE_MAPPING_IMAGE_ID,
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
BLOCK_DEVICE_MAPPING_SWAP_SIZE,
BLOCK_DEVICE_MAPPING_DEVICE_TYPE,
BLOCK_DEVICE_MAPPING_DISK_BUS,
BLOCK_DEVICE_MAPPING_BOOT_INDEX,
BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
) = (
'device_name',
'volume_id',
'image_id',
'snapshot_id',
'swap_size',
'device_type',
'disk_bus',
'boot_index',
'volume_size',
'delete_on_termination',
)
_NETWORK_KEYS = (
NETWORK_UUID, NETWORK_ID, NETWORK_FIXED_IP, NETWORK_PORT,
) = (
'uuid', 'network', 'fixed_ip', 'port',
)
_SOFTWARE_CONFIG_FORMATS = (
HEAT_CFNTOOLS, RAW, SOFTWARE_CONFIG
) = (
'HEAT_CFNTOOLS', 'RAW', 'SOFTWARE_CONFIG'
)
_SOFTWARE_CONFIG_TRANSPORTS = (
POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE
) = (
'POLL_SERVER_CFN', 'POLL_SERVER_HEAT', 'POLL_TEMP_URL', 'ZAQAR_MESSAGE'
)
ATTRIBUTES = (
NAME_ATTR, ADDRESSES, NETWORKS_ATTR, FIRST_ADDRESS,
INSTANCE_NAME, ACCESSIPV4, ACCESSIPV6, CONSOLE_URLS,
) = (
'name', 'addresses', 'networks', 'first_address',
'instance_name', 'accessIPv4', 'accessIPv6', 'console_urls',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Server name.'),
update_allowed=True
),
IMAGE: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the image to boot with.'),
constraints=[
constraints.CustomConstraint('glance.image')
],
update_allowed=True
),
BLOCK_DEVICE_MAPPING: properties.Schema(
properties.Schema.LIST,
_('Block device mappings for this server.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda.'),
required=True
),
BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the volume to boot from. Only one '
'of volume_id or snapshot_id should be '
'provided.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create a volume '
'from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the volume, in GB. It is safe to '
'leave this blank and have the Compute service '
'infer the size.')
),
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
'when the server is terminated.')
),
},
)
),
BLOCK_DEVICE_MAPPING_V2: properties.Schema(
properties.Schema.LIST,
_('Block device mappings v2 for this server.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda.'),
),
BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The volume_id can be boot or non-boot device '
'to the server.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
BLOCK_DEVICE_MAPPING_IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the image to create a volume from.'),
constraints=[
constraints.CustomConstraint('glance.image')
],
),
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create a volume '
'from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BLOCK_DEVICE_MAPPING_SWAP_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the swap, in MB.')
),
BLOCK_DEVICE_MAPPING_DEVICE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Device type: at the moment we can make distinction'
' only between disk and cdrom.'),
constraints=[
constraints.AllowedValues(['cdrom', 'disk']),
],
),
BLOCK_DEVICE_MAPPING_DISK_BUS: properties.Schema(
properties.Schema.STRING,
_('Bus of the device: hypervisor driver chooses a '
'suitable default if omitted.'),
constraints=[
constraints.AllowedValues(['ide', 'lame_bus',
'scsi', 'usb',
'virtio']),
],
),
BLOCK_DEVICE_MAPPING_BOOT_INDEX: properties.Schema(
properties.Schema.INTEGER,
_('Integer used for ordering the boot disks.'),
),
BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Size of the block device in GB. If it is omitted, '
'hypervisor driver calculates size.'),
),
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
'when the server is terminated.')
),
},
),
support_status=support.SupportStatus(version='2015.1')
),
FLAVOR: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the flavor to boot onto.'),
required=True,
update_allowed=True,
constraints=[
constraints.CustomConstraint('nova.flavor')
]
),
FLAVOR_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply a flavor update; either by requesting '
'a server resize or by replacing the entire server.'),
default='RESIZE',
constraints=[
constraints.AllowedValues(['RESIZE', 'REPLACE']),
],
update_allowed=True
),
IMAGE_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply an image-id update; either by '
'requesting a server rebuild or by replacing the entire server'),
default='REBUILD',
constraints=[
constraints.AllowedValues(['REBUILD', 'REPLACE',
'REBUILD_PRESERVE_EPHEMERAL']),
],
update_allowed=True
),
KEY_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of keypair to inject into the server.'),
constraints=[
constraints.CustomConstraint('nova.keypair')
]
),
ADMIN_USER: properties.Schema(
properties.Schema.STRING,
_('Name of the administrative user to use on the server.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('The default cloud-init user set up for each image '
'(e.g. "ubuntu" for Ubuntu 12.04+, "fedora" for '
'Fedora 19+ and "cloud-user" for CentOS/RHEL 6.5).'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.1',
previous_status=support.SupportStatus(version='2013.2')
)
)
),
AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_('Name of the availability zone for server placement.')
),
SECURITY_GROUPS: properties.Schema(
properties.Schema.LIST,
_('List of security group names or IDs. Cannot be used if '
'neutron ports are associated with this server; assign '
'security groups to the ports instead.'),
default=[]
),
NETWORKS: properties.Schema(
properties.Schema.LIST,
_('An ordered list of nics to be added to this server, with '
'information about connected networks, fixed ips, port etc.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
NETWORK_UUID: properties.Schema(
properties.Schema.STRING,
_('ID of network to create a port on.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
message=_('Use property %s.') % NETWORK_ID,
version='2014.1'
)
),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
NETWORK_ID: properties.Schema(
properties.Schema.STRING,
_('Name or ID of network to create a port on.'),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
NETWORK_FIXED_IP: properties.Schema(
properties.Schema.STRING,
_('Fixed IP address to specify for the port '
'created on the requested network.'),
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
NETWORK_PORT: properties.Schema(
properties.Schema.STRING,
_('ID of an existing port to associate with this '
'server.'),
constraints=[
constraints.CustomConstraint('neutron.port')
]
),
},
),
update_allowed=True
),
SCHEDULER_HINTS: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key-value pairs specified by the client to help '
'boot a server.')
),
METADATA: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key/value metadata to store for this server. Both '
'keys and values must be 255 characters or less. Non-string '
'values will be serialized to JSON (and the serialized '
'string must be 255 characters or less).'),
update_allowed=True
),
USER_DATA_FORMAT: properties.Schema(
properties.Schema.STRING,
_('How the user_data should be formatted for the server. For '
'HEAT_CFNTOOLS, the user_data is bundled as part of the '
'heat-cfntools cloud-init boot configuration data. For RAW '
'the user_data is passed to Nova unmodified. '
'For SOFTWARE_CONFIG user_data is bundled as part of the '
'software config data, and metadata is derived from any '
'associated SoftwareDeployment resources.'),
default=HEAT_CFNTOOLS,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_FORMATS),
]
),
SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
properties.Schema.STRING,
_('How the server should receive the metadata required for '
'software configuration. POLL_SERVER_CFN will allow calls to '
'the cfn API action DescribeStackResource authenticated with '
'the provided keypair. POLL_SERVER_HEAT will allow calls to '
'the Heat API resource-show using the provided keystone '
'credentials. POLL_TEMP_URL will create and populate a '
'Swift TempURL with metadata for polling.'),
default=cfg.CONF.default_software_config_transport,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_TRANSPORTS),
]
),
USER_DATA: properties.Schema(
properties.Schema.STRING,
_('User data script to be executed by cloud-init.'),
default=''
),
RESERVATION_ID: properties.Schema(
properties.Schema.STRING,
_('A UUID for the set of servers being requested.')
),
CONFIG_DRIVE: properties.Schema(
properties.Schema.BOOLEAN,
_('If True, enable config drive on the server.')
),
DISK_CONFIG: properties.Schema(
properties.Schema.STRING,
_('Control how the disk is partitioned when the server is '
'created.'),
constraints=[
constraints.AllowedValues(['AUTO', 'MANUAL']),
]
),
PERSONALITY: properties.Schema(
properties.Schema.MAP,
_('A map of files to create/overwrite on the server upon boot. '
'Keys are file names and values are the file contents.'),
default={}
),
ADMIN_PASS: properties.Schema(
properties.Schema.STRING,
_('The administrator password for the server.'),
update_allowed=True
),
}
attributes_schema = {
NAME_ATTR: attributes.Schema(
_('Name of the server.'),
type=attributes.Schema.STRING
),
ADDRESSES: attributes.Schema(
_('A dict of all network addresses with corresponding port_id. '
'Each network will have two keys in dict, they are network '
'name and network id. '
'The port ID may be obtained through the following expression: '
'"{get_attr: [<server>, addresses, <network name_or_id>, 0, '
'port]}".'),
type=attributes.Schema.MAP
),
NETWORKS_ATTR: attributes.Schema(
_('A dict of assigned network addresses of the form: '
'{"public": [ip1, ip2...], "private": [ip3, ip4], '
'"public_uuid": [ip1, ip2...], "private_uuid": [ip3, ip4]}. '
'Each network will have two keys in dict, they are network '
'name and network id. '),
type=attributes.Schema.MAP
),
FIRST_ADDRESS: attributes.Schema(
_('Convenience attribute to fetch the first assigned network '
'address, or an empty string if nothing has been assigned at '
'this time. Result may not be predictable if the server has '
'addresses from more than one network.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('Use the networks attribute instead of '
'first_address. For example: "{get_attr: '
'[<server name>, networks, <network name>, 0]}"'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.2',
previous_status=support.SupportStatus(version='2013.2')
)
)
),
INSTANCE_NAME: attributes.Schema(
_('AWS compatible instance name.'),
type=attributes.Schema.STRING
),
ACCESSIPV4: attributes.Schema(
_('The manually assigned alternative public IPv4 address '
'of the server.'),
type=attributes.Schema.STRING
),
ACCESSIPV6: attributes.Schema(
_('The manually assigned alternative public IPv6 address '
'of the server.'),
type=attributes.Schema.STRING
),
CONSOLE_URLS: attributes.Schema(
_("URLs of server's consoles. "
"To get a specific console type, the requested type "
"can be specified as parameter to the get_attr function, "
"e.g. get_attr: [ <server>, console_urls, novnc ]. "
"Currently supported types are "
"novnc, xvpvnc, spice-html5, rdp-html5, serial."),
support_status=support.SupportStatus(version='2015.1'),
type=attributes.Schema.MAP
),
}
# Server host name is limited to 53 characters due to the typical default
# linux HOST_NAME_MAX of 64, minus the .novalocal appended to the name
physical_resource_name_limit = 53
default_client_name = 'nova'
entity = 'servers'
def translation_rules(self):
return [properties.TranslationRule(
self.properties,
properties.TranslationRule.REPLACE,
source_path=[self.NETWORKS, self.NETWORK_ID],
value_name=self.NETWORK_UUID)]
def __init__(self, name, json_snippet, stack):
super(Server, self).__init__(name, json_snippet, stack)
if self.user_data_software_config():
self._register_access_key()
def _server_name(self):
name = self.properties[self.NAME]
if name:
return name
return self.physical_resource_name()
def _config_drive(self):
# This method is overridden by the derived CloudServer resource
return self.properties[self.CONFIG_DRIVE]
def _populate_deployments_metadata(self, meta):
meta['deployments'] = meta.get('deployments', [])
if self.transport_poll_server_heat():
meta['os-collect-config'] = {'heat': {
'user_id': self._get_user_id(),
'password': self.password,
'auth_url': self.context.auth_url,
'project_id': self.stack.stack_user_project_id,
'stack_id': self.stack.identifier().stack_path(),
'resource_name': self.name}
}
if self.transport_zaqar_message():
queue_id = self.physical_resource_name()
self.data_set('metadata_queue_id', queue_id)
zaqar_plugin = self.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(
self.stack.stack_user_project_id)
queue = zaqar.queue(queue_id)
queue.post({'body': meta, 'ttl': zaqar_plugin.DEFAULT_TTL})
meta['os-collect-config'] = {'zaqar': {
'user_id': self._get_user_id(),
'password': self.password,
'auth_url': self.context.auth_url,
'project_id': self.stack.stack_user_project_id,
'queue_id': queue_id}
}
elif self.transport_poll_server_cfn():
meta['os-collect-config'] = {'cfn': {
'metadata_url': '%s/v1/' % cfg.CONF.heat_metadata_server_url,
'access_key_id': self.access_key,
'secret_access_key': self.secret_key,
'stack_name': self.stack.name,
'path': '%s.Metadata' % self.name}
}
elif self.transport_poll_temp_url():
container = self.physical_resource_name()
object_name = str(uuid.uuid4())
self.client('swift').put_container(container)
url = self.client_plugin('swift').get_temp_url(
container, object_name, method='GET')
put_url = self.client_plugin('swift').get_temp_url(
container, object_name)
self.data_set('metadata_put_url', put_url)
self.data_set('metadata_object_name', object_name)
meta['os-collect-config'] = {'request': {
'metadata_url': url}
}
self.client('swift').put_object(
container, object_name, jsonutils.dumps(meta))
self.metadata_set(meta)
def _register_access_key(self):
'''
Access is limited to this resource, which created the keypair
'''
def access_allowed(resource_name):
return resource_name == self.name
if self.transport_poll_server_cfn():
self.stack.register_access_allowed_handler(
self.access_key, access_allowed)
elif self.transport_poll_server_heat():
self.stack.register_access_allowed_handler(
self._get_user_id(), access_allowed)
def _create_transport_credentials(self):
if self.transport_poll_server_cfn():
self._create_user()
self._create_keypair()
elif (self.transport_poll_server_heat() or
self.transport_zaqar_message()):
self.password = uuid.uuid4().hex
self._create_user()
self._register_access_key()
@property
def access_key(self):
return self.data().get('access_key')
@property
def secret_key(self):
return self.data().get('secret_key')
@property
def password(self):
return self.data().get('password')
@password.setter
def password(self, password):
if password is None:
self.data_delete('password')
else:
self.data_set('password', password, True)
def user_data_raw(self):
return self.properties[self.USER_DATA_FORMAT] == self.RAW
def user_data_software_config(self):
return self.properties[
self.USER_DATA_FORMAT] == self.SOFTWARE_CONFIG
def transport_poll_server_cfn(self):
return self.properties[
self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_SERVER_CFN
def transport_poll_server_heat(self):
return self.properties[
self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_SERVER_HEAT
def transport_poll_temp_url(self):
return self.properties[
self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_TEMP_URL
def transport_zaqar_message(self):
return self.properties.get(
self.SOFTWARE_CONFIG_TRANSPORT) == self.ZAQAR_MESSAGE
def get_software_config(self, ud_content):
try:
sc = self.rpc_client().show_software_config(
self.context, ud_content)
return sc[rpc_api.SOFTWARE_CONFIG_CONFIG]
except Exception as ex:
self.rpc_client().ignore_error_named(ex, 'NotFound')
return ud_content
def handle_create(self):
security_groups = self.properties[self.SECURITY_GROUPS]
user_data_format = self.properties[self.USER_DATA_FORMAT]
ud_content = self.properties[self.USER_DATA]
if self.user_data_software_config() or self.user_data_raw():
if uuidutils.is_uuid_like(ud_content):
# attempt to load the userdata from software config
ud_content = self.get_software_config(ud_content)
metadata = self.metadata_get(True) or {}
if self.user_data_software_config():
self._create_transport_credentials()
self._populate_deployments_metadata(metadata)
userdata = self.client_plugin().build_userdata(
metadata,
ud_content,
instance_user=None,
user_data_format=user_data_format)
flavor = self.properties[self.FLAVOR]
availability_zone = self.properties[self.AVAILABILITY_ZONE]
image = self.properties[self.IMAGE]
if image:
image = self.client_plugin('glance').get_image_id(image)
flavor_id = self.client_plugin().get_flavor_id(flavor)
instance_meta = self.properties[self.METADATA]
if instance_meta is not None:
instance_meta = self.client_plugin().meta_serialize(
instance_meta)
scheduler_hints = self.properties[self.SCHEDULER_HINTS]
if cfg.CONF.stack_scheduler_hints:
if scheduler_hints is None:
scheduler_hints = {}
scheduler_hints['heat_root_stack_id'] = self.stack.root_stack_id()
scheduler_hints['heat_stack_id'] = self.stack.id
scheduler_hints['heat_stack_name'] = self.stack.name
scheduler_hints['heat_path_in_stack'] = self.stack.path_in_stack()
scheduler_hints['heat_resource_name'] = self.name
nics = self._build_nics(self.properties[self.NETWORKS])
block_device_mapping = self._build_block_device_mapping(
self.properties[self.BLOCK_DEVICE_MAPPING])
block_device_mapping_v2 = self._build_block_device_mapping_v2(
self.properties[self.BLOCK_DEVICE_MAPPING_V2])
reservation_id = self.properties[self.RESERVATION_ID]
disk_config = self.properties[self.DISK_CONFIG]
admin_pass = self.properties[self.ADMIN_PASS] or None
personality_files = self.properties[self.PERSONALITY]
key_name = self.properties[self.KEY_NAME]
server = None
try:
server = self.client().servers.create(
name=self._server_name(),
image=image,
flavor=flavor_id,
key_name=key_name,
security_groups=security_groups,
userdata=userdata,
meta=instance_meta,
scheduler_hints=scheduler_hints,
nics=nics,
availability_zone=availability_zone,
block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
reservation_id=reservation_id,
config_drive=self._config_drive(),
disk_config=disk_config,
files=personality_files,
admin_pass=admin_pass)
finally:
# Avoid a race condition where the thread could be canceled
# before the ID is stored
if server is not None:
self.resource_id_set(server.id)
return server.id
def check_create_complete(self, server_id):
return self.client_plugin()._check_active(server_id)
def handle_check(self):
server = self.client().servers.get(self.resource_id)
status = self.client_plugin().get_status(server)
checks = [{'attr': 'status', 'expected': 'ACTIVE', 'current': status}]
self._verify_check_conditions(checks)
@classmethod
def _build_block_device_mapping(cls, bdm):
if not bdm:
return None
bdm_dict = {}
for mapping in bdm:
mapping_parts = []
snapshot_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if snapshot_id:
mapping_parts.append(snapshot_id)
mapping_parts.append('snap')
else:
volume_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID)
mapping_parts.append(volume_id)
mapping_parts.append('')
volume_size = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE)
delete = mapping.get(cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
if volume_size:
mapping_parts.append(str(volume_size))
else:
mapping_parts.append('')
if delete:
mapping_parts.append(str(delete))
device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME)
bdm_dict[device_name] = ':'.join(mapping_parts)
return bdm_dict
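# Illustrative result of _build_block_device_mapping (hypothetical IDs):
# a template mapping such as
#   {'device_name': 'vdb', 'snapshot_id': 'snap-1234',
#    'volume_size': 10, 'delete_on_termination': True}
# is flattened into the legacy nova "id:type:size:delete" string form:
#   {'vdb': 'snap-1234:snap:10:True'}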
@classmethod
def _build_block_device_mapping_v2(cls, bdm_v2):
if not bdm_v2:
return None
bdm_v2_list = []
for mapping in bdm_v2:
bmd_dict = None
if mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID),
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID),
'source_type': 'snapshot',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE_ID),
'source_type': 'image',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE):
bmd_dict = {
'source_type': 'blank',
'destination_type': 'local',
'boot_index': -1,
'delete_on_termination': True,
'guest_format': 'swap',
'volume_size': mapping.get(
cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE),
}
update_props = (cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME,
cls.BLOCK_DEVICE_MAPPING_DEVICE_TYPE,
cls.BLOCK_DEVICE_MAPPING_DISK_BUS,
cls.BLOCK_DEVICE_MAPPING_BOOT_INDEX,
cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
for update_prop in update_props:
if mapping.get(update_prop) is not None:
bmd_dict[update_prop] = mapping.get(update_prop)
if bmd_dict:
bdm_v2_list.append(bmd_dict)
return bdm_v2_list
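# Illustrative v2 result (hypothetical IDs): a mapping with just
#   {'image_id': 'image-abcd', 'volume_size': 20}
# becomes a dict suitable for the nova v2 block-device API:
#   [{'uuid': 'image-abcd', 'source_type': 'image',
#     'destination_type': 'volume', 'boot_index': 0,
#     'delete_on_termination': False, 'volume_size': 20}]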
def _build_nics(self, networks):
if not networks:
return None
nics = []
for net_data in networks:
nic_info = {}
net_identifier = (net_data.get(self.NETWORK_UUID) or
net_data.get(self.NETWORK_ID))
if net_identifier:
if self.is_using_neutron():
net_id = (self.client_plugin(
'neutron').resolve_network(
net_data, self.NETWORK_ID, self.NETWORK_UUID))
else:
net_id = (self.client_plugin(
'nova').get_nova_network_id(net_identifier))
nic_info['net-id'] = net_id
if net_data.get(self.NETWORK_FIXED_IP):
ip = net_data[self.NETWORK_FIXED_IP]
if netutils.is_valid_ipv6(ip):
nic_info['v6-fixed-ip'] = ip
else:
nic_info['v4-fixed-ip'] = ip
if net_data.get(self.NETWORK_PORT):
nic_info['port-id'] = net_data[self.NETWORK_PORT]
nics.append(nic_info)
return nics
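# Illustrative nics output (hypothetical values): the networks property
#   [{'network': 'private', 'fixed_ip': '10.0.0.5'}, {'port': 'port-42'}]
# would typically resolve to
#   [{'net-id': '<resolved private net UUID>', 'v4-fixed-ip': '10.0.0.5'},
#    {'port-id': 'port-42'}]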
def _add_port_for_address(self, server):
"""Method adds port id to list of addresses.
This method is used only for resolving attributes.
"""
nets = copy.deepcopy(server.addresses)
ifaces = server.interface_list()
ip_mac_mapping_on_port_id = dict(((iface.fixed_ips[0]['ip_address'],
iface.mac_addr), iface.port_id)
for iface in ifaces)
for net_name in nets:
for addr in nets[net_name]:
addr['port'] = ip_mac_mapping_on_port_id.get(
(addr['addr'], addr['OS-EXT-IPS-MAC:mac_addr']))
return self._extend_networks(nets)
def _extend_networks(self, networks):
"""Method adds same networks with replaced name on network id.
This method is used only for resolving attributes.
"""
nets = copy.deepcopy(networks)
for key in list(nets.keys()):
try:
net_id = self.client_plugin().get_net_id_by_label(key)
except (exception.NovaNetworkNotFound,
exception.PhysicalResourceNameAmbiguity):
net_id = None
if net_id:
nets[net_id] = nets[key]
return nets
def _resolve_attribute(self, name):
if name == self.FIRST_ADDRESS:
return self.client_plugin().server_to_ipaddress(
self.resource_id) or ''
if name == self.NAME_ATTR:
return self._server_name()
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
return ''
if name == self.ADDRESSES:
return self._add_port_for_address(server)
if name == self.NETWORKS_ATTR:
return self._extend_networks(server.networks)
if name == self.INSTANCE_NAME:
return getattr(server, 'OS-EXT-SRV-ATTR:instance_name', None)
if name == self.ACCESSIPV4:
return server.accessIPv4
if name == self.ACCESSIPV6:
return server.accessIPv6
if name == self.CONSOLE_URLS:
return self.client_plugin('nova').get_console_urls(server)
def add_dependencies(self, deps):
super(Server, self).add_dependencies(deps)
# Depend on any Subnet in this template with the same
# network_id as the networks attached to this server.
# It is not known which subnet a server might be assigned
# to so all subnets in a network should be created before
# the servers in that network.
nets = self.properties[self.NETWORKS]
if not nets:
return
for res in six.itervalues(self.stack):
if res.has_interface('OS::Neutron::Subnet'):
subnet_net = (res.properties.get(subnet.Subnet.NETWORK_ID)
or res.properties.get(subnet.Subnet.NETWORK))
for net in nets:
# worry about network_id because that could be the match
# assigned to the subnet as well and could have been
# created by this stack. Regardless, the server should
# still wait on the subnet.
net_id = (net.get(self.NETWORK_ID) or
net.get(self.NETWORK_UUID))
if net_id and net_id == subnet_net:
deps += (self, res)
break
def _get_network_matches(self, old_networks, new_networks):
# normalize new_networks entries so they can be compared with old_networks
for new_net in new_networks:
for key in ('port', 'network', 'fixed_ip', 'uuid'):
# if new_net.get(key) is '', convert to None
if not new_net.get(key):
new_net[key] = None
for old_net in old_networks:
for key in ('port', 'network', 'fixed_ip', 'uuid'):
# if old_net.get(key) is '', convert to None
if not old_net.get(key):
old_net[key] = None
# find matches and remove them from old and new networks
not_updated_networks = []
for net in old_networks:
if net in new_networks:
new_networks.remove(net)
not_updated_networks.append(net)
for net in not_updated_networks:
old_networks.remove(net)
return not_updated_networks
def _get_network_id(self, net):
net_id = None
if net.get(self.NETWORK_ID):
if self.is_using_neutron():
net_id = self.client_plugin(
'neutron').resolve_network(
net,
self.NETWORK_ID, self.NETWORK_UUID)
else:
net_id = self.client_plugin(
'nova').get_nova_network_id(net.get(self.NETWORK_ID))
return net_id
def update_networks_matching_iface_port(self, nets, interfaces):
def find_equal(port, net_id, ip, nets):
for net in nets:
if (net.get('port') == port or
(net.get('fixed_ip') == ip and
(self._get_network_id(net) == net_id or
net.get('uuid') == net_id))):
return net
def find_poor_net(net_id, nets):
for net in nets:
if (not net.get('port') and not net.get('fixed_ip') and
(self._get_network_id(net) == net_id or
net.get('uuid') == net_id)):
return net
for iface in interfaces:
# get interface properties
props = {'port': iface.port_id,
'net_id': iface.net_id,
'ip': iface.fixed_ips[0]['ip_address'],
'nets': nets}
# try to match by port or network_id with fixed_ip
net = find_equal(**props)
if net is not None:
net['port'] = props['port']
continue
# find poor net that has only network_id
net = find_poor_net(props['net_id'], nets)
if net is not None:
net['port'] = props['port']
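# Hedged sketch of the matching above (hypothetical values): for a server
# interface on net 'net-1' with fixed ip 10.0.0.5 and port 'port-9',
# find_equal() first looks for a template entry that names 'port-9', or one
# whose fixed_ip is 10.0.0.5 and whose network resolves to 'net-1'; failing
# that, find_poor_net() picks a bare entry that only names 'net-1'.  The
# matched entry then gets net['port'] = 'port-9', so the later detach/attach
# handling knows which existing port belongs to that entry.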
def _update_flavor(self, prop_diff):
flavor_update_policy = (
prop_diff.get(self.FLAVOR_UPDATE_POLICY) or
self.properties[self.FLAVOR_UPDATE_POLICY])
flavor = prop_diff[self.FLAVOR]
if flavor_update_policy == 'REPLACE':
raise resource.UpdateReplace(self.name)
flavor_id = self.client_plugin().get_flavor_id(flavor)
handler_args = {'args': (flavor_id,)}
checker_args = {'args': (flavor_id, flavor)}
prg_resize = progress.ServerUpdateProgress(self.resource_id,
'resize',
handler_extra=handler_args,
checker_extra=checker_args)
prg_verify = progress.ServerUpdateProgress(self.resource_id,
'verify_resize')
return prg_resize, prg_verify
def _update_image(self, prop_diff):
image_update_policy = (
prop_diff.get(self.IMAGE_UPDATE_POLICY) or
self.properties[self.IMAGE_UPDATE_POLICY])
if image_update_policy == 'REPLACE':
raise resource.UpdateReplace(self.name)
image = prop_diff[self.IMAGE]
image_id = self.client_plugin('glance').get_image_id(image)
preserve_ephemeral = (
image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL')
password = (prop_diff.get(self.ADMIN_PASS) or
self.properties[self.ADMIN_PASS])
kwargs = {'password': password,
'preserve_ephemeral': preserve_ephemeral}
prg = progress.ServerUpdateProgress(self.resource_id,
'rebuild',
handler_extra={'args': (image_id,),
'kwargs': kwargs})
return prg
def _update_networks(self, server, prop_diff):
updaters = []
new_networks = prop_diff.get(self.NETWORKS)
attach_first_free_port = False
if not new_networks:
new_networks = []
attach_first_free_port = True
old_networks = self.properties[self.NETWORKS]
if not server:
server = self.client().servers.get(self.resource_id)
interfaces = server.interface_list()
# if old_networks is None, it means that the server got the first
# free port, so we should detach this interface.
if old_networks is None:
for iface in interfaces:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
complete=True,
handler_extra={'args': (iface.port_id,)})
)
# if we have any information in networks field, we should:
# 1. find similar networks, if they exist
# 2. remove these networks from new_networks and old_networks
# lists
# 3. detach unmatched networks, which were present in old_networks
# 4. attach unmatched networks, which were present in new_networks
else:
# remove not updated networks from old and new networks lists,
# also get list these networks
not_updated_networks = self._get_network_matches(
old_networks, new_networks)
self.update_networks_matching_iface_port(
old_networks + not_updated_networks, interfaces)
# according to the nova interface-detach command, the detached
# port will be deleted
for net in old_networks:
if net.get(self.NETWORK_PORT):
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
complete=True,
handler_extra={'args':
(net.get(self.NETWORK_PORT),)})
)
handler_kwargs = {'port_id': None, 'net_id': None, 'fip': None}
# attach section similar for both variants that
# were mentioned above
for net in new_networks:
if net.get(self.NETWORK_PORT):
handler_kwargs['port_id'] = net.get(self.NETWORK_PORT)
elif net.get(self.NETWORK_ID):
handler_kwargs['net_id'] = self._get_network_id(net)
handler_kwargs['fip'] = net.get('fixed_ip')
elif net.get(self.NETWORK_UUID):
handler_kwargs['net_id'] = net['uuid']
handler_kwargs['fip'] = net.get('fixed_ip')
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
complete=True,
handler_extra={'kwargs': handler_kwargs})
)
# if new_networks is None, we should attach first free port,
# according to similar behavior during instance creation
if attach_first_free_port:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
complete=True)
)
return updaters
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if 'Metadata' in tmpl_diff:
self.metadata_set(tmpl_diff['Metadata'])
updaters = []
server = None
if self.METADATA in prop_diff:
server = self.client().servers.get(self.resource_id)
self.client_plugin().meta_update(server,
prop_diff[self.METADATA])
if self.FLAVOR in prop_diff:
updaters.extend(self._update_flavor(prop_diff))
if self.IMAGE in prop_diff:
updaters.append(self._update_image(prop_diff))
elif self.ADMIN_PASS in prop_diff:
if not server:
server = self.client().servers.get(self.resource_id)
server.change_password(prop_diff[self.ADMIN_PASS])
if self.NAME in prop_diff:
if not server:
server = self.client().servers.get(self.resource_id)
self.client_plugin().rename(server, prop_diff[self.NAME])
if self.NETWORKS in prop_diff:
updaters.extend(self._update_networks(server, prop_diff))
# NOTE(pas-ha) optimization is possible (starting first task
# right away), but we'd rather not, as this method already might
# have called several APIs
return updaters
def check_update_complete(self, updaters):
'''Push all updaters to completion in list order.'''
for prg in updaters:
if not prg.called:
handler = getattr(self.client_plugin(), prg.handler)
prg.called = handler(*prg.handler_args,
**prg.handler_kwargs)
return False
if not prg.complete:
check_complete = getattr(self.client_plugin(), prg.checker)
prg.complete = check_complete(*prg.checker_args,
**prg.checker_kwargs)
break
return all(prg.complete for prg in updaters)
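# Hedged sketch of the updater protocol above: each ServerUpdateProgress
# entry names a nova client-plugin handler (e.g. 'resize', 'rebuild',
# 'interface_attach') plus a matching checker.  check_update_complete()
# invokes the handler once, then keeps polling the checker on later calls,
# and only moves on to the next updater when the current one reports
# complete -- so e.g. a flavor resize finishes before interfaces are
# re-attached.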
def metadata_update(self, new_metadata=None):
'''
Refresh the metadata if new_metadata is None
'''
if new_metadata is None:
# Re-resolve the template metadata and merge it with the
# current resource metadata. This is necessary because the
# attributes referenced in the template metadata may change
# and the resource itself adds keys to the metadata which
# are not specified in the template (e.g the deployments data)
meta = self.metadata_get(refresh=True) or {}
tmpl_meta = self.t.metadata()
meta.update(tmpl_meta)
self.metadata_set(meta)
@staticmethod
def _check_maximum(count, maximum, msg):
'''
Check a count against a maximum, unless maximum is -1 which indicates
that there is no limit
'''
if maximum != -1 and count > maximum:
raise exception.StackValidationFailed(message=msg)
def _validate_block_device_mapping(self):
# either volume_id or snapshot_id needs to be specified, but not both
# for block device mapping.
bdm = self.properties[self.BLOCK_DEVICE_MAPPING] or []
bootable_vol = False
for mapping in bdm:
device_name = mapping[self.BLOCK_DEVICE_MAPPING_DEVICE_NAME]
if device_name == 'vda':
bootable_vol = True
volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if volume_id is not None and snapshot_id is not None:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if volume_id is None and snapshot_id is None:
msg = _('Either volume_id or snapshot_id must be specified for'
' device mapping %s') % device_name
raise exception.StackValidationFailed(message=msg)
bdm_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] or []
if bdm and bdm_v2:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING, self.BLOCK_DEVICE_MAPPING_V2)
for mapping in bdm_v2:
volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
image_id = mapping.get(self.BLOCK_DEVICE_MAPPING_IMAGE_ID)
swap_size = mapping.get(self.BLOCK_DEVICE_MAPPING_SWAP_SIZE)
property_tuple = (volume_id, snapshot_id, image_id, swap_size)
if property_tuple.count(None) < 3:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
self.BLOCK_DEVICE_MAPPING_IMAGE_ID,
self.BLOCK_DEVICE_MAPPING_SWAP_SIZE)
if property_tuple.count(None) == 4:
msg = _('Either volume_id, snapshot_id, image_id or '
'swap_size must be specified.')
raise exception.StackValidationFailed(message=msg)
if any((volume_id, snapshot_id, image_id)):
bootable_vol = True
return bootable_vol
def _validate_network(self, network):
if (network.get(self.NETWORK_ID) is None
and network.get(self.NETWORK_PORT) is None
and network.get(self.NETWORK_UUID) is None):
msg = _('One of the properties "%(id)s", "%(port_id)s", '
'"%(uuid)s" should be set for the '
'specified network of server "%(server)s".'
'') % dict(id=self.NETWORK_ID,
port_id=self.NETWORK_PORT,
uuid=self.NETWORK_UUID,
server=self.name)
raise exception.StackValidationFailed(message=msg)
if network.get(self.NETWORK_UUID) and network.get(self.NETWORK_ID):
msg = _('Properties "%(uuid)s" and "%(id)s" are both set '
'to the network "%(network)s" for the server '
'"%(server)s". The "%(uuid)s" property is deprecated. '
'Use only "%(id)s" property.'
'') % dict(uuid=self.NETWORK_UUID,
id=self.NETWORK_ID,
network=network[self.NETWORK_ID],
server=self.name)
raise exception.StackValidationFailed(message=msg)
elif network.get(self.NETWORK_UUID):
LOG.info(_LI('For the server "%(server)s" the "%(uuid)s" '
'property is set to network "%(network)s". '
'"%(uuid)s" property is deprecated. Use '
'"%(id)s" property instead.'),
dict(uuid=self.NETWORK_UUID,
id=self.NETWORK_ID,
network=network[self.NETWORK_ID],
server=self.name))
def validate(self):
'''
Validate any of the provided params
'''
super(Server, self).validate()
bootable_vol = self._validate_block_device_mapping()
# make sure the image exists if specified.
image = self.properties[self.IMAGE]
if not image and not bootable_vol:
msg = _('Neither image nor bootable volume is specified for'
' instance %s') % self.name
raise exception.StackValidationFailed(message=msg)
# network properties 'uuid' and 'network' shouldn't be used
# both at once for all networks
networks = self.properties[self.NETWORKS] or []
# record if any networks include explicit ports
networks_with_port = False
for network in networks:
networks_with_port = (networks_with_port or
network.get(self.NETWORK_PORT))
self._validate_network(network)
# retrieve provider's absolute limits if it will be needed
metadata = self.properties[self.METADATA]
personality = self.properties[self.PERSONALITY]
if metadata is not None or personality:
limits = self.client_plugin().absolute_limits()
# if 'security_groups' present for the server and explict 'port'
# in one or more entries in 'networks', raise validation error
if networks_with_port and self.properties[self.SECURITY_GROUPS]:
raise exception.ResourcePropertyConflict(
self.SECURITY_GROUPS,
"/".join([self.NETWORKS, self.NETWORK_PORT]))
# verify that the number of metadata entries is not greater
# than the maximum number allowed in the provider's absolute
# limits
if metadata is not None:
msg = _('Instance metadata must not contain greater than %s '
'entries. This is the maximum number allowed by your '
'service provider') % limits['maxServerMeta']
self._check_maximum(len(metadata),
limits['maxServerMeta'], msg)
# verify the number of personality files and the size of each
# personality file against the provider's absolute limits
if personality:
msg = _("The personality property may not contain "
"greater than %s entries.") % limits['maxPersonality']
self._check_maximum(len(personality),
limits['maxPersonality'], msg)
for path, contents in personality.items():
msg = (_("The contents of personality file \"%(path)s\" "
"is larger than the maximum allowed personality "
"file size (%(max_size)s bytes).") %
{'path': path,
'max_size': limits['maxPersonalitySize']})
self._check_maximum(len(bytes(contents.encode('utf-8'))),
limits['maxPersonalitySize'], msg)
def _delete_temp_url(self):
object_name = self.data().get('metadata_object_name')
if not object_name:
return
try:
container = self.physical_resource_name()
swift = self.client('swift')
swift.delete_object(container, object_name)
headers = swift.head_container(container)
if int(headers['x-container-object-count']) == 0:
swift.delete_container(container)
except Exception as ex:
self.client_plugin('swift').ignore_not_found(ex)
def _delete_queue(self):
queue_id = self.data().get('metadata_queue_id')
if not queue_id:
return
client_plugin = self.client_plugin('zaqar')
zaqar = client_plugin.create_for_tenant(
self.stack.stack_user_project_id)
try:
zaqar.queue(queue_id).delete()
except Exception as ex:
client_plugin.ignore_not_found(ex)
self.data_delete('metadata_queue_id')
def handle_snapshot_delete(self, state):
if state[0] != self.FAILED:
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
return progress.ServerDeleteProgress(
self.resource_id, image_id, False)
return self.handle_delete()
def handle_delete(self):
if self.resource_id is None:
return
if self.user_data_software_config():
self._delete_user()
self._delete_temp_url()
self._delete_queue()
try:
self.client().servers.delete(self.resource_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
return
return progress.ServerDeleteProgress(self.resource_id)
def check_delete_complete(self, prg):
if not prg:
return True
if not prg.image_complete:
image = self.client().images.get(prg.image_id)
if image.status in ('DELETED', 'ERROR'):
raise exception.Error(image.status)
elif image.status == 'ACTIVE':
prg.image_complete = True
if not self.handle_delete():
return True
return False
return self.client_plugin().check_delete_server_complete(
prg.server_id)
def handle_suspend(self):
'''
Suspend a server - note we do not wait for the SUSPENDED state,
this is polled for by check_suspend_complete in a similar way to the
create logic so we can take advantage of coroutines
'''
if self.resource_id is None:
raise exception.Error(_('Cannot suspend %s, resource_id not set') %
self.name)
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
if self.client_plugin().is_not_found(e):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
raise
else:
# if the server has been suspended successfully,
# there is no need to suspend again
if self.client_plugin().get_status(server) != 'SUSPENDED':
LOG.debug('suspending server %s' % self.resource_id)
server.suspend()
return server.id
def check_suspend_complete(self, server_id):
cp = self.client_plugin()
server = cp.fetch_server(server_id)
if not server:
return False
status = cp.get_status(server)
LOG.debug('%(name)s check_suspend_complete status = %(status)s'
% {'name': self.name, 'status': status})
if status in list(cp.deferred_server_statuses + ['ACTIVE']):
return status == 'SUSPENDED'
else:
exc = resource.ResourceUnknownStatus(
result=_('Suspend of server %s failed') % server.name,
resource_status=status)
raise exc
def handle_resume(self):
'''
Resume a server - note we do not wait for the ACTIVE state,
this is polled for by check_resume_complete in a similar way to the
create logic so we can take advantage of coroutines
'''
if self.resource_id is None:
raise exception.Error(_('Cannot resume %s, resource_id not set') %
self.name)
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
if self.client_plugin().is_not_found(e):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
raise
else:
# if the server has been resumed successfully,
# there is no need to resume again
if self.client_plugin().get_status(server) != 'ACTIVE':
LOG.debug('resuming server %s' % self.resource_id)
server.resume()
return server.id
def check_resume_complete(self, server_id):
return self.client_plugin()._check_active(server_id)
def handle_snapshot(self):
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
self.data_set('snapshot_image_id', image_id)
return image_id
def check_snapshot_complete(self, image_id):
image = self.client().images.get(image_id)
if image.status == 'ACTIVE':
return True
elif image.status == 'ERROR' or image.status == 'DELETED':
raise exception.Error(image.status)
return False
def handle_delete_snapshot(self, snapshot):
image_id = snapshot['resource_data'].get('snapshot_image_id')
try:
self.client().images.delete(image_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
def handle_restore(self, defn, restore_data):
image_id = restore_data['resource_data']['snapshot_image_id']
props = function.resolve(self.properties.data)
props[self.IMAGE] = image_id
return defn.freeze(properties=props)
def resource_mapping():
return {
'OS::Nova::Server': Server,
}
| apache-2.0 | -4,687,122,793,121,443,000 | 40.828829 | 79 | 0.53683 | false |
line72/subte | libsubte/interface/StopMarker.py | 1 | 9828 | #
# Copyright (C) 2012 - Marcus Dillavou
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import math
import weakref
from gi.repository import Gtk, Champlain, Clutter, GLib
import libsubte
import shapes
class StopMarker(Champlain.CustomMarker):
def __init__(self, gtmap, stop):
Champlain.CustomMarker.__init__(self)
self._gtmap = None
self.gtmap = gtmap
self._stop = None
self.stop = stop
self.full_picture_box = None
self.unselected_color = Clutter.Color.new(0xf0, 0x02, 0xf0, 0xbb)
self.picture_color = Clutter.Color.new(0xef, 0xe4, 0x35, 0xbb)
self.modified_color = Clutter.Color.new(0xff, 0x10, 0x28, 0xbb)
self.route_color = Clutter.Color.new(0x0d, 0x9a, 0x27, 0xbb)
self.selected_color = Clutter.Color.new(0xfd, 0xfd, 0x02, 0xbb)
# draw our clickable marker
self.marker = Clutter.Actor()
self.marker.set_background_color(self.unselected_color)
self.marker.set_size(16, 16)
self.marker.set_position(0, 0)
self.marker.set_anchor_point(8, 8)
self.marker.set_reactive(True)
self.add_actor(self.marker)
self.marker.show()
self._visible = False
self.set_location(self.stop.latitude, self.stop.longitude)
# trying to capture it, then make us emit a signal doesn't
# seem to be working
#!lukstafi -- changed button-release to button-press
# and uncommented next line
self.marker.connect('button-press-event', self.on_click)
self.set_reactive(False)
@property
def gtmap(self):
if self._gtmap:
return self._gtmap()
return None
@gtmap.setter
def gtmap(self, m):
if m:
self._gtmap = weakref.ref(m)
else:
self._gtmap = None
@property
def stop(self):
if self._stop:
return self._stop()
return None
@stop.setter
def stop(self, m):
if m:
self._stop = weakref.ref(m)
else:
self._stop = None
def selected(self, status):
if status:
self.marker.set_background_color(self.selected_color)
else:
self.marker.set_background_color(self.unselected_color)
return True
def clicked(self, status):
print 'StopMarker.clicked status=', status
if status == self._visible: # nothing to do here
return True
if status:
self.show()
else:
self.hide()
return True
def on_click(self, actor, event, user_data = None):
#!mwd - this doesn't work :(
print 'StopMarker.on_click (no emitting)', actor, event
#!lukstafi - commented out
#self.emit('button-press-event', event)
#!lukstafi - instead of signals we self-call and invoke the hook
self.clicked(True)
if libsubte.Stop.activate_stop_hook:
libsubte.Stop.activate_stop_hook(self.stop)
return True
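    # The hook called above is just a callable stored on libsubte.Stop that
    # receives the activated Stop instance.  A hypothetical wiring (the handler
    # name below is made up, not part of libsubte) might look like:
    #
    #   def on_stop_activated(stop):
    #       print 'activated stop', stop.stop_id
    #
    #   libsubte.Stop.activate_stop_hook = staticmethod(on_stop_activated)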
def on_expand_picture(self, actor, event, picture):
self.full_picture_box = Clutter.Texture()
self.full_picture_box.set_from_file(picture.image)
self.full_picture_box.set_keep_aspect_ratio(True)
size = self.gtmap.get_allocated_width(), self.gtmap.get_allocated_height()
r1 = size[0] / float(size[1])
size2 = self.full_picture_box.get_base_size()
if picture.orientation == 0 or picture.orientation == 180:
r2 = size2[0] / float(size2[1])
else:
r2 = size2[1] / float(size2[0])
self.full_picture_box.set_position(0, 0)
self.full_picture_box.set_z_rotation_from_gravity(picture.orientation, Clutter.Gravity.CENTER)
if r1 > r2: # use width
w = size[1] * r2
h = size[1]
else: # use height
w = size[0]
h = size[0] / r2
if picture.orientation != 0 and picture.orientation != 180:
w, h = h, w # reverse
self.full_picture_box.set_size(w, h)
self.full_picture_box.set_reactive(True)
#!lukstafi -- changed button-release to button-press
self.full_picture_box.connect('button-press-event', self.on_close_picture)
self.full_picture_box.show_all()
self.gtmap.show_image(self.full_picture_box)
return False
def on_close_picture(self, actor, event):
if self.full_picture_box:
self.gtmap.remove_image(self.full_picture_box)
self.full_picture_box.hide_all()
self.full_picture_box = None
return False
def show(self):
self.gtmap.unshow_stop_info()
width = 500
height = 200
# our meta info
group = Clutter.Group()
group.set_position(8, -8)
group.set_anchor_point(width / 2, height)
# just drawn a rectange or something
rect = shapes.Bubble()
c = Clutter.Color.new(0xde, 0xde, 0xde, 0xfe)
rect.set_color(c)
rect.set_has_outline(True)
rect.set_outline_color(Clutter.Color.new(0x00, 0x00, 0x00, 0xff))
rect.set_size(width, height)
rect.set_position(0, 8)
rect.set_anchor_point(0, 0)
rect.set_has_shadow(True)
group.add_child(rect)
name = Clutter.Text()
if self.stop.name:
            name.set_markup('<markup><b>%s</b></markup>' % self.stop.name.replace('&', '&amp;'))
else:
name.set_markup('<markup><b>%s</b></markup>' % self.stop.stop_id)
name.set_size(400, 25)
name.set_position(10, 15)
name.set_anchor_point(0, 0)
group.add_child(name)
info = Clutter.Text()
info.set_use_markup(True)
info.set_text('')
info.set_size(200, 75)
info.set_position(10, 50)
info.set_anchor_point(0, 0)
group.add_child(info)
info.set_markup('<markup><b>Latitude:</b> %s\n<b>Longitude:</b> %s</markup>' % (self.stop.latitude, self.stop.longitude))
routes = Clutter.Text()
if len(self.stop.trip_routes) > 0:
route_names = ', '.join([x.route.short_name for x in self.stop.trip_routes])
else:
route_names = 'None'
routes.set_markup('<markup><b>Routes:</b> %s</markup>' % route_names)
routes.set_size(200, 75)
routes.set_position(10, 100)
routes.set_anchor_point(0, 0)
group.add_child(routes)
# see if we have a picture (or more)
if len(self.stop.pictures) > 0:
try:
picture_box = Clutter.Texture()
# just use the first picture for now
picture = self.stop.pictures[0]
if picture.thumbnail:
picture_box.set_from_file(picture.thumbnail)
else:
picture_box.set_from_file(picture.image)
w, h = picture_box.get_base_size()
picture_box.set_keep_aspect_ratio(True)
picture_box.set_anchor_point(0, 0)
if picture.orientation in (90, -90):
                    #!mwd - I have no idea how the fuck clutter is rotating this
# It seems as though the bounding box doesn't change
# so I'm just making up some position numbers
picture_box.set_width(100)
picture_box.set_position(width - ((h/w) * 100) - (w/2) - 45, 60)
picture_box.set_z_rotation_from_gravity(picture.orientation, Clutter.Gravity.CENTER)
else:
picture_box.set_height(100)
picture_box.set_position(width - ((w/h) * 100) - (w/2) - 25, 50)
#!lukstafi -- changed button-release to button-press
picture_box.connect('button-press-event', self.on_expand_picture, picture)
picture_box.set_reactive(True)
group.add_child(picture_box)
except GLib.GError, e:
print >> sys.stderr, 'Error loading image', e
self.gtmap.show_popup(self, group)
self._visible = True
def hide(self):
self.gtmap.unshow_popup(self)
self._visible = False
self._update_color()
def update(self):
self._update_color()
if self._visible:
self.show()
def _update_color(self):
if self.stop:
if len(self.stop.trip_routes) > 0:
# we have routes associated with us
self.marker.set_background_color(self.route_color)
return
elif len(self.stop.pictures) > 0:
if self.stop.name != None and len(self.stop.name) > 0:
# picture and we have a name
self.marker.set_background_color(self.modified_color)
else:
# we have picture associated with us, but no name
self.marker.set_background_color(self.picture_color)
return
# default color
self.marker.set_background_color(self.unselected_color)
| gpl-3.0 | -5,898,807,832,328,061,000 | 32.889655 | 129 | 0.577941 | false |
piontec/docker-enforcer | test/test_docker_image_helper.py | 1 | 2127 | import unittest
from unittest.mock import create_autospec
import docker
from docker.errors import NotFound
from dockerenforcer.config import Config
from dockerenforcer.docker_image_helper import DockerImageHelper
class DockerHelperTests(unittest.TestCase):
def setUp(self):
self._config = Config()
self._client = create_autospec(docker.APIClient)
self._helper = DockerImageHelper(self._config, self._client)
self._image_id = 'sha256:7f6f52e2942811a77591960a62e9e88c2249c976b3fb83bf73aa1e9e570dfc51'
self._image_name1 = 'test1:latest'
self._image_name2 = 'test2:latest'
def test_get_image_uniq_tag_by_id__when_empty_inspect(self):
self._client.inspect_image.return_value = {}
image_tag = self._helper.get_image_uniq_tag_by_id(self._image_id)
self._client.inspect_image.assert_called_once_with(self._image_id)
self.assertEqual(self._image_id, image_tag)
def test_get_image_uniq_tag_by_id__when_empty_repo_tags(self):
self._client.inspect_image.return_value = {'RepoTags': []}
image_tag = self._helper.get_image_uniq_tag_by_id(self._image_id)
self._client.inspect_image.assert_called_once_with(self._image_id)
self.assertEqual(self._image_id, image_tag)
def test_get_image_uniq_tag_by_id__image_not_found(self):
self._client.inspect_image.side_effect = NotFound('Image not found')
image_tag = self._helper.get_image_uniq_tag_by_id(self._image_id)
self.assertEqual(self._image_id, image_tag)
def test_get_image_uniq_tag_by_id__when_single_repo_tag(self):
self._client.inspect_image.return_value = {'RepoTags': [self._image_name1]}
image_tag = self._helper.get_image_uniq_tag_by_id(self._image_id)
self.assertEqual(self._image_name1, image_tag)
def test_get_image_uniq_tag_by_id__when_many_repo_tags(self):
self._client.inspect_image.return_value = {'RepoTags': [self._image_name2, self._image_name1]}
image_tag = self._helper.get_image_uniq_tag_by_id(self._image_id)
self.assertEqual(self._image_name2, image_tag)
| gpl-3.0 | 6,659,342,909,781,258,000 | 46.266667 | 102 | 0.691584 | false |
bikash/kaggleCompetition | microsoft malware/code/_untuned_modeling.py | 1 | 5556 | ######################################################
# _untuned_modeling.py
# author: Gert Jacobusse, [email protected]
# licence: FreeBSD
"""
Copyright (c) 2015, Gert Jacobusse
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
#first run feature_extraction.py
#then run this file from the same directory
######################################################
# import dependencies
import csv
import numpy as np
from sklearn.cross_validation import KFold
from sklearn.ensemble import GradientBoostingClassifier,ExtraTreesClassifier
from sklearn.metrics import log_loss
######################################################
# list ids and labels
trainids=[]
labels=[]
with open('trainLabels.csv','r') as f:
r=csv.reader(f)
r.next() # skip header
for row in r:
trainids.append(row[0])
labels.append(float(row[1]))
testids=[]
with open('sampleSubmission.csv','r') as f:
r=csv.reader(f)
r.next()
for row in r:
testids.append(row[0])
######################################################
# general functions
def readdata(fname,header=True,selectedcols=None):
with open(fname,'r') as f:
r=csv.reader(f)
names = r.next() if header else None
if selectedcols:
assert header==True
data = [[float(e) for i,e in enumerate(row) if names[i] in selectedcols] for row in r]
names = [name for name in names if name in selectedcols]
else:
data = [[float(e) for e in row] for row in r]
return data,names
def writedata(data,fname,header=None):
with open(fname,'w') as f:
w=csv.writer(f)
if header:
w.writerow(header)
for row in data:
w.writerow(row)
######################################################
# cross validation
"""
function docv
input: classifier, kfolds object, features, labels, number of data rows
output: holdout-set-predictions for all rows
* run cross validation
"""
def docv(clf,kf,x,y,nrow,nlab=9):
pred = np.zeros((nrow,nlab))
for trainidx, testidx in kf:
clf.fit(x[trainidx],y[trainidx])
pred[testidx] = clf.predict_proba(x[testidx])
return pred
"""
function runcv
input: name of train/ test file, classifier 1 and 2 to be used
output: writes holdout-set-predictions for all rows to file
* run cross validation by calling docv for both classifiers, combine and save results
"""
def runcv(filename,c1,c2):
y=np.array(labels)
nrow=len(y)
x,_=readdata('train_%s'%filename)
x=np.array(x)
kf = KFold(nrow,10,shuffle=True)
p1=docv(c1,kf,x,y,nrow)
p2=docv(c2,kf,x,y,nrow)
pcombi=0.667*p1+0.333*p2
print '%.4f %.4f %.4f'%(log_loss(y,p1),log_loss(y,p2),log_loss(y,pcombi))
with open('pred_%s'%filename,'w') as f:
w=csv.writer(f)
for row in pcombi:
w.writerow(row)
######################################################
# create and write the submission file
"""
function writesubm
input: name of train/ test file, classifier 1 and 2 to be used
output: writes testset predictions to file
* train classifiers using all traindata, create testset predictions, combine and save results
"""
def writesubm(filename,c1,c2):
xtrain,names=readdata('train_%s'%filename)
xtest,_=readdata('test_%s'%filename)
c1.fit(xtrain,labels)
c2.fit(xtrain,labels)
p1=c1.predict_proba(xtest)
p2=c2.predict_proba(xtest)
p=0.667*p1+0.333*p2
with open('subm_%s'%filename,'w') as f:
w=csv.writer(f)
w.writerow(['Id']+['Prediction%d'%num for num in xrange(1,10)])
for inum,i in enumerate(testids):
w.writerow([i]+list(p[inum]))
######################################################
# go
if __name__ == '__main__':
gbm=GradientBoostingClassifier(
n_estimators=400, max_features=5)
xtr=ExtraTreesClassifier(
n_estimators=400,max_features=None,
min_samples_leaf=2,min_samples_split=3,
n_jobs=7)
for filename in [
'45c.csv',
]:
print filename
runcv(filename,gbm,xtr)
writesubm(filename,gbm,xtr)
print ''
"""
45c.csv
0.0117 0.0168 0.0101
public LB: 0.008071379
private LB: 0.007615772
""" | apache-2.0 | 8,355,478,780,202,789,000 | 31.497076 | 98 | 0.62653 | false |
jalabort/ijcv-2014-aam | aam/image/test/image_test.py | 1 | 18144 | import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from nose.tools import raises
from menpo.testing import is_same_array
from menpo.image import BooleanImage, MaskedImage, Image
@raises(ValueError)
def test_create_1d_error():
Image(np.ones(1))
def test_image_n_elements():
image = Image(np.ones((10, 10, 3)))
assert(image.n_elements == 10 * 10 * 3)
def test_image_width():
image = Image(np.ones((6, 4, 3)))
assert(image.width == 4)
def test_image_height():
image = Image(np.ones((6, 4, 3)))
assert(image.height == 6)
def test_image_blank():
image = Image(np.zeros((6, 4, 1)))
image_blank = Image.blank((6, 4))
assert(np.all(image_blank.pixels == image.pixels))
def test_image_blank_fill():
image = Image(np.ones((6, 4, 1)) * 7)
image_blank = Image.blank((6, 4), fill=7)
assert(np.all(image_blank.pixels == image.pixels))
def test_image_blank_n_channels():
image = Image(np.zeros((6, 4, 7)))
image_blank = Image.blank((6, 4), n_channels=7)
assert(np.all(image_blank.pixels == image.pixels))
def test_image_centre():
pixels = np.ones((10, 20, 1))
image = Image(pixels)
assert(np.all(image.centre == np.array([5, 10])))
def test_image_str_shape_4d():
pixels = np.ones((10, 20, 11, 12, 1))
image = Image(pixels)
assert(image._str_shape == '10 x 20 x 11 x 12')
def test_image_str_shape_2d():
pixels = np.ones((10, 20, 1))
image = Image(pixels)
assert(image._str_shape == '20W x 10H')
def test_image_as_vector():
pixels = np.random.rand(10, 20, 1)
image = Image(pixels)
assert(np.all(image.as_vector() == pixels.ravel()))
def test_image_as_vector_keep_channels():
pixels = np.random.rand(10, 20, 2)
image = Image(pixels)
assert(np.all(image.as_vector(keep_channels=True) ==
pixels.reshape([-1, 2])))
def test_image_from_vector():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 2)
image = Image(pixels)
image2 = image.from_vector(pixels2.ravel())
assert(np.all(image2.pixels == pixels2))
def test_image_from_vector_custom_channels():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 3)
image = Image(pixels)
image2 = image.from_vector(pixels2.ravel(), n_channels=3)
assert(np.all(image2.pixels == pixels2))
def test_image_from_vector_no_copy():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 2)
image = Image(pixels)
image2 = image.from_vector(pixels2.ravel(), copy=False)
assert(is_same_array(image2.pixels, pixels2))
def test_image_from_vector_inplace_no_copy():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 2)
image = Image(pixels)
image.from_vector_inplace(pixels2.ravel(), copy=False)
assert(is_same_array(image.pixels, pixels2))
def test_image_from_vector_inplace_no_copy_warning():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 2)
image = Image(pixels)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
image.from_vector_inplace(pixels2.ravel()[::-1], copy=False)
assert len(w) == 1
def test_image_from_vector_inplace_copy_default():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 2)
image = Image(pixels)
image.from_vector_inplace(pixels2.ravel())
assert(not is_same_array(image.pixels, pixels2))
def test_image_from_vector_inplace_copy_explicit():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 2)
image = Image(pixels)
image.from_vector_inplace(pixels2.ravel(), copy=True)
assert(not is_same_array(image.pixels, pixels2))
def test_image_from_vector_custom_channels_no_copy():
pixels = np.random.rand(10, 20, 2)
pixels2 = np.random.rand(10, 20, 3)
image = Image(pixels)
image2 = image.from_vector(pixels2.ravel(), n_channels=3, copy=False)
assert(is_same_array(image2.pixels, pixels2))
@raises(ValueError)
def test_boolean_image_wrong_round():
BooleanImage.blank((12, 12), round='ads')
def test_boolean_image_proportion_true():
image = BooleanImage.blank((10, 10))
image.pixels[:7] = False
assert(image.proportion_true == 0.3)
def test_boolean_image_proportion_false():
image = BooleanImage.blank((10, 10))
image.pixels[:7] = False
assert(image.proportion_false == 0.7)
def test_boolean_image_proportion_sums():
image = BooleanImage.blank((10, 10))
image.pixels[:7] = False
assert(image.proportion_true + image.proportion_false == 1)
def test_boolean_image_false_indices():
image = BooleanImage.blank((2, 3))
image.pixels[0, 1] = False
image.pixels[1, 2] = False
assert(np.all(image.false_indices == np.array([[0, 1],
[1, 2]])))
def test_boolean_image_str():
image = BooleanImage.blank((2, 3))
assert(image.__str__() == '3W x 2H 2D mask, 100.0% of which is True')
def test_boolean_image_from_vector():
vector = np.zeros(16, dtype=np.bool)
image = BooleanImage.blank((4, 4))
image2 = image.from_vector(vector)
assert(np.all(image2.as_vector() == vector))
def test_boolean_image_from_vector_no_copy():
vector = np.zeros(16, dtype=np.bool)
image = BooleanImage.blank((4, 4))
image2 = image.from_vector(vector, copy=False)
assert(is_same_array(image2.pixels.ravel(), vector))
def test_boolean_image_from_vector_no_copy_raises():
vector = np.zeros(16, dtype=np.bool)
image = BooleanImage.blank((4, 4))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
image.from_vector(vector[::-1], copy=False)
assert len(w) == 1
def test_boolean_image_invert_inplace():
image = BooleanImage.blank((4, 4))
image.invert_inplace()
assert(np.all(image.pixels == False))
def test_boolean_image_invert_inplace_double_noop():
image = BooleanImage.blank((4, 4))
image.invert_inplace()
image.invert_inplace()
assert(np.all(image.pixels == True))
def test_boolean_image_invert():
image = BooleanImage.blank((4, 4))
image2 = image.invert()
assert(np.all(image.pixels == True))
assert(np.all(image2.pixels == False))
def test_boolean_bounds_false():
mask = BooleanImage.blank((8, 8), fill=True)
mask.pixels[1, 2] = False
mask.pixels[5, 4] = False
mask.pixels[3:2, 3] = False
min_b, max_b = mask.bounds_false()
assert(np.all(min_b == np.array([1, 2])))
assert(np.all(max_b == np.array([5, 4])))
@raises(ValueError)
def test_boolean_prevent_order_kwarg():
mask = BooleanImage.blank((8, 8), fill=True)
mask.warp_to(mask, None, order=4)
def test_create_image_copy_false():
pixels = np.ones((100, 100, 1))
image = Image(pixels, copy=False)
assert (is_same_array(image.pixels, pixels))
def test_create_image_copy_true():
pixels = np.ones((100, 100, 1))
image = Image(pixels)
assert (not is_same_array(image.pixels, pixels))
def test_create_image_copy_false_not_c_contiguous():
pixels = np.ones((100, 100, 1), order='F')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
Image(pixels, copy=False)
assert(len(w) == 1)
def mask_image_3d_test():
mask_shape = (120, 121, 13)
mask_region = np.ones(mask_shape)
return BooleanImage(mask_region)
def test_mask_creation_basics():
mask_shape = (120, 121, 3)
mask_region = np.ones(mask_shape)
mask = BooleanImage(mask_region)
assert_equal(mask.n_channels, 1)
assert_equal(mask.n_dims, 3)
assert_equal(mask.shape, mask_shape)
def test_mask_blank():
mask = BooleanImage.blank((56, 12, 3))
assert (np.all(mask.pixels))
def test_boolean_copy_false_boolean():
mask = np.zeros((10, 10), dtype=np.bool)
boolean_image = BooleanImage(mask, copy=False)
assert (is_same_array(boolean_image.pixels, mask))
def test_boolean_copy_true():
mask = np.zeros((10, 10), dtype=np.bool)
boolean_image = BooleanImage(mask)
assert (not is_same_array(boolean_image.pixels, mask))
def test_boolean_copy_false_non_boolean():
mask = np.zeros((10, 10))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
BooleanImage(mask, copy=False)
assert(len(w) == 1)
def test_mask_blank_rounding_floor():
mask = BooleanImage.blank((56.1, 12.1), round='floor')
assert_allclose(mask.shape, (56, 12))
def test_mask_blank_rounding_ceil():
mask = BooleanImage.blank((56.1, 12.1), round='ceil')
assert_allclose(mask.shape, (57, 13))
def test_mask_blank_rounding_round():
mask = BooleanImage.blank((56.1, 12.6), round='round')
assert_allclose(mask.shape, (56, 13))
def test_mask_blank_false_fill():
mask = BooleanImage.blank((56, 12, 3), fill=False)
assert (np.all(~mask.pixels))
def test_mask_n_true_n_false():
mask = BooleanImage.blank((64, 14), fill=False)
assert_equal(mask.n_true, 0)
assert_equal(mask.n_false, 64 * 14)
mask.mask[0, 0] = True
mask.mask[9, 13] = True
assert_equal(mask.n_true, 2)
assert_equal(mask.n_false, 64 * 14 - 2)
def test_mask_true_indices():
mask = BooleanImage.blank((64, 14, 51), fill=False)
mask.mask[0, 2, 5] = True
mask.mask[5, 13, 4] = True
true_indices = mask.true_indices
true_indices_test = np.array([[0, 2, 5], [5, 13, 4]])
assert_equal(true_indices, true_indices_test)
def test_mask_false_indices():
mask = BooleanImage.blank((64, 14, 51), fill=True)
mask.mask[0, 2, 5] = False
mask.mask[5, 13, 4] = False
false_indices = mask.false_indices
false_indices_test = np.array([[0, 2, 5], [5, 13, 4]])
assert_equal(false_indices, false_indices_test)
def test_mask_true_bounding_extent():
mask = BooleanImage.blank((64, 14, 51), fill=False)
mask.mask[0, 13, 5] = True
mask.mask[5, 2, 4] = True
tbe = mask.bounds_true()
true_extends_mins = np.array([0, 2, 4])
true_extends_maxs = np.array([5, 13, 5])
assert_equal(tbe[0], true_extends_mins)
assert_equal(tbe[1], true_extends_maxs)
def test_3channel_image_creation():
pixels = np.ones((120, 120, 3))
MaskedImage(pixels)
def test_no_channels_image_creation():
pixels = np.ones((120, 120))
MaskedImage(pixels)
def test_create_MaskedImage_copy_false_mask_array():
pixels = np.ones((100, 100, 1))
mask = np.ones((100, 100), dtype=np.bool)
image = MaskedImage(pixels, mask=mask, copy=False)
assert (is_same_array(image.pixels, pixels))
assert (is_same_array(image.mask.pixels, mask))
def test_create_MaskedImage_copy_false_mask_BooleanImage():
pixels = np.ones((100, 100, 1))
mask = np.ones((100, 100), dtype=np.bool)
mask_image = BooleanImage(mask, copy=False)
image = MaskedImage(pixels, mask=mask_image, copy=False)
assert (is_same_array(image.pixels, pixels))
assert (is_same_array(image.mask.pixels, mask))
def test_create_MaskedImage_copy_true_mask_array():
pixels = np.ones((100, 100))
mask = np.ones((100, 100), dtype=np.bool)
image = MaskedImage(pixels, mask=mask)
assert (not is_same_array(image.pixels, pixels))
assert (not is_same_array(image.mask.pixels, mask))
def test_create_MaskedImage_copy_true_mask_BooleanImage():
pixels = np.ones((100, 100, 1))
mask = np.ones((100, 100), dtype=np.bool)
mask_image = BooleanImage(mask, copy=False)
image = MaskedImage(pixels, mask=mask_image, copy=True)
assert (not is_same_array(image.pixels, pixels))
assert (not is_same_array(image.mask.pixels, mask))
def test_2d_crop_without_mask():
pixels = np.ones((120, 120, 3))
im = MaskedImage(pixels)
cropped_im = im.crop([10, 50], [20, 60])
assert (cropped_im.shape == (10, 10))
assert (cropped_im.n_channels == 3)
assert (np.alltrue(cropped_im.shape))
def test_2d_crop_with_mask():
pixels = np.ones((120, 120, 3))
mask = np.zeros_like(pixels[..., 0])
mask[10:100, 20:30] = 1
im = MaskedImage(pixels, mask=mask)
cropped_im = im.crop([0, 0], [20, 60])
assert (cropped_im.shape == (20, 60))
assert (np.alltrue(cropped_im.shape))
def test_normalize_std_default():
pixels = np.ones((120, 120, 3))
pixels[..., 0] = 0.5
pixels[..., 1] = 0.2345
image = MaskedImage(pixels)
image.normalize_std_inplace()
assert_allclose(np.mean(image.pixels), 0, atol=1e-10)
assert_allclose(np.std(image.pixels), 1)
def test_normalize_norm_default():
pixels = np.ones((120, 120, 3))
pixels[..., 0] = 0.5
pixels[..., 1] = 0.2345
image = MaskedImage(pixels)
image.normalize_norm_inplace()
assert_allclose(np.mean(image.pixels), 0, atol=1e-10)
assert_allclose(np.linalg.norm(image.pixels), 1)
@raises(ValueError)
def test_normalize_std_no_variance_exception():
pixels = np.ones((120, 120, 3))
pixels[..., 0] = 0.5
pixels[..., 1] = 0.2345
image = MaskedImage(pixels)
image.normalize_std_inplace(mode='per_channel')
@raises(ValueError)
def test_normalize_norm_zero_norm_exception():
pixels = np.zeros((120, 120, 3))
image = MaskedImage(pixels)
image.normalize_norm_inplace(mode='per_channel')
def test_normalize_std_per_channel():
pixels = np.random.randn(120, 120, 3)
pixels[..., 1] *= 7
pixels[..., 0] += -14
pixels[..., 2] /= 130
image = MaskedImage(pixels)
image.normalize_std_inplace(mode='per_channel')
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=0), 0, atol=1e-10)
assert_allclose(
np.std(image.as_vector(keep_channels=True), axis=0), 1)
def test_normalize_norm_per_channel():
pixels = np.random.randn(120, 120, 3)
pixels[..., 1] *= 7
pixels[..., 0] += -14
pixels[..., 2] /= 130
image = MaskedImage(pixels)
image.normalize_norm_inplace(mode='per_channel')
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=0), 0, atol=1e-10)
assert_allclose(
np.linalg.norm(image.as_vector(keep_channels=True), axis=0), 1)
def test_normalize_std_masked():
pixels = np.random.randn(120, 120, 3)
pixels[..., 1] *= 7
pixels[..., 0] += -14
pixels[..., 2] /= 130
mask = np.zeros((120, 120))
mask[30:50, 20:30] = 1
image = MaskedImage(pixels, mask=mask)
image.normalize_std_inplace(mode='per_channel', limit_to_mask=True)
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=0), 0, atol=1e-10)
assert_allclose(
np.std(image.as_vector(keep_channels=True), axis=0), 1)
def test_normalize_norm_masked():
pixels = np.random.randn(120, 120, 3)
pixels[..., 1] *= 7
pixels[..., 0] += -14
pixels[..., 2] /= 130
mask = np.zeros((120, 120))
mask[30:50, 20:30] = 1
image = MaskedImage(pixels, mask=mask)
image.normalize_norm_inplace(mode='per_channel', limit_to_mask=True)
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=0), 0, atol=1e-10)
assert_allclose(
np.linalg.norm(image.as_vector(keep_channels=True), axis=0), 1)
def test_rescale_single_num():
image = MaskedImage(np.random.randn(120, 120, 3))
new_image = image.rescale(0.5)
assert_allclose(new_image.shape, (60, 60))
def test_rescale_tuple():
image = MaskedImage(np.random.randn(120, 120, 3))
new_image = image.rescale([0.5, 2.0])
assert_allclose(new_image.shape, (60, 240))
@raises(ValueError)
def test_rescale_negative():
image = MaskedImage(np.random.randn(120, 120, 3))
image.rescale([0.5, -0.5])
@raises(ValueError)
def test_rescale_negative_single_num():
image = MaskedImage(np.random.randn(120, 120, 3))
image.rescale(-0.5)
def test_rescale_boundaries_interpolation():
image = MaskedImage(np.random.randn(60, 60, 3))
for i in [x * 0.1 for x in range(1, 31)]:
image_rescaled = image.rescale(i)
assert_allclose(image_rescaled.mask.proportion_true, 1.0)
def test_resize():
image = MaskedImage(np.random.randn(120, 120, 3))
new_size = (250, 250)
new_image = image.resize(new_size)
assert_allclose(new_image.shape, new_size)
def test_as_greyscale_luminosity():
image = MaskedImage(np.ones([120, 120, 3]))
new_image = image.as_greyscale(mode='luminosity')
assert (new_image.shape == image.shape)
assert (new_image.n_channels == 1)
def test_as_greyscale_average():
image = MaskedImage(np.ones([120, 120, 3]))
new_image = image.as_greyscale(mode='average')
assert (new_image.shape == image.shape)
assert (new_image.n_channels == 1)
@raises(ValueError)
def test_as_greyscale_channels_no_index():
image = MaskedImage(np.ones([120, 120, 3]))
new_image = image.as_greyscale(mode='channel')
assert (new_image.shape == image.shape)
assert (new_image.n_channels == 1)
def test_as_greyscale_channels():
image = MaskedImage(np.random.randn(120, 120, 3))
new_image = image.as_greyscale(mode='channel', channel=0)
assert (new_image.shape == image.shape)
assert (new_image.n_channels == 1)
assert_allclose(new_image.pixels[..., 0], image.pixels[..., 0])
def test_as_pil_image_1channel():
im = MaskedImage(np.random.randn(120, 120, 1))
new_im = im.as_PILImage()
assert_allclose(np.asarray(new_im.getdata()).reshape(im.pixels.shape),
(im.pixels * 255).astype(np.uint8))
def test_as_pil_image_3channels():
im = MaskedImage(np.random.randn(120, 120, 3))
new_im = im.as_PILImage()
assert_allclose(np.asarray(new_im.getdata()).reshape(im.pixels.shape),
(im.pixels * 255).astype(np.uint8))
def test_image_gradient_sanity():
# Only a sanity check - does it run and generate sensible output?
image = Image(np.zeros([120, 120, 3]))
new_image = image.gradient()
assert(type(new_image) == Image)
assert(new_image.shape == image.shape)
assert(new_image.n_channels == image.n_channels * 2)
| bsd-2-clause | 23,672,551,868,986,324 | 29.089552 | 76 | 0.641534 | false |
nevins-b/lemur | lemur/plugins/lemur_openssl/plugin.py | 1 | 4304 | """
.. module: lemur.plugins.lemur_openssl.plugin
:platform: Unix
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
from io import open
import subprocess
from flask import current_app
from lemur.utils import mktempfile, mktemppath
from lemur.plugins.bases import ExportPlugin
from lemur.plugins import lemur_openssl as openssl
from lemur.common.utils import get_psuedo_random_string
def run_process(command):
"""
    Runs a given command with Popen and wraps some
error handling around it.
:param command:
:return:
"""
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
current_app.logger.debug(command)
stdout, stderr = p.communicate()
if p.returncode != 0:
current_app.logger.debug(" ".join(command))
current_app.logger.error(stderr)
raise Exception(stderr)
def create_pkcs12(cert, chain, p12_tmp, key, alias, passphrase):
"""
    Creates a PKCS12-formatted file.
:param cert:
:param chain:
:param p12_tmp:
:param key:
:param alias:
:param passphrase:
"""
if isinstance(cert, bytes):
cert = cert.decode('utf-8')
if isinstance(chain, bytes):
chain = chain.decode('utf-8')
if isinstance(key, bytes):
key = key.decode('utf-8')
with mktempfile() as key_tmp:
with open(key_tmp, 'w') as f:
f.write(key)
# Create PKCS12 keystore from private key and public certificate
with mktempfile() as cert_tmp:
with open(cert_tmp, 'w') as f:
if chain:
f.writelines([cert.strip() + "\n", chain.strip() + "\n"])
else:
f.writelines([cert.strip() + "\n"])
run_process([
"openssl",
"pkcs12",
"-export",
"-name", alias,
"-in", cert_tmp,
"-inkey", key_tmp,
"-out", p12_tmp,
"-password", "pass:{}".format(passphrase)
])
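# Illustrative sketch only: the PEM strings, output path, alias and passphrase
# below are made-up values, not part of Lemur's API; they just show how
# create_pkcs12() is expected to be called and what it shells out to.
#
#   create_pkcs12(cert_pem, chain_pem, '/tmp/out.p12', key_pem,
#                 'example-alias', 'example-passphrase')
#
# which is roughly equivalent to running:
#
#   openssl pkcs12 -export -name example-alias \
#       -in <cert+chain tmpfile> -inkey <key tmpfile> \
#       -out /tmp/out.p12 -password pass:example-passphrase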
class OpenSSLExportPlugin(ExportPlugin):
title = 'OpenSSL'
slug = 'openssl-export'
description = 'Is a loose interface to openssl and support various formats'
version = openssl.VERSION
author = 'Kevin Glisson'
author_url = 'https://github.com/netflix/lemur'
options = [
{
'name': 'type',
'type': 'select',
'required': True,
'available': ['PKCS12 (.p12)'],
'helpMessage': 'Choose the format you wish to export',
},
{
'name': 'passphrase',
'type': 'str',
'required': False,
'helpMessage': 'If no passphrase is given one will be generated for you, we highly recommend this. Minimum length is 8.',
'validation': ''
},
{
'name': 'alias',
'type': 'str',
'required': False,
'helpMessage': 'Enter the alias you wish to use for the keystore.',
}
]
def export(self, body, chain, key, options, **kwargs):
"""
Generates a Java Keystore or Truststore
:param key:
:param chain:
:param body:
:param options:
:param kwargs:
"""
if self.get_option('passphrase', options):
passphrase = self.get_option('passphrase', options)
else:
passphrase = get_psuedo_random_string()
if self.get_option('alias', options):
alias = self.get_option('alias', options)
else:
alias = "blah"
type = self.get_option('type', options)
with mktemppath() as output_tmp:
if type == 'PKCS12 (.p12)':
if not key:
raise Exception("Private Key required by {0}".format(type))
create_pkcs12(body, chain, output_tmp, key, alias, passphrase)
extension = "p12"
else:
raise Exception("Unable to export, unsupported type: {0}".format(type))
with open(output_tmp, 'rb') as f:
raw = f.read()
return extension, passphrase, raw
| apache-2.0 | 4,822,535,974,338,497,000 | 28.278912 | 133 | 0.549954 | false |
alextingle/autoclapper | autoclapper.py | 1 | 7762 | #! /usr/bin/env python
## Support for byteswapping audio streams (needed for AIFF format).
_typecode = {2:'h'}
def _init_typecode():
import array
for t in ('i', 'l'):
a = array.array(t)
if a.itemsize==4:
_typecode[4] = t
return
import sys
print "Can't find array typecode for 4 byte ints."
sys.exit(1)
_init_typecode()
def _byteswap(s,n):
"""Byteswap stream s, which is of width n bytes. Does nothing if n is 1.
Only supports widths listed in _typecode (2 & 4)."""
if n==1:
return s
import array
a = array.array( _typecode[n], s )
a.byteswap()
return a.tostring()
def _null(s,n):
"""Do nothing to stream s, which is of width n. See also: _byteswap(s,n)"""
return s
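# A tiny worked illustration (assuming a little-endian host, which is why the
# big-endian AIFF data needs swapping at all): a single 16-bit sample has its
# two bytes reversed, while width-1 streams pass through untouched.
#
#   _byteswap('\x01\x02', 2)  ->  '\x02\x01'
#   _null('\x01\x02', 2)      ->  '\x01\x02'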
class SoundFile(object):
'''Wrapper for PCM sound stream, can be AIFF (aifc module)
or WAV (wave module).'''
def __init__(self, fname, template_obj=None):
if fname[-5:].lower() == '.aiff':
self._mod = __import__('aifc')
self._conv = _byteswap # AIFF is big-endian.
elif fname[-4:].lower() == '.wav':
self._mod = __import__('wave')
self._conv = _null
else:
print 'Unknown extension:', fname
import sys
sys.exit(1)
if template_obj:
# We will create & write to this file.
self.init_from_template(fname, template_obj)
else:
# We load from this file.
self.load(fname)
def bytes_per_frame(self):
return self.stream.getsampwidth() * self.stream.getnchannels()
def bytes_per_second(self):
return self.stream.getframerate() * self.bytes_per_frame()
def load(self, in_fname):
print 'load', self._mod.__name__, in_fname
self.stream = self._mod.open(in_fname, 'rb')
def read_lin(self):
fragment = self.stream.readframes( self.stream.getnframes() )
return self._conv(fragment, self.stream.getsampwidth())
def init_from_template(self, out_fname, template_obj):
print 'create', self._mod.__name__, out_fname
self.stream = self._mod.open(out_fname, 'wb')
self.stream.setnchannels( template_obj.stream.getnchannels() )
self.stream.setsampwidth( template_obj.stream.getsampwidth() )
self.stream.setframerate( template_obj.stream.getframerate() )
def write_lin(self, fragment):
self.stream.writeframes(self._conv(fragment, self.stream.getsampwidth()))
def close(self):
self.stream.close()
def coerce_lin(source_aiff, template_obj):
'''Read data from source, and convert it to match template's params.'''
import audioop
frag = source_aiff.read_lin()
Ss = source_aiff.stream
St = template_obj.stream
# Sample width
if Ss.getsampwidth() != St.getsampwidth():
print 'coerce sampwidth %i -> %i' %(Ss.getsampwidth(), St.getsampwidth())
frag = audioop.lin2lin(frag, Ss.getsampwidth(), St.getsampwidth())
width = St.getsampwidth()
# Channels
if Ss.getnchannels() != St.getnchannels():
print 'coerce nchannels %i -> %i' %(Ss.getnchannels(), St.getnchannels())
if Ss.getnchannels()==2 and St.getnchannels()==1:
frag = audioop.tomono(frag, width, 0.5, 0.5)
elif Ss.getnchannels()==1 and St.getnchannels()==2:
frag = audioop.tostereo(frag, width, 1.0, 1.0)
else:
print "Err: can't match channels"
# Frame rate
if Ss.getframerate() != St.getframerate():
print 'coerce framerate %i -> %i' %(Ss.getframerate(), St.getframerate())
frag,state = audioop.ratecv(
frag, width,
St.getnchannels(),
Ss.getframerate(), # in rate
St.getframerate(), # out rate
None, 2,1
)
return frag
def findfit(scratch_frag, final_frag, sound_file):
'''Calculates the offset (in seconds) between scratch_frag & final_frag.
Both fragments are assumed to contain the same, loud "clapper" event.
The SoundFile object is used for common stream parameters.'''
import audioop
nchannels = sound_file.stream.getnchannels()
framerate = sound_file.stream.getframerate()
width = sound_file.stream.getsampwidth()
assert(width==2)
# Simplify the sound streams to make it quicker to find a match.
# Left channel only.
if nchannels > 1:
scratch_frag_ = audioop.tomono(scratch_frag, width, 1, 0)
final_frag_ = audioop.tomono(final_frag, width, 1, 0)
else:
scratch_frag_ = scratch_frag
final_frag_ = final_frag
nchannels_ = 1
# Downsample to 8000/sec
framerate_ = 8000
scratch_frag_,state =\
audioop.ratecv(scratch_frag_, width, nchannels_, framerate, framerate_, None)
final_frag_,state =\
audioop.ratecv(final_frag_, width, nchannels_, framerate, framerate_, None)
bytes_per_second_ = nchannels_ * framerate_ * width
# Find the clapper in final
    length_samples = int(0.001 * framerate * nchannels_) # short window (0.001 * original frame rate, in samples) for locating the loudest burst
final_off_samples = audioop.findmax(final_frag_, length_samples)
# Search for a 2 second 'needle' centred on where we found the 'clapper'
needle_bytes = 2 * bytes_per_second_
b0 = max(0, final_off_samples * width - int(needle_bytes/2))
print '"clapper" at final:', 1.0*b0/bytes_per_second_, 'sec'
b1 = b0 + needle_bytes
final_clapper_frag = final_frag_[b0:b1]
scratch_off_samples,factor = audioop.findfit(scratch_frag_, final_clapper_frag)
scratch_off_bytes = scratch_off_samples * width
print 'match at scratch:', 1.0*scratch_off_bytes/bytes_per_second_, 'sec', " factor =",factor
# Calculate the offset (shift) between the two fragments.
shift_sec = (scratch_off_bytes - b0) * 1.0 / bytes_per_second_
print 'shift =', shift_sec, 'seconds'
return shift_sec
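# Worked example of the sign convention (the times are made up): if the clapper
# is heard 3.0 s into the scratch track but only 1.5 s into the final take, the
# needle matches 1.5 s later in scratch than in final and findfit() returns
# roughly +1.5.  autoclapper() below then pads the start of the final track
# with 1.5 s of silence; a negative shift would instead trim its start.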
def autoclapper(in_scratch_fname, in_final_fname, out_fname):
"""Read WAV- or AIFF-format files in_scratch_fname (a scratch audio track,
taken from a video) & in_final_fname (a final-quality audio track of
the same scene). Shift the 'final' stream to match the 'scratch' track,
and write it out to out_fname. The result is a file that can be used
directly as the video's sound-track."""
# Read in the input streams.
scratch = SoundFile( in_scratch_fname )
final = SoundFile( in_final_fname )
print 'scratch', scratch.stream.getparams()
print 'final ', final.stream.getparams()
scratch_frag = coerce_lin(scratch, final)
final_frag = final.read_lin()
## Shift final_frag to match scratch_frag
shift_sec = findfit(scratch_frag, final_frag, final)
shift_frames = int(shift_sec * final.stream.getframerate())
shift_bytes = shift_frames * final.bytes_per_frame()
print 'shift', shift_bytes, 'bytes'
if shift_bytes > 0:
final_frag = '\0' * shift_bytes + final_frag
elif shift_bytes < 0:
final_frag = final_frag[-shift_bytes:]
## Set final_frag length to match scratch_frag
if len(final_frag) > len(scratch_frag):
final_frag = final_frag[:len(scratch_frag)]
elif len(final_frag) < len(scratch_frag):
final_frag += '\0' * (len(scratch_frag) - len(final_frag))
# Write out the result.
sink = SoundFile( out_fname, final )
sink.write_lin( final_frag )
sink.close()
if __name__=='__main__':
import sys
if sys.argv[1] in ('-h', '--help', '-?'):
print 'syntax: python autoclapper.py IN_SCRATCH_FNAME IN_FINAL_FNAME OUT_FNAME'
print
print autoclapper.__doc__
print """
You can use "avconv" (or "ffmpeg") to extract audio tracks from video.
Example:
$ avconv -i raw_video.avi scratch.wav
$ python autoclapper.py scratch.wav raw_final.wav synced_final.wav
$ avconv -i raw_video.avi -i synced_final.wav -map 0:0 -map 1:0 -codec copy video.avi
"""
sys.exit(0)
in_scratch_fname = sys.argv[1]
in_final_fname = sys.argv[2]
out_fname = sys.argv[3]
autoclapper(in_scratch_fname, in_final_fname, out_fname)
| agpl-3.0 | 5,807,491,492,278,945,000 | 32.456897 | 95 | 0.652409 | false |
oneconvergence/group-based-policy | gbpservice/neutron/services/grouppolicy/extension_manager.py | 1 | 11825 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.openstack.common import log
from oslo.config import cfg
import stevedore
LOG = log.getLogger(__name__)
class ExtensionManager(stevedore.named.NamedExtensionManager):
"""Manage extension drivers using drivers."""
def __init__(self):
# Ordered list of extension drivers, defining
# the order in which the drivers are called.
self.ordered_ext_drivers = []
LOG.info(_("Configured extension driver names: %s"),
cfg.CONF.group_policy.extension_drivers)
super(ExtensionManager, self).__init__(
'gbpservice.neutron.group_policy.extension_drivers',
cfg.CONF.group_policy.extension_drivers,
invoke_on_load=True,
name_order=True)
LOG.info(_("Loaded extension driver names: %s"), self.names())
self._register_drivers()
def _register_drivers(self):
"""Register all extension drivers.
This method should only be called once in the ExtensionManager
constructor.
"""
for ext in self:
self.ordered_ext_drivers.append(ext)
LOG.info(_("Registered extension drivers: %s"),
[driver.name for driver in self.ordered_ext_drivers])
def initialize(self):
# Initialize each driver in the list.
for driver in self.ordered_ext_drivers:
LOG.info(_("Initializing extension driver '%s'"), driver.name)
driver.obj.initialize()
def extension_aliases(self):
exts = []
for driver in self.ordered_ext_drivers:
alias = driver.obj.extension_alias
exts.append(alias)
LOG.info(_("Got %(alias)s extension from driver '%(drv)s'"),
{'alias': alias, 'drv': driver.name})
return exts
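    # For orientation, each loaded driver is expected to look roughly like the
    # hypothetical sketch below: an object exposing `extension_alias`,
    # `initialize()`, and the process_*/extend_*_dict hooks that the dispatch
    # methods in this class call.  The class, alias and attribute names are
    # made up for illustration; real drivers derive from the project's
    # extension driver base class, which supplies default no-op hooks.
    #
    #   class MyExtensionDriver(object):
    #       _alias = 'my_ext'
    #
    #       def initialize(self):
    #           pass
    #
    #       @property
    #       def extension_alias(self):
    #           return self._alias
    #
    #       def process_create_l3_policy(self, session, data, result):
    #           result['my_attr'] = data.get('my_attr')
    #
    #       def extend_l3_policy_dict(self, session, result):
    #           result['my_attr'] = result.get('my_attr')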
def _call_on_ext_drivers(self, method_name, session, data, result):
"""Helper method for calling a method across all extension drivers."""
for driver in self.ordered_ext_drivers:
try:
getattr(driver.obj, method_name)(session, data, result)
except Exception:
LOG.exception(
_("Extension driver '%(name)s' failed in %(method)s"),
{'name': driver.name, 'method': method_name}
)
def process_create_policy_target(self, session, data, result):
"""Call all extension drivers during PT creation."""
self._call_on_ext_drivers("process_create_policy_target",
session, data, result)
def process_update_policy_target(self, session, data, result):
"""Call all extension drivers during PT update."""
self._call_on_ext_drivers("process_update_policy_target",
session, data, result)
def extend_policy_target_dict(self, session, result):
"""Call all extension drivers to extend PT dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_policy_target_dict(session, result)
def process_create_policy_target_group(self, session, data, result):
"""Call all extension drivers during PTG creation."""
self._call_on_ext_drivers("process_create_policy_target_group",
session, data, result)
def process_update_policy_target_group(self, session, data, result):
"""Call all extension drivers during PTG update."""
self._call_on_ext_drivers("process_update_policy_target_group",
session, data, result)
def extend_policy_target_group_dict(self, session, result):
"""Call all extension drivers to extend PTG dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_policy_target_group_dict(session, result)
def process_create_l2_policy(self, session, data, result):
"""Call all extension drivers during L2P creation."""
self._call_on_ext_drivers("process_create_l2_policy",
session, data, result)
def process_update_l2_policy(self, session, data, result):
"""Call all extension drivers during L2P update."""
self._call_on_ext_drivers("process_update_l2_policy",
session, data, result)
def extend_l2_policy_dict(self, session, result):
"""Call all extension drivers to extend L2P dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_l2_policy_dict(session, result)
def process_create_l3_policy(self, session, data, result):
"""Call all extension drivers during L3P creation."""
self._call_on_ext_drivers("process_create_l3_policy",
session, data, result)
def process_update_l3_policy(self, session, data, result):
"""Call all extension drivers during L3P update."""
self._call_on_ext_drivers("process_update_l3_policy",
session, data, result)
def extend_l3_policy_dict(self, session, result):
"""Call all extension drivers to extend L3P dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_l3_policy_dict(session, result)
def process_create_policy_classifier(self, session, data, result):
"""Call all extension drivers during PC creation."""
self._call_on_ext_drivers("process_create_policy_classifier",
session, data, result)
def process_update_policy_classifier(self, session, data, result):
"""Call all extension drivers during PC update."""
self._call_on_ext_drivers("process_update_policy_classifier",
session, data, result)
def extend_policy_classifier_dict(self, session, result):
"""Call all extension drivers to extend PC dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_policy_classifier_dict(session, result)
def process_create_policy_action(self, session, data, result):
"""Call all extension drivers during PA creation."""
self._call_on_ext_drivers("process_create_policy_action",
session, data, result)
def process_update_policy_action(self, session, data, result):
"""Call all extension drivers during PA update."""
self._call_on_ext_drivers("process_update_policy_action",
session, data, result)
def extend_policy_action_dict(self, session, result):
"""Call all extension drivers to extend PA dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_policy_action_dict(session, result)
def process_create_policy_rule(self, session, data, result):
"""Call all extension drivers during PR creation."""
self._call_on_ext_drivers("process_create_policy_rule",
session, data, result)
def process_update_policy_rule(self, session, data, result):
"""Call all extension drivers during PR update."""
self._call_on_ext_drivers("process_update_policy_rule",
session, data, result)
def extend_policy_rule_dict(self, session, result):
"""Call all extension drivers to extend PR dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_policy_rule_dict(session, result)
def process_create_policy_rule_set(self, session, data, result):
"""Call all extension drivers during PRS creation."""
self._call_on_ext_drivers("process_create_policy_rule_set",
session, data, result)
def process_update_policy_rule_set(self, session, data, result):
"""Call all extension drivers during PRS update."""
self._call_on_ext_drivers("process_update_policy_rule_set",
session, data, result)
def extend_policy_rule_set_dict(self, session, result):
"""Call all extension drivers to extend PRS dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_policy_rule_set_dict(session, result)
def process_create_network_service_policy(self, session, data, result):
"""Call all extension drivers during NSP creation."""
self._call_on_ext_drivers("process_create_network_service_policy",
session, data, result)
def process_update_network_service_policy(self, session, data, result):
"""Call all extension drivers during NSP update."""
self._call_on_ext_drivers("process_update_network_service_policy",
session, data, result)
def extend_network_service_policy_dict(self, session, result):
"""Call all extension drivers to extend NSP dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_network_service_policy_dict(session, result)
def process_create_external_segment(self, session, data, result):
"""Call all extension drivers during EP creation."""
self._call_on_ext_drivers("process_create_external_segment",
session, data, result)
def process_update_external_segment(self, session, data, result):
"""Call all extension drivers during EP update."""
self._call_on_ext_drivers("process_update_external_segment",
session, data, result)
def extend_external_segment_dict(self, session, result):
"""Call all extension drivers to extend EP dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_external_segment_dict(session, result)
def process_create_external_policy(self, session, data, result):
"""Call all extension drivers during EP creation."""
self._call_on_ext_drivers("process_create_external_policy",
session, data, result)
def process_update_external_policy(self, session, data, result):
"""Call all extension drivers during EP update."""
self._call_on_ext_drivers("process_update_external_policy",
session, data, result)
def extend_external_policy_dict(self, session, result):
"""Call all extension drivers to extend EP dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_external_policy_dict(session, result)
def process_create_nat_pool(self, session, data, result):
"""Call all extension drivers during NP creation."""
self._call_on_ext_drivers("process_create_nat_pool",
session, data, result)
def process_update_nat_pool(self, session, data, result):
"""Call all extension drivers during NP update."""
self._call_on_ext_drivers("process_update_nat_pool",
session, data, result)
def extend_nat_pool_dict(self, session, result):
"""Call all extension drivers to extend NP dictionary."""
for driver in self.ordered_ext_drivers:
driver.obj.extend_nat_pool_dict(session, result) | apache-2.0 | 5,398,071,857,213,786,000 | 45.559055 | 78 | 0.622664 | false |
dtroyer/python-openstacksdk | openstack/object_store/v1/account.py | 1 | 1764 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.object_store.v1 import _base
from openstack import resource
class Account(_base.BaseResource):
_custom_metadata_prefix = "X-Account-Meta-"
base_path = "/"
allow_get = True
allow_update = True
allow_head = True
#: The total number of bytes that are stored in Object Storage for
#: the account.
account_bytes_used = resource.Header("x-account-bytes-used", type=int)
#: The number of containers.
account_container_count = resource.Header("x-account-container-count",
type=int)
#: The number of objects in the account.
account_object_count = resource.Header("x-account-object-count", type=int)
#: The secret key value for temporary URLs. If not set,
#: this header is not returned by this operation.
meta_temp_url_key = resource.Header("x-account-meta-temp-url-key")
#: A second secret key value for temporary URLs. If not set,
#: this header is not returned by this operation.
meta_temp_url_key_2 = resource.Header("x-account-meta-temp-url-key-2")
#: The timestamp of the transaction.
timestamp = resource.Header("x-timestamp")
has_body = False
requires_id = False
| apache-2.0 | -5,670,804,540,234,342,000 | 38.2 | 78 | 0.696145 | false |
civisanalytics/ansible-modules-core | cloud/amazon/iam.py | 1 | 30387 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam
short_description: Manage IAM users, groups, roles and keys
description:
- Allows for the management of IAM users, user API keys, groups, roles.
version_added: "2.0"
options:
iam_type:
description:
- Type of IAM resource
required: true
default: null
choices: ["user", "group", "role"]
name:
description:
- Name of IAM resource to create or identify
required: true
new_name:
description:
- When state is update, will replace name with new_name on IAM resource
required: false
default: null
new_path:
description:
- When state is update, will replace the path with new_path on the IAM resource
required: false
default: null
state:
description:
- Whether to create, delete or update the IAM resource. Note, roles cannot be updated.
required: true
default: null
choices: [ "present", "absent", "update" ]
path:
description:
- When creating or updating, specify the desired path of the resource. If state is present, it will replace the current path to match what is passed in when they do not match.
required: false
default: "/"
assume_role_policy:
description:
- YAML or JSON formatted document that grants the ability to assume a role.
required: false
default: null
access_key_state:
description:
- When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified.
required: false
default: null
choices: [ "create", "remove", "active", "inactive"]
key_count:
description:
- When access_key_state is create it will ensure this quantity of keys are present. Defaults to 1.
required: false
default: '1'
access_key_ids:
description:
      - A list of the keys that you want impacted by the access_key_state parameter.
groups:
description:
- A list of groups the user should belong to. When update, will gracefully remove groups not listed.
required: false
default: null
password:
description:
      - When type is user and state is present, define the user's login password. Also works with update. Note that this always returns changed.
required: false
default: null
update_password:
required: false
default: always
choices: ['always', 'on_create']
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
notes:
  - 'Currently boto does not support the removal of Managed Policies; the module will error out if your user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.'
author:
- "Jonathan I. Davila (@defionscode)"
- "Paul Seiffert (@seiffert)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic user creation example
tasks:
- name: Create two new IAM users with API keys
iam:
iam_type: user
name: "{{ item }}"
state: present
password: "{{ temp_pass }}"
access_key_state: create
with_items:
- jcleese
- mpython
# Advanced example, create two new groups and add the pre-existing user
# jdavila to both groups.
task:
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
with_items:
- Mario
- Luigi
register: new_groups
- name:
iam:
iam_type: user
name: jdavila
state: update
groups: "{{ item.created_group.group_name }}"
with_items: new_groups.results
'''
import json
import itertools
import sys
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def _paginate(func, attr):
'''
paginates the results from func by continuously passing in
the returned marker if the results were truncated. this returns
an iterator over the items in the returned response. `attr` is
the name of the attribute to iterate over in the response.
'''
finished, marker = False, None
while not finished:
res = func(marker=marker)
for item in getattr(res, attr):
yield item
finished = res.is_truncated == 'false'
if not finished:
marker = res.marker
def list_all_groups(iam):
return [item['group_name'] for item in _paginate(iam.get_all_groups, 'groups')]
def list_all_users(iam):
return [item['user_name'] for item in _paginate(iam.get_all_users, 'users')]
def list_all_roles(iam):
return [item['role_name'] for item in _paginate(iam.list_roles, 'roles')]
def list_all_instance_profiles(iam):
return [item['instance_profile_name'] for item in _paginate(iam.list_instance_profiles, 'instance_profiles')]
def create_user(module, iam, name, pwd, path, key_state, key_count):
key_qty = 0
keys = []
try:
user_meta = iam.create_user(
name, path).create_user_response.create_user_result.user
changed = True
if pwd is not None:
pwd = iam.create_login_profile(name, pwd)
if key_state in ['create']:
if key_count:
while key_count > key_qty:
keys.append(iam.create_access_key(
user_name=name).create_access_key_response.\
create_access_key_result.\
access_key)
key_qty += 1
else:
keys = None
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
else:
user_info = dict(created_user=user_meta, password=pwd, access_keys=keys)
return (user_info, changed)
def delete_user(module, iam, name):
    del_meta = ''
    error_msg = ''
    changed = False
try:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
for key in current_keys:
iam.delete_access_key(key, name)
try:
login_profile = iam.get_login_profiles(name).get_login_profile_response
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('Cannot find Login Profile') in error_msg:
del_meta = iam.delete_user(name).delete_user_response
else:
iam.delete_login_profile(name)
del_meta = iam.delete_user(name).delete_user_response
except Exception as ex:
module.fail_json(changed=False, msg="delete failed %s" %ex)
if ('must detach all policies first') in error_msg:
for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names:
iam.delete_user_policy(name, policy)
try:
del_meta = iam.delete_user(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
                module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears "
                                                      "that %s has Managed Policies. This is not "
                                                      "currently supported by boto. Please detach the policies "
                                                      "through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(error_msg))
else:
changed = True
return del_meta, name, changed
else:
changed = True
return del_meta, name, changed
def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated):
changed = False
name_change = False
if updated and new_name:
name = new_name
try:
current_keys, status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
key_qty = len(current_keys)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if 'cannot be found' in error_msg and updated:
current_keys, status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
name = new_name
else:
module.fail_json(changed=False, msg=str(err))
updated_key_list = {}
if new_name or new_path:
c_path = iam.get_user(name).get_user_result.user['path']
if (name != new_name) or (c_path != new_path):
changed = True
try:
if not updated:
user = iam.update_user(
name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata
else:
user = iam.update_user(
name, new_path=new_path).update_user_response.response_metadata
user['updates'] = dict(
old_username=name, new_username=new_name, old_path=c_path, new_path=new_path)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
module.fail_json(changed=False, msg=str(err))
else:
if not updated:
name_change = True
if pwd:
try:
iam.update_login_profile(name, pwd)
changed = True
except boto.exception.BotoServerError:
try:
iam.create_login_profile(name, pwd)
changed = True
except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
if 'Password does not conform to the account password policy' in error_msg:
                    module.fail_json(changed=False, msg="Password doesn't conform to policy")
else:
module.fail_json(msg=error_msg)
if key_state == 'create':
try:
while key_count > key_qty:
new_key = iam.create_access_key(
user_name=name).create_access_key_response.create_access_key_result.access_key
key_qty += 1
changed = True
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
if keys and key_state:
for access_key in keys:
if access_key in current_keys:
for current_key, current_key_state in zip(current_keys, status):
if key_state != current_key_state.lower():
try:
iam.update_access_key(
access_key, key_state.capitalize(), user_name=name)
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
else:
changed = True
if key_state == 'remove':
try:
iam.delete_access_key(access_key, user_name=name)
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
else:
changed = True
try:
final_keys, final_key_status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(name).
list_access_keys_result.
access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(name).
list_access_keys_result.
access_key_metadata]
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
for fk, fks in zip(final_keys, final_key_status):
updated_key_list.update({fk: fks})
return name_change, updated_key_list, changed
def set_users_groups(module, iam, name, groups, updated=None,
                     new_name=None):
    """ Sets groups for a user, will purge groups not explicitly passed, while
retaining pre-existing groups that also are in the new list.
"""
changed = False
if updated:
name = new_name
try:
orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user(
name).list_groups_for_user_result.groups]
remove_groups = [
rg for rg in frozenset(orig_users_groups).difference(groups)]
new_groups = [
ng for ng in frozenset(groups).difference(orig_users_groups)]
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
else:
if len(orig_users_groups) > 0:
for new in new_groups:
iam.add_user_to_group(new, name)
for rm in remove_groups:
iam.remove_user_from_group(rm, name)
else:
for group in groups:
try:
iam.add_user_to_group(group, name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('The group with name %s cannot be found.' % group) in error_msg:
module.fail_json(changed=False, msg="Group %s doesn't exist" % group)
if len(remove_groups) > 0 or len(new_groups) > 0:
changed = True
return (groups, changed)
def create_group(module=None, iam=None, name=None, path=None):
changed = False
try:
iam.create_group(
name, path).create_group_response.create_group_result.group
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
return name, changed
def delete_group(module=None, iam=None, name=None):
changed = False
try:
iam.delete_group(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names:
iam.delete_group_policy(name, policy)
try:
iam.delete_group(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
                module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears "
                                                      "that %s has Managed Policies. This is not "
                                                      "currently supported by boto. Please detach the policies "
                                                      "through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
else:
changed = True
return changed, name
def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
changed = False
try:
current_group_path = iam.get_group(
name).get_group_response.get_group_result.group['path']
if new_path:
if current_group_path != new_path:
iam.update_group(name, new_path=new_path)
changed = True
if new_name:
if name != new_name:
iam.update_group(name, new_group_name=new_name, new_path=new_path)
changed = True
name = new_name
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
return changed, name, new_path, current_group_path
def create_role(module, iam, name, path, role_list, prof_list, assume_role_policy):
changed = False
iam_role_result = None
instance_profile_result = None
try:
if name not in role_list:
changed = True
iam_role_result = iam.create_role(
name,
assume_role_policy_document=assume_role_policy,
path=path).create_role_response.create_role_result.role
if name not in prof_list:
instance_profile_result = iam.create_instance_profile(name,
path=path).create_instance_profile_response.create_instance_profile_result.instance_profile
iam.add_role_to_instance_profile(name, name)
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
else:
updated_role_list = list_all_roles(iam)
return changed, updated_role_list, iam_role_result, instance_profile_result
def delete_role(module, iam, name, role_list, prof_list):
changed = False
iam_role_result = None
instance_profile_result = None
try:
if name in role_list:
cur_ins_prof = [rp['instance_profile_name'] for rp in
iam.list_instance_profiles_for_role(name).
list_instance_profiles_for_role_result.
instance_profiles]
for profile in cur_ins_prof:
iam.remove_role_from_instance_profile(profile, name)
try:
iam.delete_role(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.list_role_policies(name).list_role_policies_result.policy_names:
iam.delete_role_policy(name, policy)
try:
iam_role_result = iam.delete_role(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
                        module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears "
                                                              "that %s has Managed Policies. This is not "
                                                              "currently supported by boto. Please detach the policies "
                                                              "through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
else:
changed = True
for prof in prof_list:
if name == prof:
instance_profile_result = iam.delete_instance_profile(name)
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
else:
updated_role_list = list_all_roles(iam)
return changed, updated_role_list, iam_role_result, instance_profile_result
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
iam_type=dict(
default=None, required=True, choices=['user', 'group', 'role']),
groups=dict(type='list', default=None, required=False),
state=dict(
default=None, required=True, choices=['present', 'absent', 'update']),
password=dict(default=None, required=False, no_log=True),
update_password=dict(default='always', required=False, choices=['always', 'on_create']),
access_key_state=dict(default=None, required=False, choices=[
'active', 'inactive', 'create', 'remove',
'Active', 'Inactive', 'Create', 'Remove']),
access_key_ids=dict(type='list', default=None, required=False),
key_count=dict(type='int', default=1, required=False),
name=dict(default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False),
assume_role_policy=dict(default=None, required=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[],
)
if not HAS_BOTO:
module.fail_json(msg='This module requires boto, please install it')
state = module.params.get('state').lower()
iam_type = module.params.get('iam_type').lower()
groups = module.params.get('groups')
name = module.params.get('name')
new_name = module.params.get('new_name')
password = module.params.get('password')
update_pw = module.params.get('update_password')
path = module.params.get('path')
new_path = module.params.get('new_path')
key_count = module.params.get('key_count')
key_state = module.params.get('access_key_state')
key_ids = module.params.get('access_key_ids')
assume_role_policy = module.params.get('assume_role_policy')
if key_state:
key_state = key_state.lower()
if any([n in key_state for n in ['active', 'inactive']]) and not key_ids:
module.fail_json(changed=False, msg="At least one access key has to be defined in order"
" to use 'active' or 'inactive'")
if iam_type == 'user' and module.params.get('password') is not None:
pwd = module.params.get('password')
elif iam_type != 'user' and module.params.get('password') is not None:
module.fail_json(msg="a password is being specified when the iam_type "
"is not user. Check parameters")
else:
pwd = None
if iam_type != 'user' and (module.params.get('access_key_state') is not None or
                               module.params.get('access_key_ids') is not None):
module.fail_json(msg="the IAM type must be user, when IAM access keys "
"are being modified. Check parameters")
if iam_type == 'role' and state == 'update':
        module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
                                            "please specify present or absent")
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
if region:
iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
else:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
result = {}
changed = False
try:
orig_group_list = list_all_groups(iam)
orig_user_list = list_all_users(iam)
orig_role_list = list_all_roles(iam)
orig_prof_list = list_all_instance_profiles(iam)
except boto.exception.BotoServerError as err:
module.fail_json(msg=err.message)
if iam_type == 'user':
been_updated = False
user_groups = None
user_exists = any([n in [name, new_name] for n in orig_user_list])
if user_exists:
current_path = iam.get_user(name).get_user_result.user['path']
if not new_path and current_path != path:
new_path = path
path = current_path
if state == 'present' and not user_exists and not new_name:
(meta, changed) = create_user(
module, iam, name, password, path, key_state, key_count)
keys = iam.get_all_access_keys(name).list_access_keys_result.\
access_key_metadata
if groups:
(user_groups, changed) = set_users_groups(
module, iam, name, groups, been_updated, new_name)
module.exit_json(
user_meta=meta, groups=user_groups, keys=keys, changed=changed)
elif state in ['present', 'update'] and user_exists:
if update_pw == 'on_create':
password = None
if name not in orig_user_list and new_name in orig_user_list:
been_updated = True
name_change, key_list, user_changed = update_user(
module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated)
if name_change and new_name:
orig_name = name
name = new_name
if groups:
user_groups, groups_changed = set_users_groups(
module, iam, name, groups, been_updated, new_name)
if groups_changed == user_changed:
changed = groups_changed
else:
changed = True
else:
changed = user_changed
if new_name and new_path:
module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name,
new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list)
elif new_name and not new_path and not been_updated:
module.exit_json(
changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list)
elif new_name and not new_path and been_updated:
module.exit_json(
changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state)
elif not new_name and new_path:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path, keys=key_list)
else:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, keys=key_list)
elif state == 'update' and not user_exists:
module.fail_json(
msg="The user %s does not exist. No update made." % name)
elif state == 'absent':
if user_exists:
try:
set_users_groups(module, iam, name, '')
del_meta, name, changed = delete_user(module, iam, name)
module.exit_json(deleted_user=name, changed=changed)
except Exception as ex:
module.fail_json(changed=changed, msg=str(ex))
else:
module.exit_json(
changed=False, msg="User %s is already absent from your AWS IAM users" % name)
elif iam_type == 'group':
group_exists = name in orig_group_list
if state == 'present' and not group_exists:
new_group, changed = create_group(iam=iam, name=name, path=path)
module.exit_json(changed=changed, group_name=new_group)
elif state in ['present', 'update'] and group_exists:
changed, updated_name, updated_path, cur_path = update_group(
iam=iam, name=name, new_name=new_name, new_path=new_path)
if new_path and new_name:
module.exit_json(changed=changed, old_group_name=name,
new_group_name=updated_name, old_path=cur_path,
new_group_path=updated_path)
if new_path and not new_name:
module.exit_json(changed=changed, group_name=name,
old_path=cur_path,
new_group_path=updated_path)
if not new_path and new_name:
module.exit_json(changed=changed, old_group_name=name,
new_group_name=updated_name, group_path=cur_path)
if not new_path and not new_name:
module.exit_json(
changed=changed, group_name=name, group_path=cur_path)
elif state == 'update' and not group_exists:
module.fail_json(
                changed=changed, msg="Update Failed. Group %s doesn't seem to exist!" % name)
elif state == 'absent':
if name in orig_group_list:
removed_group, changed = delete_group(iam=iam, name=name)
module.exit_json(changed=changed, delete_group=removed_group)
else:
module.exit_json(changed=changed, msg="Group already absent")
elif iam_type == 'role':
role_list = []
if state == 'present':
changed, role_list, role_result, instance_profile_result = create_role(
module, iam, name, path, orig_role_list, orig_prof_list, assume_role_policy)
elif state == 'absent':
changed, role_list, role_result, instance_profile_result = delete_role(
module, iam, name, orig_role_list, orig_prof_list)
elif state == 'update':
module.fail_json(
changed=False, msg='Role update not currently supported by boto.')
module.exit_json(changed=changed, roles=role_list, role_result=role_result,
instance_profile_result=instance_profile_result)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 | 2,926,000,182,716,711,000 | 39.035573 | 213 | 0.581532 | false |
kinow-io/kinow-python-sdk | test/test_media_sources_api.py | 1 | 1195 | # coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.41
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.apis.media_sources_api import MediaSourcesApi
class TestMediaSourcesApi(unittest.TestCase):
""" MediaSourcesApi unit test stubs """
def setUp(self):
self.api = kinow_client.apis.media_sources_api.MediaSourcesApi()
def tearDown(self):
pass
def test_get_media_source(self):
"""
Test case for get_media_source
"""
pass
def test_get_media_source_files(self):
"""
Test case for get_media_source_files
"""
pass
def test_get_media_sources(self):
"""
Test case for get_media_sources
"""
pass
def test_post_media_source_files(self):
"""
Test case for post_media_source_files
"""
pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -5,992,378,790,204,752,000 | 16.573529 | 72 | 0.589958 | false |
titu1994/MobileNetworks | weights/remove_extra_class.py | 1 | 1279 | import h5py
'''
Place all the weight files here (they should be placed here automatically after running
weight_load.py for all the checkpoints), then simply run this script to change the weights
to support 1000 classes instead of 1001.
'''
base = "mobilenet_"
alphas = ["1_0", "7_5", "5_0", "2_5"]
sizes = [224, 192, 160, 128]
end_str = "_tf.h5"
for alpha in alphas:
for size in sizes:
fn = base + alpha + "_" + str(size) + end_str
print("Working on file : %s" % fn)
f = h5py.File(fn)
classification_layer = f.attrs['layer_names'][-3]
classification_dataset = f[classification_layer]
weights_name = b'conv_preds/kernel:0'
bias_name = b'conv_preds/bias:0'
weights = classification_dataset[weights_name][:]
bias = classification_dataset[bias_name][:]
# remove the first class
weights = weights[..., 1:]
bias = bias[1:]
del classification_dataset[weights_name]
classification_dataset.create_dataset(weights_name, data=weights)
del classification_dataset[bias_name]
classification_dataset.create_dataset(bias_name, data=bias)
f.close()
print("Finished processing weight file : %s" % (fn))
print("Finished processing all weights")
| apache-2.0 | 1,143,303,184,041,994,100 | 29.452381 | 95 | 0.637217 | false |
dakrauth/prolog | prolog/formatters.py | 1 | 4768 | import sys
import logging
from textwrap import indent
from .config import config
__all__ = ['PrologFormatter', 'ColorFormatter', 'Colorize']
class PrologFormatter(logging.Formatter):
DEFAULT_FMT = config.LONG_FMT
DEFAULT_DATEFMT = config.DATE_FMT
DEFAULT_STYLE = config.STYLE_FMT
def __init__(self, fmt=None, datefmt=None, style=None):
super().__init__(
fmt=fmt or self.DEFAULT_FMT,
datefmt=datefmt or self.DEFAULT_DATEFMT,
style=style or self.DEFAULT_STYLE
)
@staticmethod
def to_str(value):
'''
        Convert value to a string. If value is already a str or None, return it
        unchanged. If value is bytes, decode it as utf8. Otherwise, fall back
        to the value's repr.
'''
if value is None:
return ''
if isinstance(value, str):
return value
elif isinstance(value, bytes):
return value.decode()
return repr(value)
def formatException(self, ei):
return indent(super().formatException(ei), '... ')
def formatMessage(self, record):
try:
record.msg = self.to_str(record.msg)
record.message = record.getMessage()
except Exception as e:
record.message = "Bad message (%r): %r" % (e, record.__dict__)
return super().formatMessage(record)
#return formatted.replace("\n", "\n ")
class Colorize:
fg = {
'gray': '0;30', 'black': '1;30', 'darkgray': '0;30',
'red': '0;31', 'lightred': '1;31', 'darkred': '0;31',
'green': '0;32', 'lightgreen': '1;32', 'darkgreen': '0;32',
'brown': '0;33', 'lightbrown': '1;33', 'darkbrown': '0;33',
'blue': '0;34', 'lightblue': '1;34', 'darkblue': '0;34',
'magenta': '0;35', 'lightmagenta': '1;35', 'darkmagenta': '0;35',
'purple': '0;35', 'lightpurple': '1;35', 'darkpurple': '0;35',
'cyan': '0;36', 'lightcyan': '1;36', 'darkcyan': '0;36',
'lightgray': '0;37', 'white': '1;37', 'yellow': '1;33',
}
bg = {
'black': '40', 'red': '41', 'green': '42',
'brown': '43', 'blue': '44', 'magenta': '45',
'cyan': '46', 'purple': '45', 'gray': '47',
}
reset = '\x1b[0m'
@classmethod
def style(cls, fg='', bg=''):
code_list = []
if fg:
code_list.append(cls.fg[fg])
if bg:
code_list.append(cls.bg[bg])
return '\x1b[{}m'.format(';'.join(code_list)) if code_list else ''
@staticmethod
def supported(stream=sys.stderr):
return hasattr(stream, 'isatty') and stream.isatty()
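# Minimal usage sketch (illustrative, not part of the original module): Colorize.style()
# builds the ANSI escape prefix and Colorize.reset restores the default attributes.
def _example_colorize_usage(text='alert'):
    # '\x1b[0;31;40m' + text + '\x1b[0m' -> red foreground on a black background
    return Colorize.style('red', 'black') + text + Colorize.reset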
class ColorFormatter(PrologFormatter):
DEFAULT_FMT = config.COLOR_LONG_FMT
DEFAULT_COLORS = config.LEVEL_COLORS
def __init__(self, fmt=None, datefmt=None, style=None, colors=None):
super().__init__(fmt, datefmt, style)
if Colorize.supported():
self.colors = self.normalize_colors(colors or self.DEFAULT_COLORS)
else:
self.colors = {}
@staticmethod
def normalize_colors(colors):
if isinstance(colors, str):
colors = dict(
bits.split(':') for bits in colors.split(';') if bits
)
colors = {key: val.split(',') for key, val in colors.items()}
default = colors.pop('*', False)
if default:
for level in logging._nameToLevel:
if level not in colors:
colors[level] = default
return colors
def set_colors(self, record):
if record.levelname in self.colors:
record.color = Colorize.style(*self.colors[record.levelname])
record.endcolor = Colorize.reset
else:
record.color = record.endcolor = ''
def formatMessage(self, record):
self.set_colors(record)
return super().formatMessage(record)
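# Illustrative only: the `colors` argument also accepts a spec string of the form
# "LEVEL:fg[,bg];...", with "*" acting as a fallback for unlisted levels. The exact
# mapping below is an assumption for demonstration, not the package default.
def _example_color_formatter():
    spec = 'DEBUG:cyan;INFO:green;WARNING:yellow;ERROR:lightred;CRITICAL:white,red;*:lightgray'
    return ColorFormatter(colors=spec)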
registered_formatters = {
'long': PrologFormatter(),
'short': PrologFormatter(fmt=config.SHORT_FMT, datefmt=None),
'color': ColorFormatter(),
}
registered_formatters['default'] = registered_formatters['long']
def get_formatter(arg=None):
if arg is None:
return registered_formatters['default']
elif isinstance(arg, logging.Formatter):
return arg
elif isinstance(arg, str):
try:
return registered_formatters[arg]
except KeyError as e:
msg = '"{}" unrecognized formatter shortcut'.format(arg)
raise KeyError(msg) from e
elif isinstance(arg, dict):
return PrologFormatter(**arg)
else:
return PrologFormatter(*arg)
| mit | 3,918,331,010,708,684,000 | 30.786667 | 78 | 0.551594 | false |
ThomasColliers/whatmigrate | siteconnection.py | 1 | 5571 | # Class that handles What.CD authentication, can download torrents and can search the site log
import os,pycurl,urllib,re,sys,urllib2
from BeautifulSoup import BeautifulSoup
re_main = re.compile(r'<span style="color: red;">(.*?)</span>')
re_detail = re.compile(r' Torrent <a href="torrents\.php\?torrentid=\d+"> \d+</a> \((.*?)\) uploaded by <a href="user\.php\?id=\d+">.*?</a> was deleted by <a href="user\.php\?id=\d+">.*?</a> for the reason: (.*?)$')
re_replacement = re.compile(r'(.*?) \( <a href="torrents\.php\?torrentid=(\d+)">torrents\.php\?torrentid=\d+</a> \)')
class Receiver:
def __init__(self):
self.contents = ""
self.header = ""
def body_callback(self, buffer):
self.contents = self.contents + buffer
def header_callback(self,buffer):
self.header = self.header + buffer
class Connection:
def __init__(self,user,passw,use_ssl):
self.username = user
self.password = passw
self.logintries = 0
if(use_ssl): self.basepath = "https://ssl.what.cd/"
else: self.basepath = "http://what.cd/"
# Set up curl
self.rec = Receiver()
self.curl = pycurl.Curl()
self.curl.setopt(pycurl.FOLLOWLOCATION,1)
self.curl.setopt(pycurl.MAXREDIRS,5)
self.curl.setopt(pycurl.NOSIGNAL,1)
cookiefile = os.path.expanduser("~/.whatmigrate_cookiefile")
self.curl.setopt(pycurl.COOKIEFILE,cookiefile)
self.curl.setopt(pycurl.COOKIEJAR,cookiefile)
self.curl.setopt(pycurl.WRITEFUNCTION,self.rec.body_callback)
self.curl.setopt(pycurl.HEADERFUNCTION,self.rec.header_callback)
# to reset curl after each request
def clearCurl(self):
self.rec.contents = ""
self.rec.header = ""
self.curl.setopt(pycurl.POST,0)
self.curl.setopt(pycurl.POSTFIELDS,"")
# make request
def makeRequest(self,url,post = None):
# make request
self.clearCurl()
self.curl.setopt(pycurl.URL,url)
if(post):
self.curl.setopt(pycurl.POST,1)
self.curl.setopt(pycurl.POSTFIELDS,post)
self.curl.perform()
# check if logged in
if not self.rec.contents.find('id="loginform"') is -1:
self.logintries += 1
if(self.logintries > 1): sys.exit("Site login failed, check your username and password in your configuration file")
self.login()
return self.makeRequest(url,post)
# return result
return self.rec.contents
# login
def login(self):
self.makeRequest(self.basepath+"login.php",
urllib.urlencode([
("username",self.username),
("password",self.password),
("keeplogged",1),
("login","Log in !")
])
)
# strip html
def stripHTML(self,html):
return ''.join(BeautifulSoup(html).findAll(text=True))
# search torrents
def searchTorrents(self,searchstring):
html = self.makeRequest(self.basepath+"torrents.php?searchstr="+urllib.quote(searchstring))
soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES)
table = soup.find("table", {"id":"torrent_table"})
if not table: return False
groups = table.findAll("tr")
results = {}
for group in groups:
classes = group["class"].split(' ')
# parse the groups
if "group" in classes:
copy = unicode(group.findAll('td')[2])
copy = copy[0:copy.find('<span style="float:right;">')]
currentgroup = self.stripHTML(copy).strip()
results[currentgroup] = {}
# parse the edition
elif "edition" in classes:
currentedition = group.td.strong.find(text=True,recursive=False).strip()
if currentgroup: results[currentgroup][currentedition] = []
# parse the torrent
elif "group_torrent" in classes:
torrentdata = {}
torrentdata['format'] = group.td.find('a',recursive=False).text.strip()
torrentdata['size'] = group.findAll('td')[3].text.strip()
dlink = unicode(group.td.a)
regex = re.compile(r'id=(\d+)')
reresult = regex.search(dlink)
if reresult:
torrentdata['id'] = int(reresult.group(1));
else:
continue
if currentedition and currentgroup:
results[currentgroup][currentedition].append(torrentdata)
return results
# download a torrent file
def getTorrentFile(self,torrentid):
result = self.makeRequest(self.basepath+"torrents.php?torrentid=%s" % (torrentid,))
# process result
re_torrentlink = re.compile(r'torrents\.php\?action=download&id='+str(torrentid)+r'\&authkey=.+?&torrent_pass=\w+')
result = re_torrentlink.search(result)
if not result: sys.exit("Could not find torrent with id %s." % (torrentid,))
torrentlink = result.group().replace("&","&")
torrentdata = self.makeRequest(self.basepath+torrentlink)
# parse header to get filename
torrent_filename = torrentid
for line in iter(self.rec.header.splitlines()):
if 'filename=' in line:
torrent_filename = line[line.find('filename=')+10:-1]
return (torrent_filename, torrentdata)
def close(self):
self.curl.close()
| gpl-3.0 | 8,229,056,412,642,731,000 | 41.853846 | 215 | 0.588584 | false |
stackforge/monasca-notification | monasca_notification/plugins/email_notifier.py | 1 | 11064 | # (C) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
# Copyright 2017 Fujitsu LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import email.header
import email.mime.text
import email.utils
import six
import smtplib
import time
from debtcollector import removals
from oslo_config import cfg
from monasca_notification.plugins import abstract_notifier
CONF = cfg.CONF
EMAIL_SINGLE_HOST_BASE = u'''On host "{hostname}" for target "{target_host}" {message}
Alarm "{alarm_name}" transitioned to the {state} state at {timestamp} UTC
alarm_id: {alarm_id}
Lifecycle state: {lifecycle_state}
Link: {link}
Link to Grafana: {grafana_url}
With dimensions:
{metric_dimensions}'''
EMAIL_MULTIPLE_HOST_BASE = u'''On host "{hostname}" {message}
Alarm "{alarm_name}" transitioned to the {state} state at {timestamp} UTC
alarm_id: {alarm_id}
Lifecycle state: {lifecycle_state}
Link: {link}
Link to Grafana: {grafana_url}
With dimensions:
{metric_dimensions}'''
EMAIL_NO_HOST_BASE = u'''On multiple hosts {message}
Alarm "{alarm_name}" transitioned to the {state} state at {timestamp} UTC
Alarm_id: {alarm_id}
Lifecycle state: {lifecycle_state}
Link: {link}
Link to Grafana: {grafana_url}
With dimensions
{metric_dimensions}'''
class EmailNotifier(abstract_notifier.AbstractNotifier):
type = 'email'
def __init__(self, log):
super(EmailNotifier, self).__init__()
self._log = log
self._smtp = None
@removals.remove(
message='Configuration of notifier is available through oslo.cfg',
version='1.9.0',
removal_version='3.0.0'
)
def config(self, config=None):
self._smtp_connect()
@property
def statsd_name(self):
return "sent_smtp_count"
def send_notification(self, notification):
"""Send the notification via email
Returns the True upon success, False upon failure
"""
# Get the "hostname" from the notification metrics if there is one
hostname = []
targethost = []
for metric in notification.metrics:
dimap = metric['dimensions']
if 'hostname' in dimap and not dimap['hostname'] in hostname:
hostname.append(dimap['hostname'])
if 'target_host' in dimap and not dimap['target_host'] in targethost:
targethost.append(dimap['target_host'])
# Generate the message
msg = self._create_msg(hostname, notification, targethost)
if not self._smtp and not self._smtp_connect():
return False
try:
self._sendmail(notification, msg)
return True
except smtplib.SMTPServerDisconnected:
self._log.warn('SMTP server disconnected. '
'Will reconnect and retry message.')
self._smtp_connect()
except smtplib.SMTPException:
self._email_error(notification)
return False
try:
self._sendmail(notification, msg)
return True
except smtplib.SMTPException:
self._email_error(notification)
return False
def _sendmail(self, notification, msg):
self._smtp.sendmail(CONF.email_notifier.from_addr,
notification.address,
msg.as_string())
self._log.debug("Sent email to {}, notification {}".format(notification.address,
notification.to_json()))
def _email_error(self, notification):
self._log.exception("Error sending Email Notification")
self._log.error("Failed email: {}".format(notification.to_json()))
def _smtp_connect(self):
"""Connect to the smtp server
"""
self._log.info("Connecting to Email Server {}".format(
CONF.email_notifier.server))
try:
smtp = smtplib.SMTP(CONF.email_notifier.server,
CONF.email_notifier.port,
timeout=CONF.email_notifier.timeout)
email_notifier_user = CONF.email_notifier.user
email_notifier_password = CONF.email_notifier.password
if email_notifier_user and email_notifier_password:
smtp.login(email_notifier_user,
email_notifier_password)
self._smtp = smtp
return True
except Exception:
self._log.exception("Unable to connect to email server.")
return False
    def _create_msg(self, hostname, notification, targethost=None):
        """Create two kinds of messages:
1. Notifications that include metrics with a hostname as a dimension.
There may be more than one hostname.
We will only report the hostname if there is only one.
2. Notifications that do not include metrics and therefore no hostname.
Example: API initiated changes.
        * A third notification type which includes metrics but does not include a hostname
          will be treated as type #2.
"""
timestamp = time.asctime(time.gmtime(notification.alarm_timestamp))
alarm_seconds = notification.alarm_timestamp
alarm_ms = int(round(alarm_seconds * 1000))
graf_url = self._get_link_url(notification.metrics[0], alarm_ms)
dimensions = _format_dimensions(notification)
if len(hostname) == 1: # Type 1
if targethost:
text = EMAIL_SINGLE_HOST_BASE.format(
hostname=hostname[0],
target_host=targethost[0],
message=notification.message.lower(),
alarm_name=notification.alarm_name,
state=notification.state,
timestamp=timestamp,
alarm_id=notification.alarm_id,
metric_dimensions=dimensions,
link=notification.link,
grafana_url=graf_url,
lifecycle_state=notification.lifecycle_state
)
subject = u'{} {} "{}" for Host: {} Target: {}'.format(
notification.state, notification.severity,
notification.alarm_name, hostname[0],
targethost[0]
)
else:
text = EMAIL_MULTIPLE_HOST_BASE.format(
hostname=hostname[0],
message=notification.message.lower(),
alarm_name=notification.alarm_name,
state=notification.state,
timestamp=timestamp,
alarm_id=notification.alarm_id,
metric_dimensions=dimensions,
link=notification.link,
grafana_url=graf_url,
lifecycle_state=notification.lifecycle_state
)
subject = u'{} {} "{}" for Host: {}'.format(
notification.state, notification.severity,
notification.alarm_name, hostname[0])
else: # Type 2
text = EMAIL_NO_HOST_BASE.format(
message=notification.message.lower(),
alarm_name=notification.alarm_name,
state=notification.state,
timestamp=timestamp,
alarm_id=notification.alarm_id,
metric_dimensions=dimensions,
link=notification.link,
grafana_url=graf_url,
lifecycle_state=notification.lifecycle_state
)
subject = u'{} {} "{}" '.format(notification.state,
notification.severity,
notification.alarm_name)
msg = email.mime.text.MIMEText(text, 'plain', 'utf-8')
msg['Subject'] = email.header.Header(subject, 'utf-8')
msg['From'] = CONF.email_notifier.from_addr
msg['To'] = notification.address
msg['Date'] = email.utils.formatdate(localtime=True, usegmt=True)
return msg
def _get_link_url(self, metric, timestamp_ms):
"""Returns the url to Grafana including a query with the
respective metric info (name, dimensions, timestamp)
:param metric: the metric for which to display the graph in Grafana
:param timestamp_ms: timestamp of the alarm for the metric in milliseconds
:return: the url to the graph for the given metric or None if no Grafana host
has been defined.
"""
grafana_url = CONF.email_notifier.grafana_url
if grafana_url is None:
return None
        metric_query = "?metric=%s" % metric['name']
dimensions = metric['dimensions']
for key, value in six.iteritems(dimensions):
metric_query += "&dim_%s=%s" % (key, value)
# Show the graph within a range of ten minutes before and after the alarm occurred.
offset = 600000
from_ms = timestamp_ms - offset
to_ms = timestamp_ms + offset
time_query = "&from=%s&to=%s" % (from_ms, to_ms)
url = grafana_url + '/dashboard/script/drilldown.js'
return url + metric_query + time_query
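    # Example of the link built above (values are illustrative assumptions): for a metric
    # named 'cpu.idle_perc' with dimension hostname=devstack and an alarm at
    # timestamp_ms=1500000600000, the method returns something like:
    #   <grafana_url>/dashboard/script/drilldown.js?metric=cpu.idle_perc&dim_hostname=devstack&from=1500000000000&to=1500001200000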
def _format_dimensions(notification):
dimension_sets = []
for metric in notification.metrics:
dimension_sets.append(metric['dimensions'])
dim_set_strings = []
for dimension_set in dimension_sets:
key_value_pairs = []
for key, value in dimension_set.items():
key_value_pairs.append(u' {}: {}'.format(key, value))
set_string = u' {\n' + u',\n'.join(key_value_pairs) + u'\n }'
dim_set_strings.append(set_string)
dimensions = u'[\n' + u',\n'.join(dim_set_strings) + u' \n]'
return dimensions
email_notifier_group = cfg.OptGroup(name='%s_notifier' % EmailNotifier.type)
email_notifier_opts = [
cfg.StrOpt(name='from_addr'),
cfg.HostAddressOpt(name='server'),
cfg.PortOpt(name='port', default=25),
cfg.IntOpt(name='timeout', default=5, min=1),
cfg.StrOpt(name='user', default=None),
cfg.StrOpt(name='password', default=None, secret=True),
cfg.StrOpt(name='grafana_url', default=None)
]
def register_opts(conf):
conf.register_group(email_notifier_group)
conf.register_opts(email_notifier_opts, group=email_notifier_group)
def list_opts():
return {
email_notifier_group: email_notifier_opts
}
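# Illustrative configuration snippet (an assumption, not shipped with this plugin) showing
# how the options registered above could look in an oslo.config ini file:
#
#   [email_notifier]
#   from_addr = [email protected]
#   server = smtp.example.org
#   port = 25
#   timeout = 5
#   user = notifier
#   password = secret
#   grafana_url = http://grafana.example.org:3000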
| apache-2.0 | 4,724,552,376,887,592,000 | 34.235669 | 95 | 0.596439 | false |
MadsJensen/malthe_alpha_project | source_connectivity_permutation.py | 1 | 6505 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 08:41:17 2015.
@author: mje
"""
import numpy as np
import numpy.random as npr
import os
import socket
import mne
# import pandas as pd
from mne.connectivity import spectral_connectivity
from mne.minimum_norm import (apply_inverse_epochs, read_inverse_operator)
# Permutation test.
def permutation_resampling(case, control, num_samples, statistic):
"""
Permutation test.
Return p-value that statistic for case is different
from statistc for control.
"""
observed_diff = abs(statistic(case) - statistic(control))
num_case = len(case)
combined = np.concatenate([case, control])
diffs = []
for i in range(num_samples):
xs = npr.permutation(combined)
diff = np.mean(xs[:num_case]) - np.mean(xs[num_case:])
diffs.append(diff)
pval = (np.sum(diffs > observed_diff) +
np.sum(diffs < -observed_diff))/float(num_samples)
return pval, observed_diff, diffs
def permutation_test(a, b, num_samples, statistic):
"""
Permutation test.
Return p-value that statistic for a is different
from statistc for b.
"""
observed_diff = abs(statistic(b) - statistic(a))
num_a = len(a)
combined = np.concatenate([a, b])
diffs = []
for i in range(num_samples):
xs = npr.permutation(combined)
diff = np.mean(xs[:num_a]) - np.mean(xs[num_a:])
diffs.append(diff)
pval = np.sum(np.abs(diffs) >= np.abs(observed_diff)) / float(num_samples)
return pval, observed_diff, diffs
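# Illustrative usage sketch (not part of the original analysis): runs the permutation test
# on two small synthetic samples; sample sizes, means and permutation count are assumptions.
def _example_permutation_usage():
    group_a = npr.normal(0.0, 1.0, 50)
    group_b = npr.normal(0.5, 1.0, 50)
    pval, observed_diff, _ = permutation_test(group_a, group_b, 1000, np.mean)
    return pval, observed_diff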
# Setup paths and prepare raw data
hostname = socket.gethostname()
if hostname == "Wintermute":
data_path = "/home/mje/mnt/caa/scratch/"
n_jobs = 1
else:
data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
n_jobs = 1
subjects_dir = data_path + "fs_subjects_dir/"
# change dir to save files the rigth place
os.chdir(data_path)
fname_inv = data_path + '0001-meg-oct-6-inv.fif'
fname_epochs = data_path + '0001_p_03_filter_ds_ica-mc_tsss-epo.fif'
fname_evoked = data_path + "0001_p_03_filter_ds_ica-mc_raw_tsss-ave.fif"
# Parameters
snr = 1.0 # Standard assumption for average data but using it for single trial
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
inverse_operator = read_inverse_operator(fname_inv)
epochs = mne.read_epochs(fname_epochs)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
#labels = mne.read_labels_from_annot('0001', parc='PALS_B12_Lobes',
labels = mne.read_labels_from_annot('0001', parc='PALS_B12_Brodmann',
regexp="Brodmann",
subjects_dir=subjects_dir)
labels_occ = labels[6:12]
# labels = mne.read_labels_from_annot('subject_1', parc='aparc.DKTatlas40',
# subjects_dir=subjects_dir)
for cond in epochs.event_id.keys():
stcs = apply_inverse_epochs(epochs[cond], inverse_operator, lambda2,
method, pick_ori="normal")
exec("stcs_%s = stcs" % cond)
labels_name = [label.name for label in labels_occ]
# Extract time series
ts_ctl_left = mne.extract_label_time_course(stcs_ctl_left,
labels_occ,
src=inverse_operator["src"],
mode = "mean_flip")
ts_ent_left = mne.extract_label_time_course(stcs_ent_left,
labels_occ,
src=inverse_operator["src"],
mode = "mean_flip")
stcs_all_left = stcs_ctl_left + stcs_ent_left
ts_all_left = np.asarray(mne.extract_label_time_course(stcs_all_left,
labels_occ,
src=inverse_operator["src"],
mode = "mean_flip"))
number_of_permutations = 2000
index = np.arange(0, len(ts_all_left))
permutations_results = np.empty(number_of_permutations)
fmin, fmax = 7, 12
tmin, tmax = 0, 1
con_method = "plv"
diff_permuatation = np.empty([6, 6, number_of_permutations])
# diff
con_ctl, freqs_ctl, times_ctl, n_epochs_ctl, n_tapers_ctl =\
spectral_connectivity(
ts_ctl_left,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1,
verbose=None)
con_ent, freqs_ent, times_ent, n_epochs_ent, n_tapers_ent =\
spectral_connectivity(
ts_ent_left,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1,
verbose=None)
diff = con_ctl[:, :, 0] - con_ent[:, :, 0]
for i in range(number_of_permutations):
index = np.random.permutation(index)
tmp_ctl = ts_all_left[index[:64], :, :]
tmp_case = ts_all_left[index[64:], :, :]
con_ctl, freqs_ctl, times_ctl, n_epochs_ctl, n_tapers_ctl =\
spectral_connectivity(
tmp_ctl,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1)
con_case, freqs_case, times_case, n_epochs_case, n_tapers_case =\
spectral_connectivity(
tmp_case,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1)
diff_permuatation[:, :, i] = con_ctl[:, :, 0] - con_case[:, :, 0]
pval = np.empty_like(diff)
for h in range(diff.shape[0]):
for j in range(diff.shape[1]):
if diff[h, j] != 0:
            pval[h, j] = np.sum(np.abs(diff_permuatation[h, j, :]) >=
                                np.abs(diff[h, j])) / float(number_of_permutations)
| mit | 1,237,449,237,555,734,500 | 29.539906 | 79 | 0.563105 | false |
roderickmackenzie/gpvdm | gpvdm_gui/gui/QHTabBar.py | 1 | 2005 | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
## @package QHTabBar
#  A horizontal tab bar, because Qt does not provide one.
#
#qt
from PyQt5.QtWidgets import QMainWindow, QTextEdit, QAction, QApplication
from PyQt5.QtGui import QIcon, QPainter, QFont, QColor
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QSizePolicy,QVBoxLayout,QPushButton,QDialog,QFileDialog,QToolBar,QLabel,QComboBox, QTabWidget,QStatusBar,QMenuBar, QTabBar, QStylePainter, QStyleOptionTab,QStyle
class QHTabBar(QTabBar):
def __init__(self, *args, **kwargs):
QTabBar.__init__(self)
self.setStyleSheet("QTabBar::tab { height: 35px; width: 140px; }")
def paintEvent(self, event):
painter = QStylePainter(self)
option = QStyleOptionTab()
#painter.begin(self)
for index in range(self.count()):
self.initStyleOption(option, index)
tabRect = self.tabRect(index)
tabRect.moveLeft(10)
painter.drawControl(QStyle.CE_TabBarTabShape, option)
painter.drawText(tabRect, Qt.AlignVCenter | Qt.TextDontClip, self.tabText(index))
#painter.end()
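# Minimal usage sketch (an assumption, not part of gpvdm itself): setTabBar() is a
# protected member, so the custom bar is attached from inside a QTabWidget subclass.
class ExampleSideTabWidget(QTabWidget):
	def __init__(self, parent=None):
		super().__init__(parent)
		self.setTabBar(QHTabBar())
		self.setTabPosition(QTabWidget.West)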
| gpl-2.0 | -6,475,185,874,997,040,000 | 37.557692 | 197 | 0.74813 | false |
nnmware/nnmware | apps/money/admin.py | 1 | 2443 | # nnmware(c)2012-2020
from __future__ import unicode_literals
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from nnmware.apps.money.models import Transaction, Bill, Currency, ExchangeRate
@admin.register(Transaction)
class TransactionAdmin(admin.ModelAdmin):
list_display = ('user', 'date', 'actor', 'status', 'amount', 'currency', 'content_object')
search_fields = ('name', )
list_filter = ('user', 'date')
ordering = ('user', )
# readonly_fields = ('actor_ctype','actor_oid','target_ctype','target_oid')
fieldsets = (
(_("Transaction"), {"fields": [("user", "date"),
('amount', 'currency', 'status'),
('actor_ctype', 'actor_oid'),
('content_type', 'object_id')]}),
)
_readonly_fields = [] # Default fields that are readonly for everyone.
def get_readonly_fields(self, request, obj=None):
readonly = list(self._readonly_fields)
if request.user.is_staff and not request.user.is_superuser:
readonly.extend(['user', 'date', 'actor_ctype', 'actor_oid', 'content_type', 'object_id', 'amount',
'currency', 'status'])
return readonly
@admin.register(Bill)
class BillAdmin(admin.ModelAdmin):
list_display = ('user', 'invoice_number', 'date_billed', 'content_object', 'status', 'amount', 'currency')
search_fields = ('name',)
list_filter = ('user', 'date_billed')
ordering = ('user', )
# readonly_fields = ('target_ctype','target_oid')
fieldsets = (
(_("Bill"), {"fields": [("user", "date_billed"),
('amount', 'currency'),
('content_type', 'object_id'),
('invoice_number', 'description_small'),
('description',),
('status', 'date')]}),
)
@admin.register(Currency)
class CurrencyAdmin(admin.ModelAdmin):
list_display = ('code',)
search_fields = ('name',)
@admin.register(ExchangeRate)
class ExchangeRateAdmin(admin.ModelAdmin):
list_display = ('currency', 'date', 'nominal', 'official_rate', 'rate')
search_fields = ('currency',)
fieldsets = (
(_("Exchange Rate"), {"fields": [("currency", "date"), ('nominal', 'official_rate', 'rate')]}),
)
| gpl-3.0 | 3,056,534,765,082,308,000 | 37.171875 | 111 | 0.552599 | false |
t3dev/odoo | addons/website_slides_survey/tests/test_course_certification_failure.py | 1 | 6421 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.survey.tests.common import SurveyCase
class TestCourseCertificationFailureFlow(SurveyCase):
def test_course_certification_failure_flow(self):
# Step 1: create a simple certification
# --------------------------------------------------
with self.sudo(self.survey_user):
certification = self.env['survey.survey'].create({
'title': 'Small course certification',
'access_mode': 'public',
'users_login_required': True,
'scoring_type': 'scoring_with_answers',
'certificate': True,
'is_attempts_limited': True,
'passing_score': 100.0,
'attempts_limit': 2,
'stage_id': self.env['survey.stage'].search([('closed', '=', False)]).id
})
self._add_question(
None, 'Question 1', 'simple_choice',
sequence=1,
survey_id=certification.id,
labels=[
{'value': 'Wrong answer'},
{'value': 'Correct answer', 'is_correct': True, 'answer_score': 1.0}
])
self._add_question(
None, 'Question 2', 'simple_choice',
sequence=2,
survey_id=certification.id,
labels=[
{'value': 'Wrong answer'},
{'value': 'Correct answer', 'is_correct': True, 'answer_score': 1.0}
])
# Step 1.1: create a simple channel
self.channel = self.env['slide.channel'].sudo().create({
'name': 'Test Channel',
'channel_type': 'training',
'enroll': 'public',
'visibility': 'public',
'website_published': True,
})
# Step 2: link the certification to a slide of type 'certification'
self.slide_certification = self.env['slide.slide'].sudo().create({
'name': 'Certification slide',
'channel_id': self.channel.id,
'slide_type': 'certification',
'survey_id': certification.id,
'website_published': True,
})
# Step 3: add public user as member of the channel
self.channel._action_add_members(self.user_public.partner_id)
# forces recompute of partner_ids as we create directly in relation
self.channel.invalidate_cache()
slide_partner = self.slide_certification._action_set_viewed(self.user_public.partner_id)
self.slide_certification.sudo(self.user_public)._generate_certification_url()
self.assertEqual(1, len(slide_partner.user_input_ids), 'A user input should have been automatically created upon slide view')
# Step 4: fill in the created user_input with wrong answers
self.fill_in_answer(slide_partner.user_input_ids[0], certification.question_ids)
self.assertFalse(slide_partner.survey_quizz_passed, 'Quizz should not be marked as passed with wrong answers')
# forces recompute of partner_ids as we delete directly in relation
self.channel.invalidate_cache()
self.assertIn(self.user_public.partner_id, self.channel.partner_ids, 'Public user should still be a member of the course because he still has attempts left')
# Step 5: simulate a 'retry'
retry_user_input = self.slide_certification.survey_id.sudo()._create_answer(
partner=self.user_public.partner_id,
**{
'slide_id': self.slide_certification.id,
'slide_partner_id': slide_partner.id
},
invite_token=slide_partner.user_input_ids[0].invite_token
)
# Step 6: fill in the new user_input with wrong answers again
self.fill_in_answer(retry_user_input, certification.question_ids)
# forces recompute of partner_ids as we delete directly in relation
self.channel.invalidate_cache()
self.assertNotIn(self.user_public.partner_id, self.channel.partner_ids, 'Public user should have been kicked out of the course because he failed his last attempt')
# Step 7: add public user as member of the channel once again
self.channel._action_add_members(self.user_public.partner_id)
# forces recompute of partner_ids as we create directly in relation
self.channel.invalidate_cache()
self.assertIn(self.user_public.partner_id, self.channel.partner_ids, 'Public user should be a member of the course once again')
new_slide_partner = self.slide_certification._action_set_viewed(self.user_public.partner_id)
self.slide_certification.sudo(self.user_public)._generate_certification_url()
self.assertEqual(1, len(new_slide_partner.user_input_ids.filtered(lambda user_input: user_input.state != 'done')), 'A new user input should have been automatically created upon slide view')
# Step 8: fill in the created user_input with correct answers this time
self.fill_in_answer(new_slide_partner.user_input_ids.filtered(lambda user_input: user_input.state != 'done')[0], certification.question_ids, good_answers=True)
self.assertTrue(new_slide_partner.survey_quizz_passed, 'Quizz should be marked as passed with correct answers')
# forces recompute of partner_ids as we delete directly in relation
self.channel.invalidate_cache()
self.assertIn(self.user_public.partner_id, self.channel.partner_ids, 'Public user should still be a member of the course')
def fill_in_answer(self, answer, questions, good_answers=False):
""" Fills in the user_input with answers for all given questions.
You can control whether the answer will be correct or not with the 'good_answers' param.
(It's assumed that wrong answers are at index 0 of question.labels_ids and good answers at index 1) """
answer.write({
'state': 'done',
'user_input_line_ids': [
(0, 0, {
'question_id': question.id,
'answer_type': 'suggestion',
'answer_score': 1 if good_answers else 0,
'value_suggested': question.labels_ids[1 if good_answers else 0].id
}) for question in questions
]
})
| gpl-3.0 | 2,418,318,624,319,238,000 | 51.631148 | 197 | 0.611587 | false |
dikaiosune/nau-elc-force-completion-mailer | mailer.py | 1 | 7174 | __author__ = 'adam'
import configparser
import logging
import argparse
import smtplib
from email.mime.text import MIMEText
from datetime import datetime, timedelta
from xlrd import open_workbook
from jinja2 import Template
template = Template("""
<p>Dear {{ instructor.first }},</p>
<p>One of the most common Blackboard Learn problems encountered by students and instructors is a student's inability to
resume taking a quiz or test after a temporary network glitch. This is greatly exacerbated by the use of the "Force
Completion" test option. We strongly recommend that you never use this option in your Bb quiz or test unless you
have a very specific pedagogical reason to do so. If you are not familiar with the option, a more detailed
explanation is available at <a href="https://bblearn.nau.edu/bbcswebdav/xid-28427315_1" target="_blank">this page</a>.
If you are familiar with this option and would like to keep it in place regardless, please ignore the rest of this
message.
</p>
<p>We have run a report to find tests and quizzes in your {{ term }} courses that have the Force Completion option
selected. We <i>strongly</i> encourage you to disable this option and to use <b>Auto-Submit</b> instead. To turn off
Force Completion for these items, simply find the item in your course (we have done our best to identify where that
is), select <b>Edit the Test Options</b> from its drop-down menu, and under the <b>Test Availability</b> section,
deselect/uncheck <b>Force Completion</b>, then click <b>Submit</b>. </p>
<p>{{ term }} tests with Force Completion enabled as of {{ day_before_report }}:</p>
<ul>
{% for course, tests in instructor.courses.items() %}
<li> {{ course }}
<ul>
{% for test in tests %} <li> {{ test }} </li>
{% endfor %}
</ul>
<br/>
</li>
{% endfor %}
</ul>
<p>Please contact the e-Learning Center if you would like to discuss this setting. In short, we recommend that you never
use the Force Completion option.</p>
<p>
<a href="http://nau.edu/elc">e-Learning Center</a><br>
<a href="mailto:[email protected]">[email protected]</a><br>
In Flagstaff: +1 (928) 523-5554<br>
Elsewhere: +1 (866) 802-5256<br>
</p>
""")
def create_root_logger(log_file):
parent_logger = logging.getLogger('nau_force_completion')
parent_logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(name)s^%(levelname)s: %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
parent_logger.addHandler(fh)
parent_logger.addHandler(ch)
def parse_cli_arguments():
argparser = argparse.ArgumentParser(description='Force Completion email tool for ELC @ NAU.')
argparser.add_argument('--config', required=True, help='Path to ini file.', metavar='FILE')
argparser.add_argument('--report', required=True, metavar='FILE',
help='Path to the force completion report file.')
argparser.add_argument('--dry-run', action='store_true',
help='Add this flag to send all emails to the default address specified in the ini file.')
return vars(argparser.parse_args())
# parse CLI args for:
args = parse_cli_arguments()
# dry-run?
dry_run = args['dry_run']
# report file
report_file = args['report']
config_file = args['config']
# parse some report metadata from the filename
# example filename format: 'force-completion-1154-2015-08-04_120920'
filename = report_file[report_file.index('force-completion-'):]
termcode = filename[17:21]
term = {'1': 'Spring', '4': 'Summer', '7': 'Fall', '8': 'Winter'}[termcode[3]] + ' 20' + termcode[1:3]
day_before_report = datetime.strptime(filename[22:32], '%Y-%m-%d') - timedelta(days=1)
day_before_report = day_before_report.strftime('%A %B %d, %Y')
# read configuration
config = configparser.ConfigParser()
config.read(config_file)
config = config['FORCE_COMPLETE']
# setup root logger
logfile = config.get('logfile', 'force-completion-mailer.log')
create_root_logger(logfile)
log = logging.getLogger('nau_force_completion.mailer')
log.debug("Parameters: %s", args)
log.debug("Config: %s", {k: config[k] for k in config})
# get default email
default_email = config.get('default_email')
# get server info
smtp_server = config['smtp_server']
smtp_port = config['smtp_port']
sender = smtplib.SMTP(host=smtp_server, port=smtp_port)
# parse report into instructors, courses and tests
report = open_workbook(filename=report_file).sheet_by_index(0)
header_keys = [report.cell(0, idx).value for idx in range(report.ncols)]
rows_as_dict_list = []
for row_index in range(1, report.nrows):
d = {header_keys[col_index]: report.cell(row_index, col_index).value
for col_index in range(report.ncols)}
rows_as_dict_list.append(d)
instructors = {}
num_instructors = 0
num_courses = 0
num_tests = 0
while len(rows_as_dict_list) > 0:
row = rows_as_dict_list.pop()
uid = row['PI UID']
first_name = row['PI First Name']
last_name = row['PI Last Name']
email = row['PI Email']
course_id = row['Course ID']
course_name = row['Course Name']
test_name = row['Test Name']
test_path = row['Path to Test']
if uid not in instructors:
instructors[uid] = {'first': first_name, 'last': last_name, 'email': email, 'courses': {}}
num_instructors += 1
if course_name not in instructors[uid]['courses']:
instructors[uid]['courses'][course_name] = []
num_courses += 1
instructors[uid]['courses'][course_name].append(test_path + ' > ' + test_name)
num_tests += 1
# remove the course id from the data structure, it's no longer needed for templating
for i in instructors:
for c in instructors[i]['courses']:
instructors[i]['courses'][c] = sorted(instructors[i]['courses'][c])
# print stats on report (num instructors, num courses, num tests)
log.info('Report successfully parsed.')
log.info('%s instructors found in report.', num_instructors)
log.info('%s courses found in report.', num_courses)
log.info('%s tests found in report.', num_tests)
log.info('Sending %s emails...', num_instructors)
# render templates and send emails
emails_sent = 0
for uid in instructors:
instructor = instructors.get(uid)
current_email = template.render(instructor=instructor, term=term, day_before_report=day_before_report)
msg = MIMEText(current_email, 'html')
msg['Subject'] = 'Bb Learn Force Completion Notification'
msg['From'] = 'e-Learning Center <[email protected]>'
# if it's a dry run, send to the test email, rather than each individual user
to_addr = default_email if dry_run else instructor.get('email')
instructor_name = instructor['first'] + ' ' + instructor['last']
msg['To'] = instructor_name + ' <' + to_addr + '>'
sender.sendmail(from_addr='[email protected]', to_addrs=to_addr, msg=msg.as_string())
emails_sent += 1
log.info('Sent email to %s (%s), %s/%s sent.', instructor_name, to_addr, emails_sent, num_instructors)
sender.quit()
| mit | -1,298,246,076,243,391,500 | 35.050251 | 122 | 0.681907 | false |
jackrzhang/zulip | zerver/views/invite.py | 1 | 5106 |
from django.conf import settings
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from typing import List, Optional, Set
from zerver.decorator import require_realm_admin, to_non_negative_int, \
require_non_guest_human_user
from zerver.lib.actions import do_invite_users, do_revoke_user_invite, do_resend_user_invite_email, \
get_default_subs, do_get_user_invites, do_create_multiuse_invite_link
from zerver.lib.request import REQ, has_request_variables, JsonableError
from zerver.lib.response import json_success, json_error, json_response
from zerver.lib.streams import access_stream_by_name, access_stream_by_id
from zerver.lib.validator import check_string, check_list, check_bool, check_int
from zerver.models import PreregistrationUser, Stream, UserProfile
import re
@require_non_guest_human_user
@has_request_variables
def invite_users_backend(request: HttpRequest, user_profile: UserProfile,
invitee_emails_raw: str=REQ("invitee_emails"),
invite_as_admin: Optional[bool]=REQ(validator=check_bool, default=False),
) -> HttpResponse:
if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
return json_error(_("Must be an organization administrator"))
if invite_as_admin and not user_profile.is_realm_admin:
return json_error(_("Must be an organization administrator"))
if not invitee_emails_raw:
return json_error(_("You must specify at least one email address."))
invitee_emails = get_invitee_emails_set(invitee_emails_raw)
stream_names = request.POST.getlist('stream')
if not stream_names:
return json_error(_("You must specify at least one stream for invitees to join."))
# We unconditionally sub you to the notifications stream if it
# exists and is public.
notifications_stream = user_profile.realm.notifications_stream # type: Optional[Stream]
if notifications_stream and not notifications_stream.invite_only:
stream_names.append(notifications_stream.name)
streams = [] # type: List[Stream]
for stream_name in stream_names:
try:
(stream, recipient, sub) = access_stream_by_name(user_profile, stream_name)
except JsonableError:
return json_error(_("Stream does not exist: %s. No invites were sent.") % (stream_name,))
streams.append(stream)
do_invite_users(user_profile, invitee_emails, streams, invite_as_admin)
return json_success()
def get_invitee_emails_set(invitee_emails_raw: str) -> Set[str]:
invitee_emails_list = set(re.split(r'[,\n]', invitee_emails_raw))
invitee_emails = set()
for email in invitee_emails_list:
is_email_with_name = re.search(r'<(?P<email>.*)>', email)
if is_email_with_name:
email = is_email_with_name.group('email')
invitee_emails.add(email.strip())
return invitee_emails
@require_realm_admin
def get_user_invites(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
all_users = do_get_user_invites(user_profile)
return json_success({'invites': all_users})
@require_realm_admin
@has_request_variables
def revoke_user_invite(request: HttpRequest, user_profile: UserProfile,
prereg_id: int) -> HttpResponse:
try:
prereg_user = PreregistrationUser.objects.get(id=prereg_id)
except PreregistrationUser.DoesNotExist:
raise JsonableError(_("No such invitation"))
if prereg_user.referred_by.realm != user_profile.realm:
raise JsonableError(_("No such invitation"))
do_revoke_user_invite(prereg_user)
return json_success()
@require_realm_admin
@has_request_variables
def resend_user_invite_email(request: HttpRequest, user_profile: UserProfile,
prereg_id: int) -> HttpResponse:
try:
prereg_user = PreregistrationUser.objects.get(id=prereg_id)
except PreregistrationUser.DoesNotExist:
raise JsonableError(_("No such invitation"))
if (prereg_user.referred_by.realm != user_profile.realm):
raise JsonableError(_("No such invitation"))
timestamp = do_resend_user_invite_email(prereg_user)
return json_success({'timestamp': timestamp})
@require_realm_admin
@has_request_variables
def generate_multiuse_invite_backend(request: HttpRequest, user_profile: UserProfile,
stream_ids: List[int]=REQ(validator=check_list(check_int),
default=[])) -> HttpResponse:
streams = []
for stream_id in stream_ids:
try:
(stream, recipient, sub) = access_stream_by_id(user_profile, stream_id)
except JsonableError:
return json_error(_("Invalid stream id {}. No invites were sent.".format(stream_id)))
streams.append(stream)
invite_link = do_create_multiuse_invite_link(user_profile, streams)
return json_success({'invite_link': invite_link})
| apache-2.0 | -138,191,675,718,502,620 | 42.641026 | 101 | 0.686643 | false |
sanguinariojoe/FreeCAD | src/Mod/Fem/femtaskpanels/task_solver_ccxtools.py | 9 | 16191 | # ***************************************************************************
# * Copyright (c) 2015 Bernd Hahnebach <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM solver calculix ccx tools task panel for the document object"
__author__ = "Bernd Hahnebach"
__url__ = "https://www.freecadweb.org"
## @package task_solver_ccxtools
# \ingroup FEM
# \brief task panel for solver ccx tools object
import os
import sys
import time
from PySide import QtCore
from PySide import QtGui
from PySide.QtCore import Qt
from PySide.QtGui import QApplication
import FreeCAD
import FreeCADGui
import FemGui
if sys.version_info.major >= 3:
def unicode(text, *args):
return str(text)
class _TaskPanel:
"""
The TaskPanel for CalculiX ccx tools solver object
"""
def __init__(self, solver_object):
self.form = FreeCADGui.PySideUic.loadUi(
FreeCAD.getHomePath() + "Mod/Fem/Resources/ui/SolverCalculix.ui"
)
from femtools.ccxtools import CcxTools as ccx
# we do not need to pass the analysis, it will be found on fea init
# TODO: if there is no analysis object in the document, init of fea
# will fail with an exception and the task panel will not open;
# handle this more gracefully with a pop-up error message and still open
# the task panel, possibly deactivating the write and run buttons.
self.fea = ccx(solver_object)
self.fea.setup_working_dir()
self.fea.setup_ccx()
self.Calculix = QtCore.QProcess()
self.Timer = QtCore.QTimer()
self.Timer.start(300)
self.fem_console_message = ""
# Connect Signals and Slots
QtCore.QObject.connect(
self.form.tb_choose_working_dir,
QtCore.SIGNAL("clicked()"),
self.choose_working_dir
)
QtCore.QObject.connect(
self.form.pb_write_inp,
QtCore.SIGNAL("clicked()"),
self.write_input_file_handler
)
QtCore.QObject.connect(
self.form.pb_edit_inp,
QtCore.SIGNAL("clicked()"),
self.editCalculixInputFile
)
QtCore.QObject.connect(
self.form.pb_run_ccx,
QtCore.SIGNAL("clicked()"),
self.runCalculix
)
QtCore.QObject.connect(
self.form.rb_static_analysis,
QtCore.SIGNAL("clicked()"),
self.select_static_analysis
)
QtCore.QObject.connect(
self.form.rb_frequency_analysis,
QtCore.SIGNAL("clicked()"),
self.select_frequency_analysis
)
QtCore.QObject.connect(
self.form.rb_thermomech_analysis,
QtCore.SIGNAL("clicked()"),
self.select_thermomech_analysis
)
QtCore.QObject.connect(
self.form.rb_check_mesh,
QtCore.SIGNAL("clicked()"),
self.select_check_mesh
)
QtCore.QObject.connect(
self.form.rb_buckling_analysis,
QtCore.SIGNAL("clicked()"),
self.select_buckling_analysis
)
QtCore.QObject.connect(
self.Calculix,
QtCore.SIGNAL("started()"),
self.calculixStarted
)
QtCore.QObject.connect(
self.Calculix,
QtCore.SIGNAL("stateChanged(QProcess::ProcessState)"),
self.calculixStateChanged
)
QtCore.QObject.connect(
self.Calculix,
QtCore.SIGNAL("error(QProcess::ProcessError)"),
self.calculixError
)
QtCore.QObject.connect(
self.Calculix,
QtCore.SIGNAL("finished(int)"),
self.calculixFinished
)
QtCore.QObject.connect(
self.Timer,
QtCore.SIGNAL("timeout()"),
self.UpdateText
)
self.update()
def getStandardButtons(self):
# only show a close button
# def accept() is no longer needed, since there is no OK button
return int(QtGui.QDialogButtonBox.Close)
def reject(self):
FreeCADGui.ActiveDocument.resetEdit()
def update(self):
"fills the widgets"
self.form.le_working_dir.setText(self.fea.working_dir)
if self.fea.solver.AnalysisType == "static":
self.form.rb_static_analysis.setChecked(True)
elif self.fea.solver.AnalysisType == "frequency":
self.form.rb_frequency_analysis.setChecked(True)
elif self.fea.solver.AnalysisType == "thermomech":
self.form.rb_thermomech_analysis.setChecked(True)
elif self.fea.solver.AnalysisType == "check":
self.form.rb_check_mesh.setChecked(True)
elif self.fea.solver.AnalysisType == "buckling":
self.form.rb_buckling_analysis.setChecked(True)
return
def femConsoleMessage(self, message="", color="#000000"):
if sys.version_info.major < 3:
message = message.encode("utf-8", "replace")
self.fem_console_message = self.fem_console_message + (
'<font color="#0000FF">{0:4.1f}:</font> <font color="{1}">{2}</font><br>'
.format(time.time() - self.Start, color, message)
)
self.form.textEdit_Output.setText(self.fem_console_message)
self.form.textEdit_Output.moveCursor(QtGui.QTextCursor.End)
def printCalculiXstdout(self):
out = self.Calculix.readAllStandardOutput()
# print(type(out))
# <class 'PySide2.QtCore.QByteArray'>
if out.isEmpty():
self.femConsoleMessage("CalculiX stdout is empty", "#FF0000")
return False
if sys.version_info.major >= 3:
# https://forum.freecadweb.org/viewtopic.php?f=18&t=39195
# convert QByteArray to a binary string an decode it to "utf-8"
out = out.data().decode() # "utf-8" can be omitted
# print(type(out))
# print(out)
else:
try:
out = unicode(out, "utf-8", "replace")
rx = QtCore.QRegExp("\\*ERROR.*\\n\\n")
# print(rx)
rx.setMinimal(True)
pos = rx.indexIn(out)
while not pos < 0:
match = rx.cap(0)
FreeCAD.Console.PrintError(match.strip().replace("\n", " ") + "\n")
pos = rx.indexIn(out, pos + 1)
except UnicodeDecodeError:
self.femConsoleMessage("Error converting stdout from CalculiX", "#FF0000")
out = os.linesep.join([s for s in out.splitlines() if s])
out = out.replace("\n", "<br>")
# print(out)
self.femConsoleMessage(out)
if "*ERROR in e_c3d: nonpositive jacobian" in out:
error_message = (
"\n\nCalculiX returned an error due to "
"nonpositive jacobian determinant in at least one element\n"
"Use the run button on selected solver to get a better error output.\n"
)
FreeCAD.Console.PrintError(error_message)
if "*ERROR" in out:
return False
else:
return True
def UpdateText(self):
if(self.Calculix.state() == QtCore.QProcess.ProcessState.Running):
self.form.l_time.setText("Time: {0:4.1f}: ".format(time.time() - self.Start))
def calculixError(self, error=""):
print("Error() {}".format(error))
self.femConsoleMessage("CalculiX execute error: {}".format(error), "#FF0000")
def calculixNoError(self):
print("CalculiX done without error!")
self.femConsoleMessage("CalculiX done without error!", "#00AA00")
def calculixStarted(self):
# print("calculixStarted()")
FreeCAD.Console.PrintLog("calculix state: {}\n".format(self.Calculix.state()))
self.form.pb_run_ccx.setText("Break CalculiX")
def calculixStateChanged(self, newState):
if (newState == QtCore.QProcess.ProcessState.Starting):
self.femConsoleMessage("Starting CalculiX...")
if (newState == QtCore.QProcess.ProcessState.Running):
self.femConsoleMessage("CalculiX is running...")
if (newState == QtCore.QProcess.ProcessState.NotRunning):
self.femConsoleMessage("CalculiX stopped.")
def calculixFinished(self, exitCode):
# print("calculixFinished(), exit code: {}".format(exitCode))
FreeCAD.Console.PrintLog("calculix state: {}\n".format(self.Calculix.state()))
# Restore previous cwd
QtCore.QDir.setCurrent(self.cwd)
self.Timer.stop()
if self.printCalculiXstdout():
self.calculixNoError()
else:
self.calculixError()
self.form.pb_run_ccx.setText("Re-run CalculiX")
self.femConsoleMessage("Loading result sets...")
self.form.l_time.setText("Time: {0:4.1f}: ".format(time.time() - self.Start))
self.fea.reset_mesh_purge_results_checked()
self.fea.inp_file_name = self.fea.inp_file_name
# check if ccx is greater than 2.10, if not do not read results
# https://forum.freecadweb.org/viewtopic.php?f=18&t=23548#p183829 Point 3
# https://forum.freecadweb.org/viewtopic.php?f=18&t=23548&start=20#p183909
# https://forum.freecadweb.org/viewtopic.php?f=18&t=23548&start=30#p185027
# https://github.com/FreeCAD/FreeCAD/commit/3dd1c9f
majorVersion, minorVersion = self.fea.get_ccx_version()
if majorVersion == 2 and minorVersion <= 10:
message = (
"The used CalculiX version {}.{} creates broken output files. "
"The result file will not be read by FreeCAD FEM. "
"You still can try to read it stand alone with FreeCAD, but it is "
"strongly recommended to upgrade CalculiX to a newer version.\n"
.format(majorVersion, minorVersion)
)
QtGui.QMessageBox.warning(None, "Upgrade CalculiX", message)
raise
QApplication.setOverrideCursor(Qt.WaitCursor)
try:
self.fea.load_results()
except Exception:
FreeCAD.Console.PrintError("loading results failed\n")
QApplication.restoreOverrideCursor()
self.form.l_time.setText("Time: {0:4.1f}: ".format(time.time() - self.Start))
def choose_working_dir(self):
wd = QtGui.QFileDialog.getExistingDirectory(None, "Choose CalculiX working directory",
self.fea.working_dir)
if os.path.isdir(wd):
self.fea.setup_working_dir(wd)
self.form.le_working_dir.setText(self.fea.working_dir)
def write_input_file_handler(self):
self.Start = time.time()
self.form.l_time.setText("Time: {0:4.1f}: ".format(time.time() - self.Start))
QApplication.restoreOverrideCursor()
if self.check_prerequisites_helper():
QApplication.setOverrideCursor(Qt.WaitCursor)
self.fea.write_inp_file()
if self.fea.inp_file_name != "":
self.femConsoleMessage("Write completed.")
self.form.pb_edit_inp.setEnabled(True)
self.form.pb_run_ccx.setEnabled(True)
else:
self.femConsoleMessage("Write .inp file failed!", "#FF0000")
QApplication.restoreOverrideCursor()
self.form.l_time.setText("Time: {0:4.1f}: ".format(time.time() - self.Start))
def check_prerequisites_helper(self):
self.Start = time.time()
self.femConsoleMessage("Check dependencies...")
self.form.l_time.setText("Time: {0:4.1f}: ".format(time.time() - self.Start))
self.fea.update_objects()
message = self.fea.check_prerequisites()
if message != "":
QtGui.QMessageBox.critical(None, "Missing prerequisite(s)", message)
return False
return True
def start_ext_editor(self, ext_editor_path, filename):
if not hasattr(self, "ext_editor_process"):
self.ext_editor_process = QtCore.QProcess()
if self.ext_editor_process.state() != QtCore.QProcess.Running:
self.ext_editor_process.start(ext_editor_path, [filename])
def editCalculixInputFile(self):
print("editCalculixInputFile {}".format(self.fea.inp_file_name))
ccx_prefs = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Fem/Ccx")
if ccx_prefs.GetBool("UseInternalEditor", True):
FemGui.open(self.fea.inp_file_name)
else:
ext_editor_path = ccx_prefs.GetString("ExternalEditorPath", "")
if ext_editor_path:
self.start_ext_editor(ext_editor_path, self.fea.inp_file_name)
else:
print(
"External editor is not defined in FEM preferences. "
"Falling back to internal editor"
)
FemGui.open(self.fea.inp_file_name)
def runCalculix(self):
# print("runCalculix")
self.Start = time.time()
self.femConsoleMessage("CalculiX binary: {}".format(self.fea.ccx_binary))
self.femConsoleMessage("CalculiX input file: {}".format(self.fea.inp_file_name))
self.femConsoleMessage("Run CalculiX...")
FreeCAD.Console.PrintMessage(
"run CalculiX at: {} with: {}\n"
.format(self.fea.ccx_binary, self.fea.inp_file_name)
)
# change cwd because ccx may crash if directory has no write permission
# there is also a limit on the length of file names, so jump to the document directory
self.cwd = QtCore.QDir.currentPath()
fi = QtCore.QFileInfo(self.fea.inp_file_name)
QtCore.QDir.setCurrent(fi.path())
self.Calculix.start(self.fea.ccx_binary, ["-i", fi.baseName()])
QApplication.restoreOverrideCursor()
def select_analysis_type(self, analysis_type):
if self.fea.solver.AnalysisType != analysis_type:
self.fea.solver.AnalysisType = analysis_type
self.form.pb_edit_inp.setEnabled(False)
self.form.pb_run_ccx.setEnabled(False)
def select_static_analysis(self):
self.select_analysis_type("static")
def select_frequency_analysis(self):
self.select_analysis_type("frequency")
def select_thermomech_analysis(self):
self.select_analysis_type("thermomech")
def select_check_mesh(self):
self.select_analysis_type("check")
def select_buckling_analysis(self):
self.select_analysis_type("buckling")
| lgpl-2.1 | -6,757,533,628,294,308,000 | 39.680905 | 94 | 0.577852 | false |
matt-graham/reversible-rng | revrng/tests/test_reversible_random_state.py | 1 | 5637 | import numpy as np
from revrng.numpy_wrapper import ReversibleRandomState
SEED = 12345
N_ITER = 100
IN_RANGE_SAMPLES = 10000
SHAPES = [2, (1,), (5, 4), (3, 2, 1, 2)]
def test_shape():
state = ReversibleRandomState(SEED)
for shape in SHAPES:
# ndarray shape always tuple even if integer specified
tuple_shape = shape if isinstance(shape, tuple) else (shape,)
samples = state.random_int32(shape)
assert samples.shape == tuple_shape, (
'random_int32 shape mismatch: should be {0} actually {1}'
.format(tuple_shape, samples.shape)
)
samples = state.standard_uniform(shape)
assert samples.shape == tuple_shape, (
'standard_uniform shape mismatch: should be {0} actually {1}'
.format(tuple_shape, samples.shape)
)
samples = state.standard_normal(shape)
assert samples.shape == tuple_shape, (
'standard_normal shape mismatch: should be {0} actually {1}'
.format(tuple_shape, samples.shape)
)
def test_no_shape_calls():
state = ReversibleRandomState(SEED)
sample = state.random_int32()
assert isinstance(sample, int), (
'random_int32 type mismatch: should be int instance actually {0}'
.format(type(sample))
)
sample = state.standard_uniform()
assert isinstance(sample, float), (
'standard_uniform type mismatch: should be float instance actually {0}'
.format(type(sample))
)
sample = state.standard_normal()
assert isinstance(sample, float), (
'standard_normal type mismatch: should be float instance actually {0}'
.format(type(sample))
)
def test_dtype():
state = ReversibleRandomState(SEED)
shape = (5, 4)
samples = state.random_int32(shape)
assert samples.dtype == np.uint64, (
'random_int32 dtype mismatch: should be uint64 actually {0}'
.format(samples.dtype)
)
samples = state.standard_uniform(shape)
assert samples.dtype == np.float64, (
'standard_uniform dtype mismatch: should be float64 actually {0}'
.format(samples.dtype)
)
samples = state.standard_normal(shape)
assert samples.dtype == np.float64, (
'standard_normal dtype mismatch: should be float64 actually {0}'
.format(samples.dtype)
)
def test_reversibility_random_int32():
state = ReversibleRandomState(SEED)
samples_fwd = []
for i in range(N_ITER):
samples_fwd.append(state.random_int32(i + 1))
state.reverse()
for i in range(N_ITER - 1, -1, -1):
sample_fwd = samples_fwd.pop(-1)
sample_bwd = state.random_int32(i + 1)
assert np.all(sample_fwd == sample_bwd), (
'Incorrect reversed random_int32 samples, expected {0} got {1}'
.format(sample_fwd, sample_bwd)
)
def test_reversibility_standard_uniform():
state = ReversibleRandomState(SEED)
samples_fwd = []
for i in range(N_ITER):
samples_fwd.append(state.standard_uniform(i + 1))
state.reverse()
for i in range(N_ITER - 1, -1, -1):
sample_fwd = samples_fwd.pop(-1)
sample_bwd = state.standard_uniform(i + 1)
assert np.all(sample_fwd == sample_bwd), (
'Incorrect reversed standard_uniform samples, expected {0} got {1}'
.format(sample_fwd, sample_bwd)
)
def test_reversibility_standard_normal():
state = ReversibleRandomState(SEED)
samples_fwd = []
for i in range(N_ITER):
samples_fwd.append(state.standard_normal(i + 1))
state.reverse()
for i in range(N_ITER - 1, -1, -1):
sample_fwd = samples_fwd.pop(-1)
sample_bwd = state.standard_normal(i + 1)
assert np.all(sample_fwd == sample_bwd), (
'Incorrect reversed standard_normal samples, expected {0} got {1}'
.format(sample_fwd, sample_bwd)
)
def test_reversibility_mixed():
state = ReversibleRandomState(SEED)
samples_fwd = []
for i in range(N_ITER):
samples_fwd.append(state.random_int32(i + 1))
samples_fwd.append(state.standard_uniform(i + 1))
samples_fwd.append(state.standard_normal(i + 1))
state.reverse()
for i in range(N_ITER - 1, -1, -1):
# sample in reverse order
sample_fwd = samples_fwd.pop(-1)
sample_bwd = state.standard_normal(i + 1)
assert np.all(sample_fwd == sample_bwd), (
'Incorrect reversed standard_normal samples, expected {0} got {1}'
.format(sample_fwd, sample_bwd)
)
sample_fwd = samples_fwd.pop(-1)
sample_bwd = state.standard_uniform(i + 1)
assert np.all(sample_fwd == sample_bwd), (
'Incorrect reversed standard_uniform samples, expected {0} got {1}'
.format(sample_fwd, sample_bwd)
)
sample_fwd = samples_fwd.pop(-1)
sample_bwd = state.random_int32(i + 1)
assert np.all(sample_fwd == sample_bwd), (
'Incorrect reversed random_int32 samples, expected {0} got {1}'
.format(sample_fwd, sample_bwd)
)
def test_random_int32_in_range():
state = ReversibleRandomState(SEED)
samples = state.random_int32(IN_RANGE_SAMPLES)
assert np.all(samples >= 0) and np.all(samples < 2**32), (
'random_int32 samples out of range [0, 2**32)'
)
def test_standard_uniform_in_range():
state = ReversibleRandomState(SEED)
samples = state.standard_uniform(IN_RANGE_SAMPLES)
assert np.all(samples >= 0.) and np.all(samples < 1.), (
'standard_uniform samples out of range [0., 1.)'
)
| mit | -8,083,645,207,336,234,000 | 34.23125 | 79 | 0.615221 | false |
vhakulinen/bruno-server | bruno/commands/decorators.py | 1 | 1424 | from bruno.send_utils import send_error
from bruno.env import inputs
class Args:
"""
Decorator for validating the number of args. You can also pass in a help
message.
"""
def __init__(self, no_args, msg=None):
self.no_args = no_args
if msg:
self.msg = '(' + msg + ')'
else:
self.msg = None
def __call__(self, func):
def wrapper(socket, args):
if self.no_args == len(args):
func(socket, args)
else:
send_error(socket, 102,
'Invalid arguments ' + (self.msg if self.msg else ''))
return wrapper
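# Hypothetical usage sketch (added for illustration, not part of the original
# module): the command handler, its argument count and the help text below are
# assumptions. Args checks len(args) against no_args and, on mismatch, reports
# error 102 together with the optional help message.
@Args(2, msg='usage: MSG <receiver> <text>')
def _example_msg_command(socket, args):
    receiver, text = args
    return receiver, text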
class auth_required:
"""
Decorator for checking if the client has logged in. If not, sends an auth
error to the client.
"""
def __init__(self, func):
self.func = func
def __call__(self, socket, *args, **kwargs):
if inputs[socket].profile:
self.func(socket, *args, **kwargs)
else:
send_error(socket, 203)
class udp_required:
"""
Decorator for checking if the client has given us a UDP connection. If not,
sends an error to the client.
"""
def __init__(self, func):
self.func = func
def __call__(self, socket, *args, **kwargs):
if inputs[socket].udp_addr:
self.func(socket, *args, **kwargs)
else:
send_error(socket, 103)
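# Hypothetical usage sketch (illustration only, not part of the original
# module): a made-up handler showing the decorators stacked. auth_required
# rejects sockets without a logged-in profile (error 203) and udp_required
# rejects sockets without a registered UDP address (error 103) before the
# wrapped handler runs.
@auth_required
@udp_required
def _example_stream_command(socket, *args):
    return args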
| gpl-2.0 | -6,546,121,015,751,768,000 | 25.37037 | 79 | 0.525983 | false |
FireWalkerX/eyeOS-FOSS-V.2.0 | devtools/qooxdoo-sdk/tool/bin/create-application.py | 1 | 12157 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2008 - 2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Fabian Jakobs (fjakobs)
# * Andreas Ecker (ecker)
#
################################################################################
import re, os, sys, optparse, shutil, errno, stat, codecs, glob
from string import Template
import qxenviron
from ecmascript.frontend import lang
from generator.runtime.Log import Log
from misc import Path
SCRIPT_DIR = qxenviron.scriptDir
FRAMEWORK_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
SKELETON_DIR = unicode(os.path.normpath(os.path.join(FRAMEWORK_DIR, "component", "skeleton")))
APP_DIRS = [x for x in os.listdir(SKELETON_DIR) if not re.match(r'^\.',x)]
R_ILLEGAL_NS_CHAR = re.compile(r'(?u)[^\.\w]') # allow unicode, but disallow $
R_SHORT_DESC = re.compile(r'(?m)^short:: (.*)$') # to search "short:: ..." in skeleton's 'readme.txt'
QOOXDOO_VERSION = '' # will be filled later
def getAppInfos():
appInfos = {}
for dir in APP_DIRS:
readme = os.path.join(SKELETON_DIR, dir, "readme.txt")
appinfo = ""
if os.path.isfile(readme):
cont = open(readme, "r").read()
mo = R_SHORT_DESC.search(cont)
if mo:
appinfo = mo.group(1)
appInfos[dir] = appinfo
return appInfos
APP_INFOS = getAppInfos()
def getQxVersion():
global QOOXDOO_VERSION
versionFile = os.path.join(FRAMEWORK_DIR, "version.txt")
version = codecs.open(versionFile,"r", "utf-8").read()
version = version.strip()
QOOXDOO_VERSION = version
return
def createApplication(options):
out = options.out
if sys.platform == 'win32' and re.match( r'^[a-zA-Z]:$', out):
out = out + '\\'
else:
out = os.path.expanduser(out)
if not os.path.isdir(out):
if os.path.isdir(normalizePath(out)):
out = normalizePath(out)
else:
console.error("Output directory '%s' does not exist" % out)
sys.exit(1)
outDir = os.path.join(out, options.name)
copySkeleton(options.skeleton_path, options.type, outDir, options.namespace)
if options.type == "contribution":
patchSkeleton(os.path.join(outDir, "trunk"), FRAMEWORK_DIR, options)
else:
patchSkeleton(outDir, FRAMEWORK_DIR, options)
return
def copySkeleton(skeleton_path, app_type, dir, namespace):
console.log("Copy skeleton into the output directory: %s" % dir)
def rename_folders(root_dir):
# rename name space parts of paths
# rename in class path
source_dir = os.path.join(root_dir, "source", "class", "custom")
out_dir = os.path.join(root_dir, "source", "class")
expand_dir(source_dir, out_dir, namespace)
# rename in resource path
resource_dir = os.path.join(root_dir, "source", "resource", "custom")
out_dir = os.path.join(root_dir, "source", "resource")
expand_dir(resource_dir, out_dir, namespace)
# rename in script path
script_dir = os.path.join(root_dir, "source", "script")
script_files = glob.glob(os.path.join(script_dir, "custom.*js"))
if script_files:
for script_file in script_files:
os.rename(script_file, script_file.replace("custom", namespace))
template = os.path.join(skeleton_path, app_type)
if not os.path.isdir(template):
console.error("Unknown application type '%s'." % app_type)
sys.exit(1)
try:
shutil.copytree(template, dir)
except OSError:
console.error("Failed to copy skeleton, maybe the directory already exists")
sys.exit(1)
if app_type == "contribution":
app_dir = os.path.join(dir, "trunk")
else:
app_dir = dir
rename_folders(app_dir)
if app_type == "contribution":
rename_folders(os.path.join(app_dir, "demo", "default"))
#clean svn directories
for root, dirs, files in os.walk(dir, topdown=False):
if ".svn" in dirs:
filename = os.path.join(root, ".svn")
shutil.rmtree(filename, ignore_errors=False, onerror=handleRemoveReadonly)
def expand_dir(indir, outroot, namespace):
"appends namespace parts to outroot, and renames indir to the last part"
if not (os.path.isdir(indir) and os.path.isdir(outroot)):
return
ns_parts = namespace.split('.')
target = outroot
for part in ns_parts:
target = os.path.join(target, part)
if part == ns_parts[-1]: # it's the last part
os.rename(indir, target)
else:
os.mkdir(target)
def patchSkeleton(dir, framework_dir, options):
absPath = normalizePath(framework_dir)
if absPath[-1] == "/":
absPath = absPath[:-1]
if sys.platform == 'cygwin':
if re.match( r'^\.{1,2}\/', dir ):
relPath = Path.rel_from_to(normalizePath(dir), framework_dir)
elif re.match( r'^/cygdrive\b', dir):
relPath = Path.rel_from_to(dir, framework_dir)
else:
relPath = Path.rel_from_to(normalizePath(dir), normalizePath(framework_dir))
else:
relPath = Path.rel_from_to(normalizePath(dir), normalizePath(framework_dir))
relPath = re.sub(r'\\', "/", relPath)
if relPath[-1] == "/":
relPath = relPath[:-1]
if not os.path.isdir(os.path.join(dir, relPath)):
console.error("Relative path to qooxdoo directory is not correct: '%s'" % relPath)
sys.exit(1)
if options.type == "contribution":
relPath = os.path.join(os.pardir, os.pardir, "qooxdoo", QOOXDOO_VERSION)
relPath = re.sub(r'\\', "/", relPath)
for root, dirs, files in os.walk(dir):
for file in files:
split = file.split(".")
if len(split) >= 3 and split[1] == "tmpl":
outFile = os.path.join(root, split[0] + "." + ".".join(split[2:]))
inFile = os.path.join(root, file)
console.log("Patching file '%s'" % outFile)
config = Template(open(inFile).read())
out = open(outFile, "w")
out.write(
config.substitute({
"Name": options.name,
"Namespace": options.namespace,
"NamespacePath" : (options.namespace).replace('.', '/'),
"REL_QOOXDOO_PATH": relPath,
"ABS_QOOXDOO_PATH": absPath,
"QOOXDOO_VERSION": QOOXDOO_VERSION,
"Cache" : options.cache,
}).encode('utf-8')
)
out.close()
os.remove(inFile)
for root, dirs, files in os.walk(dir):
for file in [file for file in files if file.endswith(".py")]:
os.chmod(os.path.join(root, file), (stat.S_IRWXU
|stat.S_IRGRP |stat.S_IXGRP
|stat.S_IROTH |stat.S_IXOTH)) # 0755
def handleRemoveReadonly(func, path, exc):
# For Windows the 'readonly' must not be set for resources to be removed
excvalue = exc[1]
if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777
func(path)
else:
raise
def normalizePath(path):
# Fix Windows annoyance to randomly return drive letters uppercase or lowercase.
# Under Cygwin the user could also supply a lowercase drive letter. For those
# two systems, the drive letter is always converted to uppercase, the remaining
# path to lowercase
if not sys.platform == 'win32' and not sys.platform == 'cygwin':
return path
path = re.sub(r'\\+', "/", path)
if sys.platform == 'cygwin':
search = re.match( r'^/cygdrive/([a-zA-Z])(/.*)$', path)
if search:
return search.group(1).upper() + ":" + search.group(2).lower()
search = re.match( r'^([a-zA-Z])(:.*)$', path )
if search:
return search.group(1).upper() + search.group(2).lower()
return path
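# Hypothetical examples of the normalisation described above (added for
# illustration; exact results depend on the platform branch taken):
#   normalizePath("c:\\Users\\Foo")        -> "C:/users/foo"   (on win32)
#   normalizePath("/cygdrive/d/Some/Dir")  -> "D:/some/dir"    (on cygwin)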
def checkNamespace(options):
# check availability and spelling
if not options.namespace:
if R_ILLEGAL_NS_CHAR.search(options.name):
convertedName = R_ILLEGAL_NS_CHAR.sub("_", options.name)
console.log("WARNING: Converted illegal characters in name (from %s to %s)" % (options.name, convertedName))
options.name = convertedName
options.namespace = convertedName.lower()
else:
options.namespace = options.name.lower()
else:
options.namespace = options.namespace.decode('utf-8')
if R_ILLEGAL_NS_CHAR.search(options.namespace):
convertedNamespace = R_ILLEGAL_NS_CHAR.sub("_", options.namespace)
console.log("WARNING: Converted illegal characters in namespace (from %s to %s)" % (options.namespace, convertedNamespace))
options.namespace = convertedNamespace
# check reserved words
if options.namespace in lang.GLOBALS:
console.error("JS reserved word '%s' is not allowed as name space" % options.namespace)
sys.exit(1)
def main():
parser = optparse.OptionParser()
parser.set_usage('''\
%prog --name APPLICATIONNAME [--out DIRECTORY]
[--namespace NAMESPACE] [--type TYPE]
[-logfile LOGFILE] [--skeleton-path PATH]
Script to create a new qooxdoo application.
Example: For creating a regular GUI application \'myapp\' you could execute:
%prog --name myapp''')
parser.add_option(
"-n", "--name", dest="name", metavar="APPLICATIONNAME",
help="Name of the application. An application folder with identical name will be created. (Required)"
)
parser.add_option(
"-o", "--out", dest="out", metavar="DIRECTORY", default=".",
help="Output directory for the application folder. (Default: %default)"
)
parser.add_option(
"-s", "--namespace", dest="namespace", metavar="NAMESPACE", default=None,
help="Applications's top-level namespace. (Default: APPLICATIONNAME)"
)
parser.add_option(
"-t", "--type", dest="type", metavar="TYPE", default="gui",
help="Type of the application to create, one of: "+str(map(str, sorted(APP_INFOS.keys())))+"." +
str(", ".join(["'%s' %s" % (x, y) for x,y in sorted(APP_INFOS.items())])) +
". (Default: %default)"
)
parser.add_option(
"-l", "--logfile", dest="logfile", metavar="LOGFILE",
default=None, type="string", help="Log file"
)
parser.add_option(
"-p", "--skeleton-path", dest="skeleton_path", metavar="PATH", default=SKELETON_DIR,
help="(Advanced) Path where the script looks for skeletons. " +
"The directory must contain sub directories named by " +
"the application types. (Default: %default)"
)
parser.add_option(
"--cache", dest="cache", metavar="PATH", default="${TMPDIR}/cache",
help="Path to the cache directory; will be entered into config.json's CACHE macro (Default: %default)"
)
(options, args) = parser.parse_args(sys.argv[1:])
if not options.name:
parser.print_help()
sys.exit(1)
else:
options.name = options.name.decode('utf-8')
# Initialize console
global console
console = Log(options.logfile, "info")
checkNamespace(options)
getQxVersion()
createApplication(options)
console.log("DONE")
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print
print "Keyboard interrupt!"
sys.exit(1)
| agpl-3.0 | 4,756,107,839,994,583,000 | 33.933908 | 135 | 0.586329 | false |
gtesei/fast-furious | competitions/santander-customer-transaction-prediction/base_light_gbm1.py | 1 | 2556 | import lightgbm as lgb
import pandas as pd
import numpy as np
import sys
from datetime import datetime
from pathlib import Path
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
path=Path("data/")
train=pd.read_csv(path/"train.csv").drop("ID_code",axis=1)
test=pd.read_csv(path/"test.csv").drop("ID_code",axis=1)
param = {
'boost_from_average':'false',
'bagging_fraction': 0.5,
'boost': 'gbdt',
'feature_fraction': 0.02,
'learning_rate': 0.001,
'max_depth': 6,
'metric':'auc',
'min_data_in_leaf': 100,
'min_sum_hessian_in_leaf': 10.0,
'num_leaves': 13,
'n_jobs': 30,
'tree_learner': 'serial',
'objective': 'binary',
'verbosity': -1
}
result=np.zeros(test.shape[0])
rskf = RepeatedStratifiedKFold(n_splits=5, n_repeats=5,random_state=10)
best_iteration , best_valid_auc = 0, 0
for counter,(train_index, valid_index) in enumerate(rskf.split(train, train.target),1):
print ("Rep-Fold:",counter)
sys.stdout.flush()
#Train data
t=train.iloc[train_index]
trn_data = lgb.Dataset(t.drop("target",axis=1), label=t.target)
#Validation data
v=train.iloc[valid_index]
val_data = lgb.Dataset(v.drop("target",axis=1), label=v.target)
#Training
model = lgb.train(param, trn_data, 1000000, feature_name=train.columns.tolist()[1:], valid_sets = [trn_data, val_data], verbose_eval=500, early_stopping_rounds = 4000)
result +=model.predict(test)
## feat imp
gain = model.feature_importance('gain')
ft = pd.DataFrame({'feature':train.columns.tolist()[1:],'split':model.feature_importance('split'),'gain':100 * gain / gain.sum()}).sort_values('gain', ascending=False)
print("************ FEAT IMPORTANCE *****************")
print(ft.head(25))
print()
##
_best_valid_auc = model.best_score['valid_1']['auc']
_best_iteration = model.best_iteration
print("best_iteration:",_best_iteration,"- best_valid_auc:",_best_valid_auc )
best_valid_auc +=_best_valid_auc
best_iteration += _best_iteration
submission = pd.read_csv(path/'sample_submission.csv')
submission['target'] = result/counter
filename="{:%Y-%m-%d_%H_%M}_sub_after_tune.csv".format(datetime.now())
submission.to_csv(filename, index=False)
## feat importance
best_valid_auc = best_valid_auc/counter
best_iteration = best_iteration/counter
fh = open("base_light_gbm1.log","w")
print("best_iteration_avg:",best_iteration,"- best_valid_auc_avg:",best_valid_auc,file=fh)
fh.close()
| mit | -557,002,843,498,613,570 | 31.769231 | 171 | 0.664319 | false |
nagyistoce/netzob | src/netzob/Common/Threads/Task.py | 1 | 2501 | # -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports
#+---------------------------------------------------------------------------+
from gettext import gettext as _
class Task(object):
"""Base class for asynchronous tasks."""
def config(self, return_cb, exception_cb):
"""Set return and exception callbacks."""
self.return_cb = return_cb
self.exception_cb = exception_cb
def run(self):
raise RuntimeError("Run method must be overriden")
def cancel(self):
pass
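# Hypothetical example (added for illustration, not part of the original file):
# a minimal Task subclass. The callback wiring follows the contract above: an
# executor calls config() with a return callback and an exception callback,
# then invokes run().
class _ExampleTask(Task):
    """Toy task that reports a fixed result through the configured callback."""

    def run(self):
        try:
            self.return_cb("done")
        except Exception as e:
            self.exception_cb(e)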
| gpl-3.0 | -3,064,698,557,478,612,500 | 53.282609 | 78 | 0.403284 | false |
hanak/artshow-keeper | tests/datafile.py | 1 | 1532 | # Artshow Keeper: A support tool for keeping an Artshow running.
# Copyright (C) 2014 Ivo Hanak
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import shutil
class Datafile:
def __init__(self, sourceFilename, testName):
self.path = os.path.join(os.path.dirname(__file__), 'data')
self.sourceFilename = os.path.join(self.path, sourceFilename)
filename, fileExt = os.path.splitext(sourceFilename)
self.testSpecificFilename = os.path.join(self.path, filename + testName + fileExt)
shutil.copy(self.sourceFilename, self.testSpecificFilename)
def clear(self):
if os.path.isfile(self.testSpecificFilename):
os.remove(self.testSpecificFilename)
if os.path.isfile(self.testSpecificFilename + '.bak'):
os.remove(self.testSpecificFilename + '.bak')
def getFilename(self):
return self.testSpecificFilename
| gpl-3.0 | 5,881,882,281,821,692,000 | 40.405405 | 90 | 0.710836 | false |
GarrettArm/TheDjangoBook | mysite_project/blog/views.py | 1 | 1361 | from django.shortcuts import render
from django.utils import timezone
from django.views import generic
from .models import Post
class IndexView(generic.ListView):
template_name = "blog/post_list.html"
context_object_name = "posts"
def get_queryset(self):
return Post.objects.filter(published_date__lte=timezone.now()).order_by(
"published_date"
)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
for i in context["object_list"]:
i.text_as_list = self.split_to_paragraphs(i.text)
description = ["A description"]
context["description"] = description
return context
def split_to_paragraphs(self, text):
text_list = f"{text[:300]}...".split("\n")
return text_list
class DetailView(generic.DetailView):
template_name = "blog/detail.html"
model = Post
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
print(context)
context["object"].text_as_list = self.split_to_paragraphs(
context["object"].text
)
description = ["Another description"]
context["description"] = description
return context
def split_to_paragraphs(self, text):
text_list = text.split("\n")
return text_list
| gpl-3.0 | -2,322,962,383,464,944,600 | 28.586957 | 80 | 0.626745 | false |
Great-Li-Xin/PythonDev | Games/Chap8/Zombie Mob.py | 1 | 9550 | # MyLibrary.py
import sys, time, random, math, pygame
from pygame.locals import *
# prints text using the supplied font
def print_text(font, x, y, text, color=(255, 255, 255)):
imgText = font.render(text, True, color)
screen = pygame.display.get_surface() # req'd when function moved into MyLibrary
screen.blit(imgText, (x, y))
# MySprite class extends pygame.sprite.Sprite
class MySprite(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self) # extend the base Sprite class
self.master_image = None
self.frame = 0
self.old_frame = -1
self.frame_width = 1
self.frame_height = 1
self.first_frame = 0
self.last_frame = 0
self.columns = 1
self.last_time = 0
self.direction = 0
self.velocity = Point(0.0, 0.0)
# X property
def _getx(self):
return self.rect.x
def _setx(self, value):
self.rect.x = value
X = property(_getx, _setx)
# Y property
def _gety(self):
return self.rect.y
def _sety(self, value):
self.rect.y = value
Y = property(_gety, _sety)
# position property
def _getpos(self):
return self.rect.topleft
def _setpos(self, pos):
self.rect.topleft = pos
position = property(_getpos, _setpos)
def load(self, filename, width, height, columns):
self.master_image = pygame.image.load(filename).convert_alpha()
self.frame_width = width
self.frame_height = height
self.rect = Rect(0, 0, width, height)
self.columns = columns
# try to auto-calculate total frames
rect = self.master_image.get_rect()
self.last_frame = (rect.width // width) * (rect.height // height) - 1
def update(self, current_time, rate=30):
# update animation frame number
if current_time > self.last_time + rate:
self.frame += 1
if self.frame > self.last_frame:
self.frame = self.first_frame
self.last_time = current_time
# build current frame only if it changed
if self.frame != self.old_frame:
frame_x = (self.frame % self.columns) * self.frame_width
frame_y = (self.frame // self.columns) * self.frame_height
rect = Rect(frame_x, frame_y, self.frame_width, self.frame_height)
self.image = self.master_image.subsurface(rect)
self.old_frame = self.frame
def __str__(self):
return str(self.frame) + "," + str(self.first_frame) + \
"," + str(self.last_frame) + "," + str(self.frame_width) + \
"," + str(self.frame_height) + "," + str(self.columns) + \
"," + str(self.rect)
# Point class
class Point(object):
def __init__(self, x, y):
self.__x = x
self.__y = y
# X property
def getx(self): return self.__x
def setx(self, x): self.__x = x
x = property(getx, setx)
# Y property
def gety(self): return self.__y
def sety(self, y): self.__y = y
y = property(gety, sety)
def __str__(self):
return "{X:" + "{:.0f}".format(self.__x) + ",Y:" + "{:.0f}".format(self.__y) + "}"
def calc_velocity(direction, vel=1.0):
velocity = Point(0, 0)
if direction == 0: # north
velocity.y = -vel
elif direction == 2: # east
velocity.x = vel
elif direction == 4: # south
velocity.y = vel
elif direction == 6: # west
velocity.x = -vel
return velocity
def reverse_direction(sprite):
if sprite.direction == 0:
sprite.direction = 4
elif sprite.direction == 2:
sprite.direction = 6
elif sprite.direction == 4:
sprite.direction = 0
elif sprite.direction == 6:
sprite.direction = 2
if __name__ == '__main__':
# main program begins
pygame.init()
screen = pygame.display.set_mode((800, 600))
pygame.display.set_caption("Collision Demo")
font = pygame.font.Font(None, 36)
timer = pygame.time.Clock()
# create sprite groups
player_group = pygame.sprite.Group()
zombie_group = pygame.sprite.Group()
health_group = pygame.sprite.Group()
# create the player sprite
player = MySprite()
player.load("farmer walk.png", 96, 96, 8)
player.position = 80, 80
player.direction = 4
player_group.add(player)
# create the zombie sprite
zombie_image = pygame.image.load("zombie walk.png").convert_alpha()
for counter in range(0, 10):
zombie = MySprite()
zombie.load("zombie walk.png", 96, 96, 8)
zombie.position = random.randint(0, 700), random.randint(0, 500)
zombie.direction = random.randint(0, 3) * 2
zombie_group.add(zombie)
# create heath sprite
health = MySprite()
health.load("health.png", 32, 32, 1)
health.position = 400, 300
health_group.add(health)
game_over = False
player_moving = False
player_health = 100
# repeating loop
while True:
timer.tick(30)
ticks = pygame.time.get_ticks()
for event in pygame.event.get():
if event.type == QUIT: sys.exit()
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
sys.exit()
elif keys[K_UP] or keys[K_w]:
player.direction = 0
player_moving = True
elif keys[K_RIGHT] or keys[K_d]:
player.direction = 2
player_moving = True
elif keys[K_DOWN] or keys[K_s]:
player.direction = 4
player_moving = True
elif keys[K_LEFT] or keys[K_a]:
player.direction = 6
player_moving = True
else:
player_moving = False
if not game_over:
# set animation frames based on player's direction
player.first_frame = player.direction * player.columns
player.last_frame = player.first_frame + player.columns - 1
if player.frame < player.first_frame:
player.frame = player.first_frame
if not player_moving:
# stop animating when player is not pressing a key
player.frame = player.first_frame = player.last_frame
else:
# move player in direction
player.velocity = calc_velocity(player.direction, 1.5)
player.velocity.x *= 1.5
player.velocity.y *= 1.5
# update player sprite
player_group.update(ticks, 50)
# manually move the player
if player_moving:
player.X += player.velocity.x
player.Y += player.velocity.y
if player.X < 0:
player.X = 0
elif player.X > 700:
player.X = 700
if player.Y < 0:
player.Y = 0
elif player.Y > 500:
player.Y = 500
# update zombie sprites
zombie_group.update(ticks, 50)
# manually iterate through all the zombies
for single_zombie in zombie_group:
# set the zombie's animation range
single_zombie.first_frame = single_zombie.direction * single_zombie.columns
single_zombie.last_frame = single_zombie.first_frame + single_zombie.columns - 1
if single_zombie.frame < single_zombie.first_frame:
single_zombie.frame = single_zombie.first_frame
single_zombie.velocity = calc_velocity(single_zombie.direction)
# keep the zombie on the screen
single_zombie.X += single_zombie.velocity.x
single_zombie.Y += single_zombie.velocity.y
if single_zombie.X < 0 or single_zombie.X > 700 or single_zombie.Y < 0 or single_zombie.Y > 500:
reverse_direction(single_zombie)
# check for collision with zombies
attacker = None
attacker = pygame.sprite.spritecollideany(player, zombie_group)
if attacker is not None:
# we got a hit, now do a more precise check
if pygame.sprite.collide_rect_ratio(0.5)(player, attacker):
player_health -= 10
if attacker.X < player.X:
attacker.X -= 10
elif attacker.X > player.X:
attacker.X += 10
else:
attacker = None
# update the health drop
health_group.update(ticks, 50)
# check for collision with health
if pygame.sprite.collide_rect_ratio(0.5)(player, health):
player_health += 30
if player_health > 100: player_health = 100
health.X = random.randint(0, 700)
health.Y = random.randint(0, 500)
# is player dead?
if player_health <= 0:
game_over = True
# clear the screen
screen.fill((50, 50, 100))
# draw sprites
health_group.draw(screen)
zombie_group.draw(screen)
player_group.draw(screen)
# draw energy bar
pygame.draw.rect(screen, (50, 150, 50, 180), Rect(300, 570, player_health * 2, 25))
pygame.draw.rect(screen, (100, 200, 100, 180), Rect(300, 570, 200, 25), 2)
if game_over:
print_text(font, 300, 100, "G A M E O V E R")
pygame.display.update()
| mit | 5,144,566,735,357,413,000 | 31.263514 | 112 | 0.553298 | false |
oculusstorystudio/kraken | Python/kraken/ui/HAppkit_Editors/editor_widgets/integer_editor.py | 1 | 2518 |
import sys
from kraken.ui.Qt import QtWidgets, QtGui, QtCore
from ..fe import FE
from ..widget_factory import EditorFactory
from ..base_editor import BaseValueEditor
class IntegerEditor(BaseValueEditor):
def __init__(self, valueController, parent=None):
super(IntegerEditor, self).__init__(valueController, parent=parent)
hbox = QtWidgets.QHBoxLayout()
self._editor = QtWidgets.QSpinBox(self)
if(self._dataType == 'UInt8' or
self._dataType == 'UInt16' or
self._dataType == 'UInt32' or
self._dataType == 'UInt64' or
self._dataType == 'Index' or
self._dataType == 'Size' or
self._dataType == 'Byte'):
self._editor.setMinimum(0)
else:
self._editor.setMinimum(-100000000)
self._editor.setMaximum(100000000)
self._editor.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
hbox.addWidget(self._editor, 1)
hbox.addStretch(0)
hbox.setContentsMargins(0, 0, 0, 0)
self.setLayout(hbox)
self.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
self.updateEditorValue()
self._editor.valueChanged.connect(self._setValueToController)
self.setEditable( valueController.isEditable() )
def setEditable(self, editable):
self._editor.setReadOnly( not editable )
def getEditorValue(self):
value = self._editor.value()
return value#self._klType(value)
def setEditorValue(self, value):
# Clamp values to avoid OverflowError
if value > sys.maxint:
value = sys.maxint
elif value < -sys.maxint:
value = -sys.maxint
self._editor.setValue(value)
@classmethod
def canDisplay(cls, valueController):
dataType = valueController.getDataType()
return (dataType == 'Integer' or
dataType == 'UInt8' or
dataType == 'SInt8' or
dataType == 'UInt16' or
dataType == 'SInt16' or
dataType == 'UInt32' or
dataType == 'SInt32' or
dataType == 'UInt64' or
dataType == 'SInt64' or
dataType == 'Index' or
dataType == 'Size' or
dataType == 'Byte')
EditorFactory.registerEditorClass(IntegerEditor)
| bsd-3-clause | -3,679,933,215,878,452,700 | 30.475 | 96 | 0.575854 | false |
DucQuang1/py-earth | doc/generate_figures.py | 1 | 1933 | import matplotlib as mpl
mpl.use('Agg')
import numpy
from pyearth import Earth
from matplotlib import pyplot
#=========================================================================
# V-Function Example
#=========================================================================
# Create some fake data
numpy.random.seed(0)
m = 1000
n = 10
X = 80 * numpy.random.uniform(size=(m, n)) - 40
y = numpy.abs(X[:, 6] - 4.0) + 1 * numpy.random.normal(size=m)
# Fit an Earth model
model = Earth()
model.fit(X, y)
# Print the model
print model.trace()
print model.summary()
# Plot the model
y_hat = model.predict(X)
pyplot.figure()
pyplot.plot(X[:, 6], y, 'r.')
pyplot.plot(X[:, 6], y_hat, 'b.')
pyplot.xlabel('x_6')
pyplot.ylabel('y')
pyplot.title('Simple Earth Example')
pyplot.savefig('simple_earth_example.png')
#=========================================================================
# Hinge plot
#=========================================================================
from xkcdify import XKCDify
x = numpy.arange(-10, 10, .1)
y = x * (x > 0)
fig = pyplot.figure(figsize=(10, 5))
pyplot.plot(x, y)
ax = pyplot.gca()
pyplot.title('Basic Hinge Function')
pyplot.xlabel('x')
pyplot.ylabel('h(x)')
pyplot.annotate('x=t', (0, 0), xytext=(-30, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
XKCDify(ax)
pyplot.setp(ax, frame_on=False)
pyplot.savefig('hinge.png')
#=========================================================================
# Piecewise Linear Plot
#=========================================================================
m = 1000
x = numpy.arange(-10, 10, .1)
y = 1 - 2 * (1 - x) * (x < 1) + 0.5 * (x - 1) * (x > 1)
pyplot.figure(figsize=(10, 5))
pyplot.plot(x, y)
ax = pyplot.gca()
pyplot.xlabel('x')
pyplot.ylabel('y')
pyplot.title('Piecewise Linear Function')
XKCDify(ax)
pyplot.setp(ax, frame_on=False)
pyplot.savefig('piecewise_linear.png')
| bsd-3-clause | 4,116,783,869,634,021,400 | 26.614286 | 80 | 0.510088 | false |
mjamesruggiero/tripp | setup.py | 1 | 1394 | s!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.md') as readme_file:
readme = readme_file.read()
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='tripp',
version='0.1.0',
description=""Data science fundamentals"",
    long_description=readme,
author="Michael Ruggiero",
author_email='[email protected]',
url='https://github.com/mjamesruggiero/tripp',
packages=[
'tripp',
],
package_dir={'tripp':
'tripp'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='tripp',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
| bsd-3-clause | 1,882,682,804,747,385,900 | 24.814815 | 50 | 0.609039 | false |
bvesperman/Sector67RaspberryPiAccessControl | space_machines/door.py | 1 | 5509 | import logging
import time
import Queue
import threading
from Tkinter import *
from pystates import StateMachine
class DoorState(StateMachine):
def CLOSED_LOCKED(self):
self.generate_message({"event": self.name + "_CLOSED_LOCKED"})
if self.show_gui: self.v.set("CLOSED_LOCKED")
self.log.debug("turn off solenoid")
self.generate_message({"event": self.name + "_LOCK_DOOR"})
while True:
ev = yield
if ev['event'] == "VALID_KEY":
self.transition(self.CLOSED_UNLOCKING)
if ev['event'] == "DOOR_OPENED":
self.transition(self.FORCED_OPEN)
def CLOSED_UNLOCKING(self):
self.generate_message({"event": self.name + "_CLOSED_UNLOCKING", "timeout": self.unlock_timeout})
if self.show_gui: self.v.set("CLOSED_UNLOCKING")
self.log.debug("turn on solenoid")
self.generate_message({"event": self.name + "_UNLOCK_DOOR"})
self.log.debug("waiting up to " + str(self.unlock_timeout) + " seconds")
while True:
ev = yield
if ev['event'] == "DOOR_OPENED":
self.log.debug('Unlocked and opened')
self.transition(self.OPEN_UNLOCKING)
if self.duration() > self.unlock_timeout:
self.log.debug('Unlocked but not opened')
self.transition(self.CLOSED_LOCKED)
def OPEN_UNLOCKING(self):
self.generate_message({"event": self.name + "_OPEN_UNLOCKING"})
if self.show_gui: self.v.set("OPEN_UNLOCKING")
self.log.debug("waiting up to " + str(self.open_unlock_timeout) + " seconds")
while True:
ev = yield
if ev['event'] == "DOOR_CLOSED":
self.log.debug('Door closed')
self.transition(self.CLOSED_LOCKED)
if self.duration() > self.open_unlock_timeout:
self.transition(self.OPEN_LOCKED)
def OPEN_LOCKED(self):
self.generate_message({"event": self.name + "_OPEN_LOCKED"})
if self.show_gui: self.v.set("OPEN_LOCKED")
self.log.debug("turn off solenoid")
self.generate_message({"event": self.name + "_LOCK_DOOR"})
self.log.debug("waiting up to " + str(self.stuck_open_timeout) + "seconds")
while True:
ev = yield
if ev['event'] == "DOOR_CLOSED":
self.log.debug('Door closed')
self.transition(self.CLOSED_LOCKED)
if self.duration() > self.stuck_open_timeout:
self.log.debug("timeout!")
self.transition(self.STUCK_OPEN)
def STUCK_OPEN(self):
self.generate_message({"event": self.name + "_STUCK_OPEN"})
if self.show_gui: self.v.set("STUCK_OPEN")
self.log.debug("door stuck open")
while True:
ev = yield
if ev['event'] == "DOOR_CLOSED":
self.log.debug('Door finally closed')
self.transition(self.CLOSED_LOCKED)
def FORCED_OPEN(self):
if self.show_gui: self.v.set("FORCED_OPEN")
self.generate_message({"event": self.name + "_FORCED_OPEN"})
self.log.debug("door forced open")
while True:
ev = yield
if ev['event'] == "DOOR_CLOSED":
self.log.debug('Door closed')
self.transition(self.CLOSED_LOCKED)
if self.duration() > self.stuck_open_timeout:
self.log.debug("timeout!")
self.transition(self.STUCK_OPEN)
def setup(self, out_queue, name, unlock_timeout=5, open_unlock_timeout=1, stuck_open_timeout=15):
self.log = logging.getLogger("DoorState")
self.out_queue = out_queue
self.name = name
self.unlock_timeout = int(unlock_timeout)
self.open_unlock_timeout = int(open_unlock_timeout)
self.stuck_open_timeout = int(stuck_open_timeout)
""" Perform initialization here, detect the current state and send that
to the super class start.
"""
def start(self):
# assume a starting state of CLOSED_LOCKED and appropriate messages will send it to the correct state
super(DoorState, self).start(self.CLOSED_LOCKED)
def config_gui(self, root):
self.show_gui = True
# Set up the GUI part
frame = LabelFrame(root, text=self.name, padx=5, pady=5)
frame.pack(fill=X)
self.v = StringVar()
self.v.set("UNKNOWN")
w = Label(frame, textvariable=self.v)
w.pack(side=LEFT)
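# --- Hedged usage sketch, not part of the original module ---
# DoorState.generate_message() (inherited from pystates.StateMachine) is
# assumed here to put each event dict onto the out_queue passed to setup();
# drain_door_events is an illustrative helper name, not an existing API.
def drain_door_events(out_queue, timeout=1.0):
  """Log every event dict the door state machine has queued so far."""
  log = logging.getLogger("DoorEvents")
  while True:
    try:
      msg = out_queue.get(timeout=timeout)
    except Queue.Empty:
      return
    log.info("door event: %s", msg.get("event"))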
def main():
out_queue = Queue.Queue()
logging.basicConfig(level=logging.DEBUG)
name = "TEST_DOOR"
doorstate = DoorState(name=name)
doorstate.setup(out_queue, name=name)
doorstate.start()
doorstate.send_message({"event": "VALID_KEY"})
logging.info('unlock the door, open then close it')
doorstate.send_message({"event":"VALID_KEY"})
time.sleep(2)
doorstate.send_message({"event":"DOOR_OPENED"})
time.sleep(2)
doorstate.send_message({"event":"DOOR_CLOSED"})
time.sleep(2)
logging.info('current state:' + doorstate.current_state())
logging.info('unlock the door but do not open it')
time.sleep(2)
doorstate.send_message({"event":"VALID_KEY"})
time.sleep(10)
logging.info('open the door and close it quickly')
time.sleep(0.1)
doorstate.send_message({"event":"VALID_KEY"})
doorstate.send_message({"event":"DOOR_OPENED"})
doorstate.send_message({"event":"DOOR_CLOSED"})
time.sleep(2)
logging.info('open the door and leave it open for 30 seconds')
time.sleep(2)
doorstate.send_message({"event":"VALID_KEY"})
doorstate.send_message({"event":"DOOR_OPENED"})
time.sleep(30)
time.sleep(2)
doorstate.send_message({"event":"DOOR_CLOSED"})
time.sleep(2)
logging.info('force the door open')
time.sleep(2)
doorstate.send_message({"event":"DOOR_OPENED"})
time.sleep(2)
doorstate.send_message({"event":"DOOR_CLOSED"})
time.sleep(2)
if __name__=='__main__':
main()
| mit | 492,777,001,027,727,100 | 32.186747 | 105 | 0.656744 | false |
rmvanhees/pynadc | pynadc/scia/hk.py | 1 | 6316 | """
This file is part of pynadc
https://github.com/rmvanhees/pynadc
Routines to convert Sciamachy house-keeping data from raw counts
to physical units.
Copyright (c) 2018 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
from datetime import timedelta
import numpy as np
from bitstring import BitArray
def get_det_temp(channel, raw_tm):
"""
convert raw temperature counts to Kelvin
"""
nch = channel - 1
if nch < 0 or nch > 7:
raise ValueError('channel must be between 1 and 8')
tab_tm = [
(0, 17876, 18312, 18741, 19161, 19574, 19980, 20379,
20771, 21157, 21908, 22636, 24684, 26550, 28259, 65535),
(0, 18018, 18456, 18886, 19309, 19724, 20131, 20532,
20926, 21313, 22068, 22798, 24852, 26724, 28436, 65535),
(0, 20601, 20996, 21384, 21765, 22140, 22509, 22872,
23229, 23581, 23927, 24932, 26201, 27396, 28523, 65535),
(0, 20333, 20725, 21110, 21490, 21863, 22230, 22591,
22946, 23295, 23640, 24640, 25905, 27097, 28222, 65535),
(0, 20548, 20942, 21330, 21711, 22086, 22454, 22817,
23174, 23525, 23871, 24875, 26144, 27339, 28466, 65535),
(0, 17893, 18329, 18758, 19179, 19593, 20000, 20399,
20792, 21178, 21931, 22659, 24709, 26578, 28289, 65535),
(0, 12994, 13526, 14046, 14555, 15054, 15543, 16022,
16492, 17850, 20352, 22609, 24656, 26523, 28232, 65535),
(0, 13129, 13664, 14188, 14702, 15204, 15697, 16180,
16653, 18019, 20536, 22804, 24860, 26733, 28447, 65535)
] # shape (8, 16)
tab_temp = [
(179., 180., 185., 190., 195., 200., 205., 210.,
215., 220., 230., 240., 270., 300., 330., 331.),
(179., 180., 185., 190., 195., 200., 205., 210.,
215., 220., 230., 240., 270., 300., 330., 331.),
(209., 210., 215., 220., 225., 230., 235., 240.,
245., 250., 255., 270., 290., 310., 330., 331.),
(209., 210., 215., 220., 225., 230., 235., 240.,
245., 250., 255., 270., 290., 310., 330., 331.),
(209., 210., 215., 220., 225., 230., 235., 240.,
245., 250., 255., 270., 290., 310., 330., 331.),
(179., 180., 185., 190., 195., 200., 205., 210.,
215., 220., 230., 240., 270., 300., 330., 331.),
(129., 130., 135., 140., 145., 150., 155., 160.,
165., 180., 210., 240., 270., 300., 330., 331.),
(129., 130., 135., 140., 145., 150., 155., 160.,
165., 180., 210., 240., 270., 300., 330., 331.)
] # shape (8, 16)
# use linear interpolation (nothing fancy)
return np.interp(raw_tm, tab_tm[nch], tab_temp[nch])
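# --- Hedged example, not part of the original module ---
# Minimal sketch of how get_det_temp is meant to be called; the raw counts
# below are made-up values chosen to fall inside the channel-1 calibration
# table, so the interpolation returns temperatures between 185 K and 205 K.
def _demo_det_temp():
    raw_counts = np.array([18312, 19161, 19980], dtype=np.uint16)
    temp_k = get_det_temp(1, raw_counts)   # channel 1, counts -> Kelvin
    print('channel 1 temperatures [K]:', temp_k)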
def get_det_vis_pet(chan_hdr):
"""
    convert raw detector timing data to pixel exposure time (VIS)
"""
# The layout of the detector command word for channels 1--5
# 14 bits: exposure time factor (ETF)
# ETF >= 1: pet = etf * 62.5 ms * ratio
# ETF == 0: pet = 31.25 ms
# 2 bits: mode
# 0: Normal Mode
# 1: Normal Mode
# 2: Test Mode
# 3: ADC calibration
# 9 bits: section address (2 pixels resolution)
# start of virtual channel b at 2 * section
# 5 bits: ratio
# ratio of exposure time between virtual channels
# 2 bits: control
# 1: restart of readout cycle
# 3: hardware reset of detector module electronics
#
command = BitArray(uintbe=chan_hdr['command'], length=32)
etf = int(command.bin[0:14], 2)
section = int(command.bin[16:25], 2)
ratio = int(command.bin[25:30], 2)
vir_chan_b = 2 * section
if etf == 0:
return (1 / 32, vir_chan_b)
pet = etf / 16
if section > 0 and ratio > 1:
return ([pet * ratio, pet], vir_chan_b)
return (pet, vir_chan_b)
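# --- Hedged example, not part of the original module ---
# Worked decode of a VIS command word, using a hand-built value instead of a
# real channel header record.  ETF = 8 gives 8 * 62.5 ms = 0.5 s; the mode,
# section, ratio and control fields are left at zero.
def _demo_vis_pet():
    command = 8 << 18      # 14-bit ETF sits in the most significant bits
    pet, vir_chan_b = get_det_vis_pet({'command': command})
    print('VIS pixel exposure time [s]:', pet)
    print('virtual channel b start:', vir_chan_b)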
def get_det_ir_pet(chan_hdr):
"""
    convert raw detector timing data to pixel exposure time (IR)
"""
# The layout of the detector command word for channels 6--8
# 14 bits: exposure time factor (ETF)
# ETF >= 1: pet = etf * 62.5 ms * ratio
# ETF == 0: pet = 31.25 ms
# 2 bits: mode
# 0: Normal Mode
# 1: Hot Mode
# 2: Test Mode
# 3: ADC calibration
# 2 bits: comp. mode, sets the offset compensation
# 0: no offset compensation
# 1: store offset compensation
# 2: use stored offset
# 3: continuous offset
# 3 bits: not used
# 3 bits: fine bias settings [mV]
# (-16, 10, -5, -3, -2, -1, 0, 2)
# 2 bits: not used
# 4 bits: short pixel exposure time for Hot mode
# pet = 28.125e-6 * 2^x with x <= 10
# 2 bits: control
# 1: restart of readout cycle
# 3: hardware reset of detector module electronics
#
command = BitArray(uintbe=chan_hdr['command'], length=32)
etf = int(command.bin[0:14], 2)
mode = int(command.bin[14:16], 2)
spet = int(command.bin[26:30], 2)
# hot mode
if mode == 1:
return 28.125e-6 * 2 ** min(spet, 10)
# normal mode
if etf == 0:
return 1 / 32
return etf / 16
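# --- Hedged example, not part of the original module ---
# Worked decode of an IR command word in Hot mode: mode = 1 with a short
# pixel exposure time exponent of 4 yields 28.125e-6 * 2**4 = 0.45 ms.
# The command value is hand-built for illustration only.
def _demo_ir_pet():
    command = (1 << 16) | (4 << 2)   # mode=1 (hot), spet=4
    pet = get_det_ir_pet({'command': command})
    print('IR pixel exposure time [s]:', pet)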
def mjd_to_datetime(state_id, det_isp):
"""
Calculates datetime at end of each integration time
"""
# BCPS enable delay per instrument state
ri_delay = (0,
86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
86, 86, 86, 86, 86, 86, 86, 86, 111, 86,
303, 86, 86, 86, 86, 86, 86, 86, 111, 303)
# Add BCPS H/W delay (92.765 ms)
_ri = 0.092765 + ri_delay[state_id] / 256
# the function datetime.timedelta only accepts Python integers
mst_time = np.full(det_isp.size, np.datetime64('2000', 'us'))
for ni, dsr in enumerate(det_isp):
days = int(dsr['mjd']['days'])
secnds = int(dsr['mjd']['secnds'])
musec = int(dsr['mjd']['musec']
+ 1e6 * (dsr['chan_data']['hdr']['bcps'][0] / 16 + _ri))
mst_time[ni] += np.timedelta64(timedelta(days, secnds, musec))
return mst_time
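# --- Hedged example, not part of the original module ---
# Sketch of calling mjd_to_datetime with a hand-made structured array whose
# dtype only mimics the fields the function touches (mjd.days/secnds/musec
# and chan_data.hdr.bcps); real level-0 ISP records are considerably richer.
def _demo_mjd_to_datetime():
    mjd_dtype = np.dtype([('days', 'i4'), ('secnds', 'u4'), ('musec', 'u4')])
    hdr_dtype = np.dtype([('bcps', 'u2', (2,))])
    isp_dtype = np.dtype([('mjd', mjd_dtype),
                          ('chan_data', np.dtype([('hdr', hdr_dtype)]))])
    det_isp = np.zeros(2, dtype=isp_dtype)
    det_isp['mjd']['days'] = [5000, 5000]      # days since 2000-01-01
    det_isp['mjd']['secnds'] = [3600, 7200]
    det_isp['chan_data']['hdr']['bcps'] = [[16, 0], [32, 0]]
    print(mjd_to_datetime(1, det_isp))         # state_id 1 -> 86/256 s delay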
| bsd-3-clause | -2,944,959,474,597,993,500 | 34.483146 | 76 | 0.542749 | false |
40223233/2015cd_midterm | wsgi.py | 1 | 26126 | #@+leo-ver=5-thin
#@+node:2014fall.20141212095015.1775: * @file wsgi.py
# coding=utf-8
# 上面的程式內容編碼必須在程式的第一或者第二行才會有作用
################# (1) 模組導入區
# 導入 cherrypy 模組, 為了在 OpenShift 平台上使用 cherrypy 模組, 必須透過 setup.py 安裝
#@@language python
#@@tabwidth -4
#@+<<declarations>>
#@+node:2014fall.20141212095015.1776: ** <<declarations>> (wsgi)
import cherrypy
# 導入 Python 內建的 os 模組, 因為 os 模組為 Python 內建, 所以無需透過 setup.py 安裝
import os
# 導入 random 模組
import random
# 導入 gear 模組
import gear
################# (2) 廣域變數設定區
# 確定程式檔案所在目錄, 在 Windows 下有最後的反斜線
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# 設定在雲端與近端的資料儲存目錄
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# 表示程式在雲端執行
download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
# 表示程式在近端執行
download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
'''以下為近端 input() 與 for 迴圈應用的程式碼, 若要將程式送到 OpenShift 執行, 除了採用 CherryPy 網際框架外, 還要轉為 html 列印
# 利用 input() 取得的資料型別為字串
toprint = input("要印甚麼內容?")
# 若要將 input() 取得的字串轉為整數使用, 必須利用 int() 轉換
repeat_no = int(input("重複列印幾次?"))
for i in range(repeat_no):
print(toprint)
'''
#@-<<declarations>>
#@+others
#@+node:2014fall.20141212095015.1777: ** class Hello
################# (3) 程式類別定義區
# 以下改用 CherryPy 網際框架程式架構
# 以下為 Hello 類別的設計內容, 其中的 object 使用, 表示 Hello 類別繼承 object 的所有特性, 包括方法與屬性設計
class Hello(object):
# Hello 類別的啟動設定
_cp_config = {
'tools.encode.encoding': 'utf-8',
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
#'tools.sessions.locking' : 'explicit',
# session 以檔案儲存, 而且位於 data_dir 下的 tmp 目錄
'tools.sessions.storage_path' : data_dir+'/tmp',
# session 有效時間設為 60 分鐘
'tools.sessions.timeout' : 60
}
#@+others
#@+node:2014fall.20141212095015.2004: *3* __init__
def __init__(self):
# 配合透過案例啟始建立所需的目錄
if not os.path.isdir(data_dir+'/tmp'):
os.mkdir(data_dir+'/tmp')
if not os.path.isdir(data_dir+"/downloads"):
os.mkdir(data_dir+"/downloads")
if not os.path.isdir(data_dir+"/images"):
os.mkdir(data_dir+"/images")
#@+node:2014fall.20141212095015.1778: *3* index_orig
# 以 @ 開頭的 cherrypy.expose 為 decorator, 用來表示隨後的成員方法, 可以直接讓使用者以 URL 連結執行
@cherrypy.expose
# index 方法為 CherryPy 各類別成員方法中的內建(default)方法, 當使用者執行時未指定方法, 系統將會優先執行 index 方法
# 有 self 的方法為類別中的成員方法, Python 程式透過此一 self 在各成員方法間傳遞物件內容
def index_orig(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141212095015.1779: *3* hello
@cherrypy.expose
def hello(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141215194146.1791: *3* index
@cherrypy.expose
def twoDgear(self, guess=None):
# 將標準答案存入 answer session 對應區
theanswer = random.randint(1, 100)
thecount = 0
# 將答案與計算次數變數存進 session 對應變數
cherrypy.session['answer'] = theanswer
cherrypy.session['count'] = thecount
# 印出讓使用者輸入的超文件表單
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=doCheck>
請輸入您所猜的整數:<input type=text name=guess><br />
<input type=submit value=send>
</form>
<hr>
<!-- 以下在網頁內嵌 Brython 程式 -->
<script type="text/python">
from browser import document, alert
def echo(ev):
alert(document["zone"].value)
# 將文件中名稱為 mybutton 的物件, 透過 click 事件與 echo 函式 bind 在一起
document['mybutton'].bind('click',echo)
</script>
<input id="zone"><button id="mybutton">click !</button>
<hr>
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
# 以下使用中文變數名稱
畫布 = document["plotarea"]
ctx = 畫布.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1713: *3* twoDgear
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def index(self, N=None, M=None, P=None):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=mygeartest>
<p>學號:40223233 姓名:彭于亮
<p>齒數:
<select name=N>
<option>10
<option>20
<option>30
<option>40
<option>50
</select>
<p>模數:
<select name=M>
<option>2
<option>4
<option>6
<option>8
<option>10
</select>
<p>壓力角:
<select name=P>
<option>10
<option>12
<option>14
<option>16
<option>18
<option>20
</select>
</br>
<input type=submit value=送出>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1733: *3* threeDgear
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def threeDgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=do3Dgear>
齒數:<input type=text name=N><br />
模數:<input type=text name=M><br />
壓力角:<input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1762: *3* do2Dgear
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def do2Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
'''
outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1735: *3* do3Dgear
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def do3Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
'''
outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1765: *3* mygeartest
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def mygeartest(self, N=50, M=5, P=20):
D = int(N)*int(M)
outstring = '''
<!DOCTYPE html>
<html>
<head>
齒輪已完成。
<a href='index'>返回上一頁</a>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# 導入數學函式後, 圓周率為 pi
# deg 為角度轉為徑度的轉換因子
deg = pi/180.
#
# 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖
#
# 定義一個繪正齒輪的繪圖函式
# midx 為齒輪圓心 x 座標
# midy 為齒輪圓心 y 座標
# rp 為節圓半徑, n 為齒數
def 齒輪(midx, midy, rp, n, 顏色):
# 將角度轉換因子設為全域變數
global deg
# 齒輪漸開線分成 15 線段繪製
imax = 15
# 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線
create_line(midx, midy, midx, midy-rp)
# 畫出 rp 圓, 畫圓函式尚未定義
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數
# 模數也就是齒冠大小
a=2*rp/n
# d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍
d=2.5*rp/n
# ra 為齒輪的外圍半徑
ra=rp+a
print("ra:", ra)
# 畫出 ra 圓, 畫圓函式尚未定義
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb 則為齒輪的基圓半徑
# 基圓為漸開線長齒之基準圓
rb=rp*cos('''+str(P)+'''*deg)
print("rp:", rp)
print("rb:", rb)
# 畫出 rb 圓 (基圓), 畫圓函式尚未定義
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd 為齒根圓半徑
rd=rp-d
# 當 rd 大於 rb 時
print("rd:", rd)
# 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小
# 將圓弧分成 imax 段來繪製漸開線
dr=(ra-rb)/imax
# tan(20*deg)-20*deg 為漸開線函數
sigma=pi/(2*n)+tan('''+str(P)+'''*deg)-'''+str(P)+'''*deg
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
# lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標
# 下列為齒根圓上用來近似圓弧的直線
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標
# 下列為齒頂圓上用來近似圓弧的直線
create_line(lfx,lfy,rfx,rfy,fill=顏色)
齒輪(500,500,'''+str(D)+''','''+str(N)+''',"black")
</script>
<canvas id="plotarea" width="2000" height="1000"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1737: *3* my3Dgeartest
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def my3Dgeartest(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# 導入數學函式後, 圓周率為 pi
# deg 為角度轉為徑度的轉換因子
deg = pi/180.
#
# 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖
#
# 定義一個繪正齒輪的繪圖函式
# midx 為齒輪圓心 x 座標
# midy 為齒輪圓心 y 座標
# rp 為節圓半徑, n 為齒數
def 齒輪(midx, midy, rp, n, 顏色):
# 將角度轉換因子設為全域變數
global deg
# 齒輪漸開線分成 15 線段繪製
imax = 15
# 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線
create_line(midx, midy, midx, midy-rp)
# 畫出 rp 圓, 畫圓函式尚未定義
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數
# 模數也就是齒冠大小
a=2*rp/n
# d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍
d=2.5*rp/n
# ra 為齒輪的外圍半徑
ra=rp+a
print("ra:", ra)
# 畫出 ra 圓, 畫圓函式尚未定義
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb 則為齒輪的基圓半徑
# 基圓為漸開線長齒之基準圓
rb=rp*cos(20*deg)
print("rp:", rp)
print("rb:", rb)
# 畫出 rb 圓 (基圓), 畫圓函式尚未定義
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd 為齒根圓半徑
rd=rp-d
# 當 rd 大於 rb 時
print("rd:", rd)
# 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小
# 將圓弧分成 imax 段來繪製漸開線
dr=(ra-rb)/imax
# tan(20*deg)-20*deg 為漸開線函數
sigma=pi/(2*n)+tan(20*deg)-20*deg
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
# lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標
# 下列為齒根圓上用來近似圓弧的直線
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標
# 下列為齒頂圓上用來近似圓弧的直線
create_line(lfx,lfy,rfx,rfy,fill=顏色)
齒輪(400,400,300,41,"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
return outstring
#@+node:2014fall.20141215194146.1793: *3* doCheck
@cherrypy.expose
def doCheck(self, guess=None):
# 假如使用者直接執行 doCheck, 則設法轉回根方法
if guess is None:
raise cherrypy.HTTPRedirect("/")
# 從 session 取出 answer 對應資料, 且處理直接執行 doCheck 時無法取 session 值情況
try:
theanswer = int(cherrypy.session.get('answer'))
except:
raise cherrypy.HTTPRedirect("/")
# 經由表單所取得的 guess 資料型別為 string
try:
theguess = int(guess)
except:
return "error " + self.guessform()
# 每執行 doCheck 一次,次數增量一次
cherrypy.session['count'] += 1
# 答案與所猜數字進行比對
if theanswer < theguess:
return "big " + self.guessform()
elif theanswer > theguess:
return "small " + self.guessform()
else:
# 已經猜對, 從 session 取出累計猜測次數
thecount = cherrypy.session.get('count')
return "exact: <a href=''>再猜</a>"
#@+node:2014fall.20141215194146.1789: *3* guessform
def guessform(self):
# 印出讓使用者輸入的超文件表單
outstring = str(cherrypy.session.get('answer')) + "/" + str(cherrypy.session.get('count')) + '''<form method=POST action=doCheck>
請輸入您所猜的整數:<input type=text name=guess><br />
<input type=submit value=send>
</form>'''
return outstring
#@-others
#@-others
################# (4) 程式啟動區
# 配合程式檔案所在目錄設定靜態目錄或靜態檔案
application_conf = {'/static':{
'tools.staticdir.on': True,
# 程式執行目錄下, 必須自行建立 static 目錄
'tools.staticdir.dir': _curdir+"/static"},
'/downloads':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/downloads"},
'/images':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/images"}
}
root = Hello()
root.gear = gear.Gear()
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# 表示在 OpenSfhit 執行
application = cherrypy.Application(root, config=application_conf)
else:
# 表示在近端執行
cherrypy.quickstart(root, config=application_conf)
#@-leo
| gpl-2.0 | -7,574,877,212,437,480,000 | 28.628342 | 137 | 0.549093 | false |
opentrials/processors | processors/base/writers/fda_application.py | 1 | 1620 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from . import write_organisation
from .. import helpers
logger = logging.getLogger(__name__)
# Module API
def write_fda_application(conn, fda_application, source_id):
"""Write fda_application to database.
Args:
conn (dict): connection dict
fda_application (dict): normalized data
source_id (str): data source id
Raises:
KeyError: if data structure is not valid
Returns:
str/None: object identifier/if not written (skipped)
"""
if 'organisation' in fda_application:
organisation_name = fda_application['organisation']
del fda_application['organisation']
slug = helpers.slugify_string(organisation_name)
organisation = conn['database']['organisations'].find_one(slug=slug)
if not organisation:
organisation = {
'name': organisation_name,
'slug': slug,
}
organisation_id = write_organisation(conn, organisation, source_id)
else:
organisation_id = organisation['id']
fda_application['organisation_id'] = organisation_id
conn['database']['fda_applications'].upsert(fda_application,
['id'],
ensure=False)
# Log debug
logger.debug('FDA Application upserted: %s', fda_application['id'])
return fda_application['id']
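# --- Hedged usage sketch, not part of the original module ---
# Illustrates the expected shape of the arguments, assuming the tables behind
# conn['database'] come from the `dataset` library.  The connection URL,
# application id, organisation name and source id are all made up.
def _example_write_fda_application():
    import dataset
    conn = {'database': dataset.connect('sqlite:///:memory:')}
    fda_application = {
        'id': 'NDA000000',
        'organisation': 'Example Pharma Inc',
    }
    return write_fda_application(conn, fda_application, 'example-source')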
| mit | -2,835,677,600,094,138,400 | 29.566038 | 79 | 0.605556 | false |
jparyani/Mailpile | mailpile/plugins/setup_magic.py | 1 | 40822 | import os
import random
import sys
import datetime
from urllib import urlencode
import mailpile.auth
from mailpile.defaults import CONFIG_RULES
from mailpile.i18n import ListTranslations, ActivateTranslation, gettext
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.plugins import PluginManager
from mailpile.plugins import PLUGINS
from mailpile.plugins.contacts import AddProfile
from mailpile.plugins.contacts import ListProfiles
from mailpile.plugins.migrate import Migrate
from mailpile.plugins.tags import AddTag
from mailpile.commands import Command
from mailpile.config import SecurePassphraseStorage
from mailpile.crypto.gpgi import GnuPG, SignatureInfo, EncryptionInfo
from mailpile.crypto.gpgi import GnuPGKeyGenerator, GnuPGKeyEditor
from mailpile.httpd import BLOCK_HTTPD_LOCK, Idle_HTTPD
from mailpile.smtp_client import SendMail, SendMailError
from mailpile.urlmap import UrlMap
from mailpile.ui import Session, SilentInteraction
from mailpile.util import *
_ = lambda s: s
_plugins = PluginManager(builtin=__file__)
##[ Commands ]################################################################
class SetupMagic(Command):
"""Perform initial setup"""
SYNOPSIS = (None, None, None, None)
ORDER = ('Internals', 0)
LOG_PROGRESS = True
TAGS = {
'New': {
'type': 'unread',
'label': False,
'display': 'invisible',
'icon': 'icon-new',
'label_color': '03-gray-dark',
'name': _('New'),
},
'Inbox': {
'type': 'inbox',
'display': 'priority',
'display_order': 2,
'icon': 'icon-inbox',
'label_color': '06-blue',
'name': _('Inbox'),
},
'Blank': {
'type': 'blank',
'flag_editable': True,
'display': 'invisible',
'name': _('Blank'),
},
'Drafts': {
'type': 'drafts',
'flag_editable': True,
'display': 'priority',
'display_order': 1,
'icon': 'icon-compose',
'label_color': '03-gray-dark',
'name': _('Drafts'),
},
'Outbox': {
'type': 'outbox',
'display': 'priority',
'display_order': 3,
'icon': 'icon-outbox',
'label_color': '06-blue',
'name': _('Outbox'),
},
'Sent': {
'type': 'sent',
'display': 'priority',
'display_order': 4,
'icon': 'icon-sent',
'label_color': '03-gray-dark',
'name': _('Sent'),
},
'Spam': {
'type': 'spam',
'flag_hides': True,
'display': 'priority',
'display_order': 5,
'icon': 'icon-spam',
'label_color': '10-orange',
'name': _('Spam'),
},
'MaybeSpam': {
'display': 'invisible',
'icon': 'icon-spam',
'label_color': '10-orange',
'name': _('MaybeSpam'),
},
'Ham': {
'type': 'ham',
'display': 'invisible',
'name': _('Ham'),
},
'Trash': {
'type': 'trash',
'flag_hides': True,
'display': 'priority',
'display_order': 6,
'icon': 'icon-trash',
'label_color': '13-brown',
'name': _('Trash'),
},
# These are magical tags that perform searches and show
# messages in contextual views.
'All Mail': {
'type': 'tag',
'icon': 'icon-logo',
'label_color': '06-blue',
'search_terms': 'all:mail',
'name': _('All Mail'),
'display_order': 1000,
},
'Photos': {
'type': 'tag',
'icon': 'icon-photos',
'label_color': '08-green',
'search_terms': 'att:jpg',
'name': _('Photos'),
'template': 'photos',
'display_order': 1001,
},
'Files': {
'type': 'tag',
'icon': 'icon-document',
'label_color': '06-blue',
'search_terms': 'has:attachment',
'name': _('Files'),
'template': 'files',
'display_order': 1002,
},
'Links': {
'type': 'tag',
'icon': 'icon-links',
'label_color': '12-red',
'search_terms': 'http',
'name': _('Links'),
'display_order': 1003,
},
# These are internal tags, used for tracking user actions on
# messages, as input for machine learning algorithms. These get
# automatically added, and may be automatically removed as well
# to keep the working sets reasonably small.
'mp_rpl': {'type': 'replied', 'label': False, 'display': 'invisible'},
'mp_fwd': {'type': 'fwded', 'label': False, 'display': 'invisible'},
'mp_tag': {'type': 'tagged', 'label': False, 'display': 'invisible'},
'mp_read': {'type': 'read', 'label': False, 'display': 'invisible'},
'mp_ham': {'type': 'ham', 'label': False, 'display': 'invisible'},
}
def basic_app_config(self, session,
save_and_update_workers=True,
want_daemons=True):
# Create local mailboxes
session.config.open_local_mailbox(session)
# Create standard tags and filters
created = []
for t in self.TAGS:
if not session.config.get_tag_id(t):
AddTag(session, arg=[t]).run(save=False)
created.append(t)
session.config.get_tag(t).update(self.TAGS[t])
for stype, statuses in (('sig', SignatureInfo.STATUSES),
('enc', EncryptionInfo.STATUSES)):
for status in statuses:
tagname = 'mp_%s-%s' % (stype, status)
if not session.config.get_tag_id(tagname):
AddTag(session, arg=[tagname]).run(save=False)
created.append(tagname)
session.config.get_tag(tagname).update({
'type': 'attribute',
'display': 'invisible',
'label': False,
})
if 'New' in created:
session.ui.notify(_('Created default tags'))
# Import all the basic plugins
reload_config = False
for plugin in PLUGINS:
if plugin not in session.config.sys.plugins:
session.config.sys.plugins.append(plugin)
reload_config = True
for plugin in session.config.plugins.WANTED:
if plugin in session.config.plugins.available():
session.config.sys.plugins.append(plugin)
if reload_config:
with session.config._lock:
session.config.save()
session.config.load(session)
try:
# If spambayes is not installed, this will fail
import mailpile.plugins.autotag_sb
if 'autotag_sb' not in session.config.sys.plugins:
session.config.sys.plugins.append('autotag_sb')
session.ui.notify(_('Enabling spambayes autotagger'))
except ImportError:
session.ui.warning(_('Please install spambayes '
'for super awesome spam filtering'))
vcard_importers = session.config.prefs.vcard.importers
if not vcard_importers.gravatar:
vcard_importers.gravatar.append({'active': True})
session.ui.notify(_('Enabling gravatar image importer'))
gpg_home = os.path.expanduser('~/.gnupg')
if os.path.exists(gpg_home) and not vcard_importers.gpg:
vcard_importers.gpg.append({'active': True,
'gpg_home': gpg_home})
session.ui.notify(_('Importing contacts from GPG keyring'))
if ('autotag_sb' in session.config.sys.plugins and
len(session.config.prefs.autotag) == 0):
session.config.prefs.autotag.append({
'match_tag': 'spam',
'unsure_tag': 'maybespam',
'tagger': 'spambayes',
'trainer': 'spambayes'
})
session.config.prefs.autotag[0].exclude_tags[0] = 'ham'
if save_and_update_workers:
session.config.save()
session.config.prepare_workers(session, daemons=want_daemons)
def setup_command(self, session, do_gpg_stuff=False):
do_gpg_stuff = do_gpg_stuff or ('do_gpg_stuff' in self.args)
# Stop the workers...
want_daemons = session.config.cron_worker is not None
session.config.stop_workers()
# Perform any required migrations
Migrate(session).run(before_setup=True, after_setup=False)
# Basic app config, tags, plugins, etc.
self.basic_app_config(session,
save_and_update_workers=False,
want_daemons=want_daemons)
# Assumption: If you already have secret keys, you want to
# use the associated addresses for your e-mail.
# If you don't already have secret keys, you should have
# one made for you, if GnuPG is available.
# If GnuPG is not available, you should be warned.
if do_gpg_stuff:
gnupg = GnuPG(None)
accepted_keys = []
if gnupg.is_available():
keys = gnupg.list_secret_keys()
cutoff = (datetime.date.today() + datetime.timedelta(days=365)
).strftime("%Y-%m-%d")
for key, details in keys.iteritems():
# Ignore revoked/expired/disabled keys.
revoked = details.get('revocation_date')
expired = details.get('expiration_date')
if (details.get('disabled') or
(revoked and revoked <= cutoff) or
(expired and expired <= cutoff)):
continue
accepted_keys.append(key)
for uid in details["uids"]:
if "email" not in uid or uid["email"] == "":
continue
if uid["email"] in [x["email"]
for x in session.config.profiles]:
# Don't set up the same e-mail address twice.
continue
# FIXME: Add route discovery mechanism.
profile = {
"email": uid["email"],
"name": uid["name"],
}
session.config.profiles.append(profile)
if (session.config.prefs.gpg_recipient in (None, '', '!CREATE')
and details["capabilities_map"]["encrypt"]):
session.config.prefs.gpg_recipient = key
session.ui.notify(_('Encrypting config to %s') % key)
if session.config.prefs.crypto_policy == 'none':
session.config.prefs.crypto_policy = 'openpgp-sign'
if len(accepted_keys) == 0:
# FIXME: Start background process generating a key once a
# user has supplied a name and e-mail address.
pass
else:
session.ui.warning(_('Oh no, PGP/GPG support is unavailable!'))
# If we have a GPG key, but no master key, create it
self.make_master_key()
# Perform any required migrations
Migrate(session).run(before_setup=False, after_setup=True)
session.config.save()
session.config.prepare_workers(session, daemons=want_daemons)
return self._success(_('Performed initial Mailpile setup'))
def make_master_key(self):
session = self.session
if (session.config.prefs.gpg_recipient not in (None, '', '!CREATE')
and not session.config.master_key
and not session.config.prefs.obfuscate_index):
#
# This secret is arguably the most critical bit of data in the
# app, it is used as an encryption key and to seed hashes in
# a few places. As such, the user may need to type this in
# manually as part of data recovery, so we keep it reasonably
# sized and devoid of confusing chars.
#
# The strategy below should give about 281 bits of randomness:
#
# import math
# math.log((25 + 25 + 8) ** (12 * 4), 2) == 281.183...
#
secret = ''
chars = 12 * 4
while len(secret) < chars:
secret = sha512b64(os.urandom(1024),
'%s' % session.config,
'%s' % time.time())
secret = CleanText(secret,
banned=CleanText.NONALNUM + 'O01l'
).clean[:chars]
session.config.master_key = secret
if self._idx() and self._idx().INDEX:
session.ui.warning(_('Unable to obfuscate search index '
'without losing data. Not indexing '
'encrypted mail.'))
else:
session.config.prefs.obfuscate_index = True
session.config.prefs.index_encrypted = True
session.ui.notify(_('Obfuscating search index and enabling '
'indexing of encrypted e-mail. Yay!'))
return True
else:
return False
def command(self, *args, **kwargs):
session = self.session
if session.config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
return self.setup_command(session, *args, **kwargs)
class TestableWebbable(SetupMagic):
HTTP_AUTH_REQUIRED = 'Maybe'
HTTP_CALLABLE = ('GET', )
HTTP_QUERY_VARS = {
'_path': 'Redirect path'
}
HTTP_POST_VARS = {
'testing': 'Yes or No, if testing',
'advance': 'Yes or No, advance setup flow',
}
TRUTHY = {
'0': False, 'no': False, 'fuckno': False, 'false': False,
'1': True, 'yes': True, 'hellyeah': True, 'true': True,
}
def _advance(self):
path = self.data.get('_path', [None])[0]
data = dict([(k, v) for k, v in self.data.iteritems()
if k not in self.HTTP_POST_VARS
and k not in ('_method',)])
nxt = Setup.Next(self.session.config, None, needed_auth=False)
if nxt:
url = '/%s/' % nxt.SYNOPSIS[2]
elif path and path != '/%s/' % Setup.SYNOPSIS[2]:
# Use the same redirection logic as the Authenticator
mailpile.auth.Authenticate.RedirectBack(path, data)
else:
url = '/'
qs = urlencode([(k, v) for k, vl in data.iteritems() for v in vl])
raise UrlRedirectException(''.join([url, '?%s' % qs if qs else '']))
def _success(self, message, result=True, advance=False):
if (advance or
self.TRUTHY.get(self.data.get('advance', ['no'])[0].lower())):
self._advance()
return SetupMagic._success(self, message, result=result)
def _testing(self):
self._testing_yes(lambda: True)
return (self.testing is not None)
def _testing_yes(self, method, *args, **kwargs):
testination = self.data.get('testing')
if testination:
self.testing = random.randint(0, 1)
if testination[0].lower() in self.TRUTHY:
self.testing = self.TRUTHY[testination[0].lower()]
return self.testing
self.testing = None
return method(*args, **kwargs)
def _testing_data(self, method, tdata, *args, **kwargs):
result = self._testing_yes(method, *args, **kwargs) or []
return (result
if (self.testing is None) else
(self.testing and tdata or []))
def setup_command(self, session):
raise Exception('FIXME')
class SetupGetEmailSettings(TestableWebbable):
"""Guess server details for an e-mail address"""
SYNOPSIS = (None, 'setup/email_servers', 'setup/email_servers', None)
HTTP_CALLABLE = ('GET', )
HTTP_QUERY_VARS = dict_merge(TestableWebbable.HTTP_QUERY_VARS, {
'email': 'E-mail address'
})
TEST_DATA = {
'imap_host': 'imap.wigglebonk.com',
'imap_port': 993,
'imap_tls': True,
'pop3_host': 'pop3.wigglebonk.com',
'pop3_port': 110,
'pop3_tls': False,
'smtp_host': 'smtp.wigglebonk.com',
'smtp_port': 465,
'smtp_tls': False
}
def _get_domain_settings(self, domain):
raise Exception('FIXME')
def setup_command(self, session):
results = {}
for email in list(self.args) + self.data.get('email'):
settings = self._testing_data(self._get_domain_settings,
self.TEST_DATA, email)
if settings:
results[email] = settings
if results:
self._success(_('Found settings for %d addresses'), results)
else:
self._error(_('No settings found'))
class SetupWelcome(TestableWebbable):
SYNOPSIS = (None, None, 'setup/welcome', None)
HTTP_CALLABLE = ('GET', 'POST')
HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS, {
'language': 'Language selection'
})
def bg_setup_stage_1(self):
# Wait a bit, so the user has something to look at befor we
# block the web server and do real work.
time.sleep(2)
# Intial configuration of app goes here...
if not self.session.config.tags:
with BLOCK_HTTPD_LOCK, Idle_HTTPD(allowed=0):
self.basic_app_config(self.session)
# Next, if we have any secret GPG keys, extract all the e-mail
# addresses and create a profile for each one.
with BLOCK_HTTPD_LOCK, Idle_HTTPD(allowed=0):
SetupProfiles(self.session).auto_create_profiles()
def setup_command(self, session):
config = session.config
if self.data.get('_method') == 'POST' or self._testing():
language = self.data.get('language', [''])[0]
if language:
try:
i18n = lambda: ActivateTranslation(session, config,
language)
if not self._testing_yes(i18n):
raise ValueError('Failed to configure i18n')
config.prefs.language = language
if not self._testing():
self._background_save(config=True)
except ValueError:
return self._error(_('Invalid language: %s') % language)
config.slow_worker.add_unique_task(
session, 'Setup, Stage 1', lambda: self.bg_setup_stage_1())
results = {
'languages': ListTranslations(config),
'language': config.prefs.language
}
return self._success(_('Welcome to Mailpile!'), results)
class SetupCrypto(TestableWebbable):
SYNOPSIS = (None, None, 'setup/crypto', None)
HTTP_CALLABLE = ('GET', 'POST')
HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS, {
'choose_key': 'Select an existing key to use',
'passphrase': 'Specify a passphrase',
'passphrase_confirm': 'Confirm the passphrase',
'index_encrypted': 'y/n: index encrypted mail?',
# 'obfuscate_index': 'y/n: obfuscate keywords?', # Omitted do to DANGER
'encrypt_mail': 'y/n: encrypt locally stored mail?',
'encrypt_index': 'y/n: encrypt search index?',
'encrypt_vcards': 'y/n: encrypt vcards?',
'encrypt_events': 'y/n: encrypt event log?',
'encrypt_misc': 'y/n: encrypt plugin and misc data?'
})
TEST_DATA = {}
def list_secret_keys(self):
cutoff = (datetime.date.today() + datetime.timedelta(days=365)
).strftime("%Y-%m-%d")
keylist = {}
for key, details in self._gnupg().list_secret_keys().iteritems():
# Ignore (soon to be) revoked/expired/disabled keys.
revoked = details.get('revocation_date')
expired = details.get('expiration_date')
if (details.get('disabled') or
(revoked and revoked <= cutoff) or
(expired and expired <= cutoff)):
continue
# Ignore keys that cannot both encrypt and sign
caps = details["capabilities_map"]
if not caps["encrypt"] or not caps["sign"]:
continue
keylist[key] = details
return keylist
def gpg_key_ready(self, gpg_keygen):
if not gpg_keygen.failed:
self.session.config.prefs.gpg_recipient = gpg_keygen.generated_key
self.make_master_key()
self._background_save(config=True)
self.save_profiles_to_key()
def save_profiles_to_key(self, key_id=None, add_all=False, now=False,
profiles=None):
if key_id is None:
if (Setup.KEY_CREATING_THREAD and
not Setup.KEY_CREATING_THREAD.failed):
key_id = Setup.KEY_CREATING_THREAD.generated_key
add_all = True
if not add_all:
self.session.ui.warning('FIXME: Not updating GPG key!')
return
if key_id is not None:
uids = []
data = ListProfiles(self.session).run().result
for profile in data['profiles']:
uids.append({
'name': profile["fn"],
'email': profile["email"][0]["email"],
'comment': profile.get('note', '')
})
if not uids:
return
editor = GnuPGKeyEditor(key_id, set_uids=uids,
sps=self.session.config.gnupg_passphrase,
deletes=max(10, 2*len(uids)))
def start_editor(*unused_args):
with Setup.KEY_WORKER_LOCK:
Setup.KEY_EDITING_THREAD = editor
editor.start()
with Setup.KEY_WORKER_LOCK:
if now:
start_editor()
elif Setup.KEY_EDITING_THREAD is not None:
Setup.KEY_EDITING_THREAD.on_complete('edit keys',
start_editor)
elif Setup.KEY_CREATING_THREAD is not None:
Setup.KEY_CREATING_THREAD.on_complete('edit keys',
start_editor)
else:
start_editor()
def setup_command(self, session):
changed = authed = False
results = {
'secret_keys': self.list_secret_keys(),
}
error_info = None
if self.data.get('_method') == 'POST' or self._testing():
# 1st, are we choosing or creating a new key?
choose_key = self.data.get('choose_key', [''])[0]
if choose_key and not error_info:
if (choose_key not in results['secret_keys'] and
choose_key != '!CREATE'):
error_info = (_('Invalid key'), {
'invalid_key': True,
'chosen_key': choose_key
})
# 2nd, check authentication...
#
# FIXME: Creating a new key will allow a malicious actor to
# bypass authentication and change settings.
#
try:
passphrase = self.data.get('passphrase', [''])[0]
passphrase2 = self.data.get('passphrase_confirm', [''])[0]
chosen_key = ((not error_info) and choose_key
) or session.config.prefs.gpg_recipient
if not error_info:
assert(passphrase == passphrase2)
if chosen_key == '!CREATE':
assert(passphrase != '')
sps = SecurePassphraseStorage(passphrase)
elif chosen_key:
sps = mailpile.auth.VerifyAndStorePassphrase(
session.config,
passphrase=passphrase,
key=chosen_key)
else:
sps = mailpile.auth.VerifyAndStorePassphrase(
session.config, passphrase=passphrase)
if not chosen_key:
choose_key = '!CREATE'
results['updated_passphrase'] = True
session.config.gnupg_passphrase.data = sps.data
mailpile.auth.SetLoggedIn(self)
except AssertionError:
error_info = (_('Invalid passphrase'), {
'invalid_passphrase': True,
'chosen_key': session.config.prefs.gpg_recipient
})
# 3rd, if necessary master key and/or GPG key
with BLOCK_HTTPD_LOCK, Idle_HTTPD():
if choose_key and not error_info:
session.config.prefs.gpg_recipient = choose_key
# FIXME: This should probably only happen if the GPG
# key was successfully created.
self.make_master_key()
changed = True
with Setup.KEY_WORKER_LOCK:
if ((not error_info) and
(session.config.prefs.gpg_recipient
== '!CREATE') and
(Setup.KEY_CREATING_THREAD is None or
Setup.KEY_CREATING_THREAD.failed)):
gk = GnuPGKeyGenerator(
sps=session.config.gnupg_passphrase,
on_complete=('notify',
lambda: self.gpg_key_ready(gk)))
Setup.KEY_CREATING_THREAD = gk
Setup.KEY_CREATING_THREAD.start()
# Finally we update misc. settings
for key in self.HTTP_POST_VARS.keys():
# FIXME: This should probably only happen if the GPG
# key was successfully created.
# Continue iff all is well...
if error_info:
break
if key in (['choose_key', 'passphrase', 'passphrase_confirm'] +
TestableWebbable.HTTP_POST_VARS.keys()):
continue
try:
val = self.data.get(key, [''])[0]
if val:
session.config.prefs[key] = self.TRUTHY[val.lower()]
changed = True
except (ValueError, KeyError):
error_info = (_('Invalid preference'), {
'invalid_setting': True,
'variable': key
})
results.update({
'creating_key': (Setup.KEY_CREATING_THREAD is not None and
Setup.KEY_CREATING_THREAD.running),
'creating_failed': (Setup.KEY_CREATING_THREAD is not None and
Setup.KEY_CREATING_THREAD.failed),
'chosen_key': session.config.prefs.gpg_recipient,
'prefs': {
'index_encrypted': session.config.prefs.index_encrypted,
'obfuscate_index': session.config.prefs.obfuscate_index,
'encrypt_mail': session.config.prefs.encrypt_mail,
'encrypt_index': session.config.prefs.encrypt_index,
'encrypt_vcards': session.config.prefs.encrypt_vcards,
'encrypt_events': session.config.prefs.encrypt_events,
'encrypt_misc': session.config.prefs.encrypt_misc
}
})
if changed:
self._background_save(config=True)
if error_info:
return self._error(error_info[0],
info=error_info[1], result=results)
elif changed:
return self._success(_('Updated crypto preferences'), results)
else:
return self._success(_('Configure crypto preferences'), results)
class SetupProfiles(SetupCrypto):
SYNOPSIS = (None, None, 'setup/profiles', None)
HTTP_AUTH_REQUIRED = True
HTTP_CALLABLE = ('GET', 'POST')
HTTP_QUERY_VARS = dict_merge(TestableWebbable.HTTP_QUERY_VARS, {
})
HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS, {
'email': 'Create a profile for this e-mail address',
'name': 'Name associated with this e-mail',
'note': 'Profile note',
'pass': 'Password for remote accounts',
'route_id': 'Route ID for sending mail',
})
TEST_DATA = {}
# This is where we cache the passwords we are given, for use later.
# This is deliberately made a singleton on the class.
PASSWORD_CACHE = {}
def _auto_configurable(self, email):
# FIXME: Actually look things up, this is super lame
return email.endswith('@gmail.com')
def get_profiles(self, secret_keys=None):
data = ListProfiles(self.session).run().result
profiles = {}
for rid, ofs in data["rids"].iteritems():
profile = data["profiles"][ofs]
email = profile["email"][0]["email"]
name = profile["fn"]
note = profile.get('note', '')
profiles[rid] = {
"name": name,
"note": note,
"pgp_keys": [], # FIXME
"email": email,
"route_id": profile.get('x-mailpile-profile-route', ''),
"photo": profile.get('photo', [{}])[0].get('photo', ''),
"auto_configurable": self._auto_configurable(email)
}
for key, info in (secret_keys or {}).iteritems():
for uid in info['uids']:
email = uid.get('email')
if email in profiles:
profiles[email]["pgp_keys"].append(key)
return profiles
def discover_new_email_addresses(self, profiles):
addresses = {}
existing = set([p['email'] for p in profiles.values()])
for key, info in self.list_secret_keys().iteritems():
for uid in info['uids']:
email = uid.get('email')
note = uid.get('comment')
if email:
if email in existing:
continue
if email not in addresses:
addresses[email] = {'pgp_keys': [],
'name': '', 'note': ''}
ai = addresses[email]
name = uid.get('name')
ai['name'] = name if name else ai['name']
ai['note'] = note if note else ai['note']
ai['pgp_keys'].append(key)
# FIXME: Scan Thunderbird and MacMail for e-mails, other apps...
return addresses
def auto_create_profiles(self):
new_emails = self.discover_new_email_addresses(self.get_profiles())
for email, info in new_emails.iteritems():
AddProfile(self.session, data={
'_method': 'POST',
'email': [email],
'note': [info["note"]],
'name': [info['name']]
}).run()
def _result(self):
profiles = self.get_profiles()
return {
'new_emails': self.discover_new_email_addresses(profiles),
'profiles': profiles,
'routes': self.session.config.routes,
'default_email': self.session.config.prefs.default_email
}
def setup_command(self, session):
changed = False
if self.data.get('_method') == 'POST' or self._testing():
name, email, note, pwd = (self.data.get(k, [None])[0] for k in
('name', 'email', 'note', 'pass'))
if email:
rv = AddProfile(session, data=self.data).run()
if rv.status == 'success':
#
# FIXME: We need to fire off a background process to
# try and auto-discover routes and sources.
#
if not session.config.prefs.default_email:
session.config.prefs.default_email = email
changed = True
self.save_profiles_to_key()
else:
return self._error(_('Failed to add profile'),
info=rv.error_info,
result=self._result())
if email and pwd:
sps = SecurePassphraseStorage(pwd)
SetupProfiles.PASSWORD_CACHE[email] = sps
result = self._result()
if not result['default_email']:
profiles = result['profiles'].values()
profiles.sort(key=lambda p: (len(p['pgp_keys']),
len(p['name'])))
e = result['default_email'] = profiles[-1]['email']
session.config.prefs.default_email = e
changed = True
else:
result = self._result()
if changed:
self._background_save(config=True)
return self._success(_('Your profiles'), result)
class SetupConfigureKey(SetupProfiles):
SYNOPSIS = (None, None, 'setup/configure_key', None)
HTTP_AUTH_REQUIRED = True
HTTP_CALLABLE = ('GET', 'POST')
HTTP_QUERY_VARS = dict_merge(TestableWebbable.HTTP_QUERY_VARS, {
})
HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS, {
})
TEST_DATA = {}
def _result(self):
keylist = self.list_secret_keys()
profiles = self.get_profiles(secret_keys=keylist)
return {
'secret_keys': keylist,
'profiles': profiles,
}
def setup_command(self, session):
# FIXME!
return self._success(_('Configuring a key'), self._result())
class SetupTestRoute(SetupProfiles):
SYNOPSIS = (None, None, 'setup/test_route', None)
HTTP_AUTH_REQUIRED = True
HTTP_CALLABLE = ('POST', )
HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS,
dict((k, v[0]) for k, v in
CONFIG_RULES['routes'][1].iteritems()),
{'route_id': 'ID of existing route'})
TEST_DATA = {}
def setup_command(self, session):
if self.args:
route_id = self.args[0]
elif 'route_id' in self.data:
route_id = self.data['route_id'][0]
else:
route_id = None
if route_id:
route = self.session.config.routes[route_id]
assert(route)
else:
route = {}
for k in CONFIG_RULES['routes'][1]:
if k not in self.data:
pass
elif CONFIG_RULES['routes'][1][k][1] in (int, 'int'):
route[k] = int(self.data[k][0])
else:
route[k] = self.data[k][0]
fromaddr = route.get('username', '')
if '@' not in fromaddr:
fromaddr = self.session.config.get_profile()['email']
if not fromaddr or '@' not in fromaddr:
fromaddr = '%s@%s' % (route.get('username', 'test'),
route.get('host', 'example.com'))
assert(fromaddr)
error_info = {'error': _('Unknown error')}
try:
assert(SendMail(self.session, None,
[(fromaddr,
[fromaddr, '[email protected]'],
None,
[self.event])],
test_only=True, test_route=route))
return self._success(_('Route is working'),
result=route)
except OSError:
error_info = {'error': _('Invalid command'),
'invalid_command': True}
except SendMailError, e:
error_info = {'error': e.message,
'sendmail_error': True}
error_info.update(e.error_info)
except:
import traceback
traceback.print_exc()
return self._error(_('Route is not working'),
result=route, info=error_info)
class Setup(TestableWebbable):
"""Enter setup flow"""
SYNOPSIS = (None, 'setup', 'setup', '[do_gpg_stuff]')
ORDER = ('Internals', 0)
LOG_PROGRESS = True
HTTP_CALLABLE = ('GET',)
HTTP_AUTH_REQUIRED = True
# These are a global, may be modified...
KEY_WORKER_LOCK = CryptoRLock()
KEY_CREATING_THREAD = None
KEY_EDITING_THREAD = None
@classmethod
def _check_profiles(self, config):
session = Session(config)
session.ui = SilentInteraction(config)
session.ui.block()
data = ListProfiles(session).run().result
okay = routes = bad = 0
for rid, ofs in data["rids"].iteritems():
profile = data["profiles"][ofs]
if profile.get('email', None):
okay += 1
route_id = profile.get('x-mailpile-profile-route', '')
if route_id:
if route_id in config.routes:
routes += 1
else:
bad += 1
else:
bad += 1
return (routes > 0) and (okay > 0) and (bad == 0)
@classmethod
def _CHECKPOINTS(self, config):
return [
# Stage 0: Welcome: Choose app language
('language', lambda: config.prefs.language, SetupWelcome),
# Stage 1: Crypto: Configure our master key stuff
('crypto', lambda: config.prefs.gpg_recipient, SetupCrypto),
# Stage 2: Identity (via. single page install flow)
('profiles', lambda: self._check_profiles(config), Setup),
# Stage 3: Routes (via. single page install flow)
('routes', lambda: config.routes, Setup),
# Stage 4: Sources (via. single page install flow)
('sources', lambda: config.sources, Setup),
# Stage 5: Is All Complete
('complete', lambda: config.web.setup_complete, Setup),
# FIXME: Check for this too?
#(lambda: config.prefs.crypto_policy != 'none', SetupConfigureKey),
]
@classmethod
def Next(cls, config, default, needed_auth=True):
if not config.loaded_config:
return default
for name, guard, step in cls._CHECKPOINTS(config):
auth_required = (step.HTTP_AUTH_REQUIRED is True
or (config.prefs.gpg_recipient and
step.HTTP_AUTH_REQUIRED == 'Maybe'))
if not guard():
if (not needed_auth) or (not auth_required):
return step
return default
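# Rough illustration of how the checkpoint list above drives the flow (the
# variable name below is invented for illustration):
#
#   next_cls = Setup.Next(config, default=Setup)
#   # -> the command class for the first stage whose guard() is still falsey
#   # (language, crypto, profiles, routes, sources, complete), or `default`
#   # once every checkpoint passes or auth would be required to proceed.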
def setup_command(self, session):
if '_method' in self.data:
return self._success(_('Entering setup flow'), result=dict(
((c[0], c[1]() and True or False)
for c in self._CHECKPOINTS(session.config)
)))
else:
return SetupMagic.setup_command(self, session)
_ = gettext
_plugins.register_commands(SetupMagic,
SetupGetEmailSettings,
SetupWelcome,
SetupCrypto,
SetupProfiles,
SetupConfigureKey,
SetupTestRoute,
Setup)
| apache-2.0 | -7,887,133,988,458,968,000 | 37.95229 | 83 | 0.503748 | false |
munhanha/mtg-random | djangoappengine/boot.py | 1 | 8121 | import logging
import os
import sys
PROJECT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DATA_ROOT = os.path.join(PROJECT_DIR, '.gaedata')
# Overrides for os.environ
env_ext = {'DJANGO_SETTINGS_MODULE': 'settings'}
def setup_env():
"""Configures app engine environment for command-line apps."""
# Try to import the appengine code from the system path.
try:
from google.appengine.api import apiproxy_stub_map
except ImportError:
for k in [k for k in sys.modules if k.startswith('google')]:
del sys.modules[k]
# Not on the system path. Build a list of alternative paths where it
# may be. First look within the project for a local copy, then look for
# where the Mac OS SDK installs it.
paths = [os.path.join(PROJECT_DIR, '.google_appengine'),
os.environ.get('APP_ENGINE_SDK'),
'/usr/local/google_appengine',
'/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine']
for path in os.environ.get('PATH', '').split(os.pathsep):
path = path.rstrip(os.sep)
if path.endswith('google_appengine'):
paths.append(path)
if os.name in ('nt', 'dos'):
path = r'%(PROGRAMFILES)s\Google\google_appengine' % os.environ
paths.append(path)
# Loop through all possible paths and look for the SDK dir.
sdk_path = None
for path in paths:
if not path:
continue
path = os.path.expanduser(path)
path = os.path.realpath(path)
if os.path.exists(path):
sdk_path = path
break
if sdk_path is None:
# The SDK could not be found in any known location.
sys.stderr.write('The Google App Engine SDK could not be found!\n'
"Make sure it's accessible via your PATH "
"environment and called google_appengine.\n")
sys.exit(1)
# Add the SDK and the libraries within it to the system path.
extra_paths = [sdk_path]
lib = os.path.join(sdk_path, 'lib')
# Automatically add all packages in the SDK's lib folder:
for name in os.listdir(lib):
root = os.path.join(lib, name)
subdir = name
# Package can be under 'lib/<pkg>/<pkg>/' or 'lib/<pkg>/lib/<pkg>/'
detect = (os.path.join(root, subdir), os.path.join(root, 'lib', subdir))
for path in detect:
if os.path.isdir(path):
extra_paths.append(os.path.dirname(path))
break
else:
if name == 'webapp2':
extra_paths.append(root)
elif name == 'webob_0_9':
extra_paths.append(root)
sys.path = extra_paths + sys.path
from google.appengine.api import apiproxy_stub_map
setup_project()
from .utils import have_appserver
if have_appserver:
# App Engine's threading.local is broken
setup_threading()
elif not os.path.exists(DATA_ROOT):
os.mkdir(DATA_ROOT)
setup_logging()
if not have_appserver:
# Patch Django to support loading management commands from zip files
from django.core import management
management.find_commands = find_commands
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
This version works for django deployments which are file based or
contained in a ZIP (in sys.path).
Returns an empty list if no commands are defined.
"""
import pkgutil
return [modname for importer, modname, ispkg in pkgutil.iter_modules(
[os.path.join(management_dir, 'commands')]) if not ispkg]
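# Illustration only (paths invented): for a management dir whose commands/
# folder holds runserver.py and shell.py -- on disk or inside a zip on
# sys.path -- find_commands() returns ['runserver', 'shell'].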
def setup_threading():
if sys.version_info >= (2, 7):
return
# XXX: On Python 2.5 GAE's threading.local doesn't work correctly with subclassing
try:
from django.utils._threading_local import local
import threading
threading.local = local
except ImportError:
pass
def setup_logging():
# Fix Python 2.6 logging module
logging.logMultiprocessing = 0
# Enable logging
level = logging.DEBUG
from .utils import have_appserver
if have_appserver:
# We can't import settings at this point when running a normal
# manage.py command because this module gets imported from settings.py
from django.conf import settings
if not settings.DEBUG:
level = logging.INFO
logging.getLogger().setLevel(level)
def setup_project():
from .utils import have_appserver, on_production_server
if have_appserver:
# This fixes a pwd import bug for os.path.expanduser()
env_ext['HOME'] = PROJECT_DIR
# The dev_appserver creates a sandbox which restricts access to certain
# modules and builtins in order to emulate the production environment.
# Here we get the subprocess module back into the dev_appserver sandbox.
# This module is just too important for development.
# Also we add the compiler/parser module back and enable https connections
# (seem to be broken on Windows because the _ssl module is disallowed).
if not have_appserver:
from google.appengine.tools import dev_appserver
try:
# Backup os.environ. It gets overwritten by the dev_appserver,
# but it's needed by the subprocess module.
env = dev_appserver.DEFAULT_ENV
dev_appserver.DEFAULT_ENV = os.environ.copy()
dev_appserver.DEFAULT_ENV.update(env)
# Backup the buffer() builtin. The subprocess in Python 2.5 on
# Linux and OS X needs it, but the dev_appserver removes it.
dev_appserver.buffer = buffer
except AttributeError:
logging.warn('Could not patch the default environment. '
'The subprocess module will not work correctly.')
try:
# Allow importing compiler/parser, _ssl (for https),
# _io for Python 2.7 io support on OS X
dev_appserver.HardenedModulesHook._WHITE_LIST_C_MODULES.extend(
('parser', '_ssl', '_io'))
except AttributeError:
logging.warn('Could not patch modules whitelist. '
'The compiler and parser modules will not work and '
'SSL support is disabled.')
elif not on_production_server:
try:
# Restore the real subprocess module
from google.appengine.api.mail_stub import subprocess
sys.modules['subprocess'] = subprocess
# Re-inject the buffer() builtin into the subprocess module
from google.appengine.tools import dev_appserver
subprocess.buffer = dev_appserver.buffer
except Exception, e:
logging.warn('Could not add the subprocess module to the sandbox: %s' % e)
os.environ.update(env_ext)
extra_paths = [PROJECT_DIR, os.path.join(os.path.dirname(__file__), 'lib')]
zip_packages_dir = os.path.join(PROJECT_DIR, 'zip-packages')
# We support zipped packages in the common and project folders.
if os.path.isdir(zip_packages_dir):
for zip_package in os.listdir(zip_packages_dir):
extra_paths.append(os.path.join(zip_packages_dir, zip_package))
# App Engine causes main.py to be reloaded if an exception gets raised
# on the first request of a main.py instance, so don't call setup_project()
# multiple times. We ensure this indirectly by checking if we've already
# modified sys.path, already.
if len(sys.path) < len(extra_paths) or \
sys.path[:len(extra_paths)] != extra_paths:
for path in extra_paths:
while path in sys.path:
sys.path.remove(path)
sys.path = extra_paths + sys.path
| bsd-3-clause | 4,427,875,777,957,635,000 | 41.742105 | 147 | 0.619751 | false |
TheWebMonks/equipo | app/freelancers/viewsets.py | 1 | 4110 | from .serializers import serializers
from .models import *
from django.contrib.auth.models import User, Group
from rest_framework import viewsets, permissions, renderers
class ProjectViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows projects to be viewed or edited.
"""
queryset = Project.objects.all()
serializer_class = serializers.ProjectSerializer
class CompanyViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows companies to be viewed or edited.
"""
queryset = Company.objects.all()
serializer_class = serializers.CompanySerializer
class SkillViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows skills to be viewed or edited.
"""
queryset = Skill.objects.all()
serializer_class = serializers.SkillSerializer
class TypeOfContractViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows types of contract to be viewed or edited.
"""
queryset = TypeOfContract.objects.all()
serializer_class = serializers.TypeOfContractSerializer
class SocialNetworkViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows social networks to be viewed or edited.
"""
queryset = SocialNetwork.objects.all()
serializer_class = serializers.SocialNetworkSerializer
class SocialAccountViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows social accounts to be viewed or edited.
"""
queryset = SocialAccount.objects.all()
serializer_class = serializers.SocialAccountsSerializer
class ProfileViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows profiles to be viewed or edited.
"""
queryset = Profile.objects.all()
serializer_class = serializers.ProfileSerializer
# renderer_classes = (TemplateHTMLRenderer,)
class ExperienceViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows experiences to be viewed or edited.
"""
queryset = Experience.objects.all()
serializer_class = serializers.ExperienceSerializer
# renderer_classes = (TemplateHTMLRenderer,)
class EducationViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows education entries to be viewed or edited.
"""
queryset = Education.objects.all()
serializer_class = serializers.EducationSerializer
# renderer_classes = (TemplateHTMLRenderer,)
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = serializers.UserSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = serializers.GroupSerializer
class CategoryViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows categories to be viewed or edited.
"""
queryset = Category.objects.all()
serializer_class = serializers.CategorySerializer
class KindOfTaskViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows kinds of task to be viewed or edited.
"""
queryset = KindOfTask.objects.all()
serializer_class = serializers.KindOfTaskSerializer
class ExpenseViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows expenses to be viewed or edited.
"""
queryset = Expense.objects.all()
serializer_class = serializers.ExpenseSerializer
class ExpendedTimeViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows expended time entries to be viewed or edited.
"""
queryset = ExpendedTime.objects.all()
serializer_class = serializers.ExpendedTimeSerializer
class ContractViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows contracts to be viewed or edited.
"""
queryset = Contract.objects.all()
serializer_class = serializers.ContractSerializer
class InvoiceViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows invoices to be viewed or edited.
"""
queryset = Invoice.objects.all()
serializer_class = serializers.InvoiceSerializer
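# Illustrative only: these viewsets are normally wired up through a DRF router in
# the project's urls.py. The URL prefixes below are assumptions, not part of this module.
#
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'projects', ProjectViewSet)
#   router.register(r'profiles', ProfileViewSet)
#   router.register(r'invoices', InvoiceViewSet)
#   # urlpatterns += [url(r'^api/', include(router.urls))]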
| apache-2.0 | -1,411,435,979,445,298,400 | 27.541667 | 65 | 0.725304 | false |
srossross/stable.world | stable_world/managers/base.py | 1 | 1825 | from __future__ import unicode_literals
import os
import click
from pipes import quote
from .push_file import push_file, pull_file
from stable_world.py_helpers import urlparse, urlunparse
class BaseManager(object):
NAME = None
PROGRAM = None
@classmethod
def enabled(cls):
if cls.PROGRAM is None:
return True
for path in os.getenv('PATH', '').split(os.pathsep):
if os.path.isfile(os.path.join(path, cls.PROGRAM)):
return True
def __init__(self, site_url, urls, bucket, token, dryrun):
self.site_url = site_url
self.bucket = bucket
self.token = token
self.dryrun = dryrun
self.cache_name = self.NAME
self.cache_info = urls[self.NAME]
@property
def config_file(self):
raise NotImplementedError()
@property
def cache_dir(self):
cache_dir = os.path.join('~', '.cache', 'stable.world', self.bucket)
return os.path.expanduser(cache_dir)
def get_base_url(self, basicAuthRequired=False):
site_url = self.site_url
if basicAuthRequired:
site_uri = urlparse(self.site_url)
site_url = urlunparse(site_uri._replace(netloc='{}:{}@{}'.format(
'token',
self.token,
site_uri.netloc
)))
return '%s/cache/%s/%s/' % (site_url, self.bucket, self.cache_name)
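# For illustration: with basicAuthRequired=True the token is embedded as basic
# auth in the netloc, so a hypothetical site_url of "https://stable.world",
# bucket "mybucket" and cache_name "pypi" would yield
#   https://token:<TOKEN>@stable.world/cache/mybucket/pypi/
# (the concrete values here are assumptions for the sketch).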
def use(self):
if not self.dryrun:
push_file(self.config_file)
return self.update_config_file()
@classmethod
def unuse(cls, info):
if not info:
return
for config_file in info.get('config_files', []):
click.echo('Removing {} config file {}'.format(cls.NAME, quote(config_file)))
pull_file(config_file)
| bsd-2-clause | -4,610,486,736,509,769,000 | 25.071429 | 89 | 0.580274 | false |
avedaee/DIRAC | DataManagementSystem/Service/FileCatalogProxyHandler.py | 1 | 3492 | ########################################################################
# $HeadURL $
# File: FileCatalogProxyHandler.py
########################################################################
""" :mod: FileCatalogProxyHandler
================================
.. module: FileCatalogProxyHandler
:synopsis: This is a service which represents a DISET proxy to the File Catalog
"""
## imports
import os
from types import StringTypes, DictType, TupleType
## from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
__RCSID__ = "$Id$"
def initializeFileCatalogProxyHandler( serviceInfo ):
""" service initalisation """
return S_OK()
class FileCatalogProxyHandler( RequestHandler ):
"""
.. class:: FileCatalogProxyHandler
"""
types_callProxyMethod = [ StringTypes, StringTypes, TupleType, DictType ]
def export_callProxyMethod( self, fcName, methodName, args, kargs ):
""" A generic method to call methods of the Storage Element.
"""
res = pythonCall( 120, self.__proxyWrapper, fcName, methodName, args, kargs )
if res['OK']:
return res['Value']
else:
return res
def __proxyWrapper( self, fcName, methodName, args, kwargs ):
""" The wrapper will obtain the client proxy and set it up in the environment.
The required functionality is then executed and returned to the client.
:param self: self reference
:param str fcName: name of the file catalog
:param str methodName: name of the method to call
:param tuple args: fcn args
:param dict kwargs: fcn keyword args
"""
result = self.__prepareSecurityDetails()
if not result['OK']:
return result
proxyLocation =result['Value']
try:
fileCatalog = FileCatalog( [fcName] )
method = getattr( fileCatalog, methodName )
except AttributeError, error:
errStr = "%s proxy: no method named %s" % ( fcName, methodName )
gLogger.exception( errStr, methodName, error )
return S_ERROR( errStr )
try:
result = method( *args, **kwargs )
if os.path.exists(proxyLocation):
os.remove(proxyLocation)
return result
except Exception, error:
if os.path.exists(proxyLocation):
os.remove(proxyLocation)
errStr = "%s proxy: Exception while performing %s" % ( fcName, methodName )
gLogger.exception( errStr, error )
return S_ERROR( errStr )
def __prepareSecurityDetails( self, vomsFlag = True ):
""" Obtains the connection details for the client """
try:
credDict = self.getRemoteCredentials()
clientDN = credDict[ 'DN' ]
clientUsername = credDict['username']
clientGroup = credDict['group']
gLogger.debug( "Getting proxy for %s@%s (%s)" % ( clientUsername, clientGroup, clientDN ) )
if vomsFlag:
result = gProxyManager.downloadVOMSProxyToFile( clientDN, clientGroup )
else:
result = gProxyManager.downloadProxyToFile( clientDN, clientGroup )
if not result['OK']:
return result
gLogger.debug( "Updating environment." )
os.environ['X509_USER_PROXY'] = result['Value']
return result
except Exception, error:
exStr = "__getConnectionDetails: Failed to get client connection details."
gLogger.exception( exStr, '', error )
return S_ERROR( exStr )
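# Client-side sketch (the DISET service location and the catalog method shown are
# assumptions, not taken from this handler):
#   from DIRAC.Core.DISET.RPCClient import RPCClient
#   rpc = RPCClient( "DataManagement/FileCatalogProxy" )
#   res = rpc.callProxyMethod( "FileCatalog", "listDirectory", ( "/some/dir", ), {} )
#   # res follows the usual S_OK()/S_ERROR() convention returned above.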
| gpl-3.0 | 5,032,431,279,644,278,000 | 35.757895 | 97 | 0.645762 | false |
fkie-cad/FACT_core | src/test/unit/web_interface/test_app_show_analysis.py | 1 | 2522 | from helperFunctions.data_conversion import make_bytes
from test.common_helper import TEST_FW, TEST_FW_2, TEST_TEXT_FILE
from test.unit.web_interface.base import WebInterfaceTest
class TestAppShowAnalysis(WebInterfaceTest):
def test_app_show_analysis_get_valid_fw(self):
result = self.test_client.get('/analysis/{}'.format(TEST_FW.uid)).data
assert b'<strong>UID:</strong> ' + make_bytes(TEST_FW.uid) in result
assert b'data-toggle="tooltip" title="mandatory plugin description"' in result
assert b'data-toggle="tooltip" title="optional plugin description"' in result
# check release date not available
assert b'1970-01-01' not in result
assert b'unknown' in result
# check file preview
assert b'Preview' not in result
result = self.test_client.get('/analysis/{}'.format(TEST_FW_2.uid)).data
assert b'unknown' not in result
assert b'2000-01-01' in result
def test_app_show_analysis_file_with_preview(self):
result = self.test_client.get('/analysis/{}'.format(TEST_TEXT_FILE.uid)).data
assert b'<strong>UID:</strong> ' + make_bytes(TEST_TEXT_FILE.uid) in result
assert b'Preview' in result
assert b'test file:\ncontent:' in result
def test_app_single_file_analysis(self):
result = self.test_client.get('/analysis/{}'.format(TEST_FW.uid))
assert b'Add new analysis' in result.data
assert b'Update analysis' in result.data
assert not self.mocked_interface.tasks
post_new = self.test_client.post('/analysis/{}'.format(TEST_FW.uid), content_type='multipart/form-data', data={'analysis_systems': ['plugin_a', 'plugin_b']})
assert post_new.status_code == 302
assert self.mocked_interface.tasks
assert self.mocked_interface.tasks[0].scheduled_analysis == ['plugin_a', 'plugin_b']
def test_app_dependency_graph(self):
result = self.test_client.get('/dependency-graph/{}'.format('testgraph'))
assert b'<strong>UID:</strong> testgraph' in result.data
assert b'Error: Graph could not be rendered. The file chosen as root must contain a filesystem with binaries.' not in result.data
assert b'Warning: Elf analysis plugin result is missing for 1 files' in result.data
result_error = self.test_client.get('/dependency-graph/{}'.format('1234567'))
assert b'Error: Graph could not be rendered. The file chosen as root must contain a filesystem with binaries.' in result_error.data
| gpl-3.0 | 730,382,524,230,051,600 | 49.44 | 165 | 0.681998 | false |
akfreas/TravisTest | scripts/TestFlightDistribute.py | 1 | 2890 | #!/usr/bin/env python
import biplist
import os
import requests
import tempfile
import sys
import git
testflight_api_url = "http://testflightapp.com/api/builds.json"
def upload_app_to_testflight(archive_path, testflight_api_token, testflight_team_token, testflight_distro_lists=[], notify=True):
plist_path = "%s/Info.plist" % archive_path
archive_plist = biplist.readPlist(plist_path)
app_path = archive_plist['ApplicationProperties']['ApplicationPath']
app_name = archive_plist['Name']
full_app_path = "%s/Products/%s" % (archive_path, app_path)
dsym_path = "%s/dSYMs/%s.app.dSYM" % (archive_path, app_name)
app_package = package_app('iphoneos', full_app_path, app_name)
app_dsym_zip = zip_dsym(dsym_path)
notes = compile_note_from_git_head()
distro_list = ",".join(testflight_distro_lists)
file_upload_params = {'file' : app_package, 'dsym' : app_dsym_zip}
meta_params = {
'api_token' : testflight_api_token,
'team_token' : testflight_team_token,
'notes' : notes,
'notify' : notify,
'distribution_lists' : distro_list
}
upload_response = requests.post(testflight_api_url, meta_params, files=file_upload_params)
app_package.close()
app_dsym_zip.close()
print upload_response.text
if upload_response.status_code == 200:
return True
else:
return False
def package_app(sdk, app_path, app_name):
temp_dir = tempfile.mkdtemp()
ipa_path = "%s/%s.ipa" % (temp_dir, app_name)
xc_run_command = "xcrun -sdk %s PackageApplication '%s' -o %s" % (sdk, app_path, ipa_path)
print xc_run_command
command = os.system(xc_run_command)
app_package = open(ipa_path)
return app_package
def zip_dsym(dsym_path):
from zipfile import ZipFile
temp_dir = tempfile.mkdtemp()
temp_dsym_path = "%s/%s" % (temp_dir, os.path.basename(dsym_path))
dsym_zip = ZipFile(temp_dsym_path, "w")
zipdir(dsym_path, dsym_zip)
dsym_zip.close()
dsym_zip = open(temp_dsym_path, "r")
return dsym_zip
def zipdir(path, zip):
for root, dirs, files in os.walk(path):
for file in files:
zip.write(os.path.join(root, file))
def compile_note_from_git_head():
repo = git.Repo(os.getcwd())
head_message = repo.head.commit.message
head_short_sha = repo.head.commit.hexsha[:8]
note = "%s %s" % (head_short_sha, head_message)
return note
def main():
archive_path = os.popen("find ~/Library/Developer/Xcode/Archives -type d -Btime -60m -name '*.xcarchive' | head -1").read().strip()
print archive_path
api_token = os.getenv("TESTFLIGHT_API_TOKEN")
team_token = os.getenv("TESTFLIGHT_TEAM_TOKEN")
upload_app_to_testflight(archive_path, api_token, team_token)
if __name__ == "__main__":
main()
| mit | -4,798,690,906,711,449,000 | 30.075269 | 135 | 0.632872 | false |
googleapis/googleapis-gen | grafeas/v1/grafeas-v1-py/grafeas/grafeas_v1/types/build.py | 1 | 2488 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from grafeas.grafeas_v1.types import provenance as g_provenance
__protobuf__ = proto.module(
package='grafeas.v1',
manifest={
'BuildNote',
'BuildOccurrence',
},
)
class BuildNote(proto.Message):
r"""Note holding the version of the provider's builder and the
signature of the provenance message in the build details
occurrence.
Attributes:
builder_version (str):
Required. Immutable. Version of the builder
which produced this build.
"""
builder_version = proto.Field(
proto.STRING,
number=1,
)
class BuildOccurrence(proto.Message):
r"""Details of a build occurrence.
Attributes:
provenance (grafeas.grafeas_v1.types.BuildProvenance):
Required. The actual provenance for the
build.
provenance_bytes (str):
Serialized JSON representation of the provenance, used in
generating the build signature in the corresponding build
note. After verifying the signature, ``provenance_bytes``
can be unmarshalled and compared to the provenance to
confirm that it is unchanged. A base64-encoded string
representation of the provenance bytes is used for the
signature in order to interoperate with openssl which
expects this format for signature verification.
The serialized form is captured both to avoid ambiguity in
how the provenance is marshalled to json as well to prevent
incompatibilities with future changes.
"""
provenance = proto.Field(
proto.MESSAGE,
number=1,
message=g_provenance.BuildProvenance,
)
provenance_bytes = proto.Field(
proto.STRING,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -2,803,864,520,464,966,700 | 30.493671 | 74 | 0.668408 | false |
TuKo/brainiak | examples/fcma/classification.py | 1 | 9141 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from brainiak.fcma.classifier import Classifier
from brainiak.fcma.preprocessing import prepare_fcma_data
from brainiak.io import dataset
from sklearn import svm
#from sklearn.linear_model import LogisticRegression
import sys
import logging
import numpy as np
from scipy.spatial.distance import hamming
from sklearn import model_selection
#from sklearn.externals import joblib
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# if want to output log to a file instead of outputting log to the console,
# replace "stream=sys.stdout" with "filename='fcma.log'"
logging.basicConfig(level=logging.INFO, format=format, stream=sys.stdout)
logger = logging.getLogger(__name__)
def example_of_aggregating_sim_matrix(raw_data, labels, num_subjects, num_epochs_per_subj):
# aggregate the kernel matrix to save memory
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
clf = Classifier(svm_clf, num_processed_voxels=1000, epochs_per_subj=num_epochs_per_subj)
rearranged_data = raw_data[num_epochs_per_subj:] + raw_data[0:num_epochs_per_subj]
rearranged_labels = labels[num_epochs_per_subj:] + labels[0:num_epochs_per_subj]
clf.fit(list(zip(rearranged_data, rearranged_data)), rearranged_labels,
num_training_samples=num_epochs_per_subj*(num_subjects-1))
predict = clf.predict()
print(predict)
print(clf.decision_function())
test_labels = labels[0:num_epochs_per_subj]
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when aggregating the similarity matrix to save memory, '
'the accuracy is %d / %d = %.2f' %
(num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
# when the kernel matrix is computed in portion, the test data is already in
print(clf.score(None, test_labels))
def example_of_cross_validation_with_detailed_info(raw_data, labels, num_subjects, num_epochs_per_subj):
# no shrinking, set C=1
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
#logit_clf = LogisticRegression()
clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
# doing leave-one-subject-out cross validation
for i in range(num_subjects):
leave_start = i * num_epochs_per_subj
leave_end = (i+1) * num_epochs_per_subj
training_data = raw_data[0:leave_start] + raw_data[leave_end:]
test_data = raw_data[leave_start:leave_end]
training_labels = labels[0:leave_start] + labels[leave_end:]
test_labels = labels[leave_start:leave_end]
clf.fit(list(zip(training_data, training_data)), training_labels)
# joblib can be used for saving and loading models
#joblib.dump(clf, 'model/logistic.pkl')
#clf = joblib.load('model/svm.pkl')
predict = clf.predict(list(zip(test_data, test_data)))
print(predict)
print(clf.decision_function(list(zip(test_data, test_data))))
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when leaving subject %d out for testing, the accuracy is %d / %d = %.2f' %
(i, num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
print(clf.score(list(zip(test_data, test_data)), test_labels))
def example_of_cross_validation_using_model_selection(raw_data, labels, num_subjects, num_epochs_per_subj):
# NOTE: this method does not work for sklearn.svm.SVC with precomputed kernel
# when the kernel matrix is computed in portions; also, this method only works
# for self-correlation, i.e. correlation between the same data matrix.
# no shrinking, set C=1
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
#logit_clf = LogisticRegression()
clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
# doing leave-one-subject-out cross validation
# no shuffling in cv
skf = model_selection.StratifiedKFold(n_splits=num_subjects,
shuffle=False)
scores = model_selection.cross_val_score(clf, list(zip(raw_data, raw_data)),
y=labels,
cv=skf)
print(scores)
logger.info(
'the overall cross validation accuracy is %.2f' %
np.mean(scores)
)
def example_of_correlating_two_components(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj):
# aggregate the kernel matrix to save memory
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
num_training_samples=num_epochs_per_subj*(num_subjects-1)
clf.fit(list(zip(raw_data[0:num_training_samples], raw_data2[0:num_training_samples])),
labels[0:num_training_samples])
X = list(zip(raw_data[num_training_samples:], raw_data2[num_training_samples:]))
predict = clf.predict(X)
print(predict)
print(clf.decision_function(X))
test_labels = labels[num_training_samples:]
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when aggregating the similarity matrix to save memory, '
'the accuracy is %d / %d = %.2f' %
(num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
# when the kernel matrix is computed in portion, the test data is already in
print(clf.score(X, test_labels))
def example_of_correlating_two_components_aggregating_sim_matrix(raw_data, raw_data2, labels,
num_subjects, num_epochs_per_subj):
# aggregate the kernel matrix to save memory
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
clf = Classifier(svm_clf, num_processed_voxels=1000, epochs_per_subj=num_epochs_per_subj)
num_training_samples=num_epochs_per_subj*(num_subjects-1)
clf.fit(list(zip(raw_data, raw_data2)), labels,
num_training_samples=num_training_samples)
predict = clf.predict()
print(predict)
print(clf.decision_function())
test_labels = labels[num_training_samples:]
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when aggregating the similarity matrix to save memory, '
'the accuracy is %d / %d = %.2f' %
(num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
# when the kernel matrix is computed in portion, the test data is already in
print(clf.score(None, test_labels))
# python3 classification.py face_scene bet.nii.gz face_scene/prefrontal_top_mask.nii.gz face_scene/fs_epoch_labels.npy
if __name__ == '__main__':
if len(sys.argv) != 5:
logger.error('the number of input argument is not correct')
sys.exit(1)
data_dir = sys.argv[1]
extension = sys.argv[2]
mask_file = sys.argv[3]
epoch_file = sys.argv[4]
epoch_list = np.load(epoch_file)
num_subjects = len(epoch_list)
num_epochs_per_subj = epoch_list[0].shape[1]
images = dataset.load_images_from_dir(data_dir, extension)
mask = dataset.load_boolean_mask(mask_file)
conditions = dataset.load_labels(epoch_file)
raw_data, _, labels = prepare_fcma_data(images, conditions, mask)
example_of_aggregating_sim_matrix(raw_data, labels, num_subjects, num_epochs_per_subj)
example_of_cross_validation_with_detailed_info(raw_data, labels, num_subjects, num_epochs_per_subj)
example_of_cross_validation_using_model_selection(raw_data, labels, num_subjects, num_epochs_per_subj)
# test of two different components for correlation computation
# images = dataset.load_images_from_dir(data_dir, extension)
# mask2 = dataset.load_boolean_mask('face_scene/visual_top_mask.nii.gz')
# raw_data, raw_data2, labels = prepare_fcma_data(images, conditions, mask,
# mask2)
#example_of_correlating_two_components(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj)
#example_of_correlating_two_components_aggregating_sim_matrix(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj)
| apache-2.0 | 8,559,069,756,804,332,000 | 49.502762 | 129 | 0.68067 | false |
praekelt/vumi-go | go/routers/app_multiplexer/forms.py | 1 | 1271 | from django import forms
class ApplicationMultiplexerTitleForm(forms.Form):
content = forms.CharField(
label="Menu title",
max_length=100
)
class ApplicationMultiplexerForm(forms.Form):
application_label = forms.CharField(
label="Application label"
)
endpoint_name = forms.CharField(
label="Endpoint name"
)
class BaseApplicationMultiplexerFormSet(forms.formsets.BaseFormSet):
@staticmethod
def initial_from_config(data):
initial_data = []
for entry in data:
initial_data.append({
'application_label': entry['label'],
'endpoint_name': entry['endpoint'],
})
return initial_data
def to_config(self):
entries = []
for form in self.ordered_forms:
if not form.is_valid():
continue
entries.append({
"label": form.cleaned_data['application_label'],
"endpoint": form.cleaned_data['endpoint_name'],
})
return entries
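# Minimal sketch of the round trip handled above; the config values are made up
# for illustration, the field names are the ones declared in
# ApplicationMultiplexerForm:
#
#   config = [{'label': 'App 1', 'endpoint': 'app1'}]
#   initial = BaseApplicationMultiplexerFormSet.initial_from_config(config)
#   # -> [{'application_label': 'App 1', 'endpoint_name': 'app1'}]
#   # formset.to_config() maps valid, ordered forms back to the
#   # [{'label': ..., 'endpoint': ...}] shape.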
ApplicationMultiplexerFormSet = forms.formsets.formset_factory(
ApplicationMultiplexerForm,
can_delete=True,
can_order=True,
extra=1,
formset=BaseApplicationMultiplexerFormSet)
| bsd-3-clause | 8,391,352,842,481,823,000 | 24.938776 | 68 | 0.610543 | false |
xmnlab/minilab | arch/socket_arch/simple_socket/client.py | 1 | 1045 | # Echo client program
from __future__ import print_function, division, unicode_literals
from random import randint
import socket
import pickle
import sys
import platform
if platform.system() == 'Linux':
sys.path.insert(0, '/var/www/mswim')
else:
sys.path.insert(0, '/mswim/')
from mswim import settings
HOST = 'localhost' # The remote host
PORT = 50007 # The same port as used by the server
def acquisition_data(type):
conn_id = str(randint(0, 10**10))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.sendall(pickle.dumps((settings.DEVICES[type], conn_id)))
breaker = '\n\n\n\n'
cache = ''
while True:
while True:
cache += s.recv(1024)
if breaker in cache:
i = cache.index(breaker)
data = pickle.loads(cache[:i])
cache = cache[i+len(breaker):]
print('Received %s registers.' % len(data))
del data
s.close()
acquisition_data('ceramic') | gpl-3.0 | 7,342,832,662,091,720,000 | 23.325581 | 65 | 0.6 | false |
penkin/python-dockercloud | dockercloud/api/http.py | 1 | 2612 | from __future__ import absolute_import
import logging
from requests import Request, Session
from requests import utils
from urllib.parse import urljoin
import dockercloud
from .exceptions import ApiError, AuthError
logger = logging.getLogger("python-dockercloud")
global_session = Session()
def get_session():
return global_session
def close_session():
try:
global global_session
global_session.close()
except:
pass
def new_session():
close_session()
global global_session
global_session = Session()
def send_request(method, path, inject_header=True, **kwargs):
json = None
url = urljoin(dockercloud.rest_host.encode(), path.strip("/").encode())
if not url.endswith(b"/"):
url = b"%s/" % url
user_agent = 'python-dockercloud/%s' % dockercloud.__version__
if dockercloud.user_agent:
user_agent = "%s %s" % (dockercloud.user_agent, user_agent)
# construct headers
headers = {'Content-Type': 'application/json', 'User-Agent': user_agent}
headers.update(dockercloud.auth.get_auth_header())
# construct request
s = get_session()
request = Request(method, url, headers=headers, **kwargs)
# get environment proxies
env_proxies = utils.get_environ_proxies(url) or {}
kw_args = {'proxies': env_proxies}
# make the request
req = s.prepare_request(request)
logger.info("Prepared Request: %s, %s, %s, %s" % (req.method, req.url, req.headers, kwargs))
response = s.send(req, **kw_args)
status_code = getattr(response, 'status_code', None)
logger.info("Response: Status %s, %s, %s" % (str(status_code), response.headers, response.text))
# handle the response
if not status_code:
# Most likely network trouble
raise ApiError("No Response (%s %s)" % (method, url))
elif 200 <= status_code <= 299:
# Success
if status_code != 204:
# Try to parse the response.
try:
json = response.json()
if response.headers and inject_header:
json["dockercloud_action_uri"] = response.headers.get("X-DockerCloud-Action-URI", "")
except TypeError:
raise ApiError("JSON Parse Error (%s %s). Response: %s" % (method, url, response.text))
else:
json = None
else:
# Server returned an error
if status_code == 401:
raise AuthError("Not authorized")
else:
raise ApiError("Status %s (%s %s). Response: %s" % (str(status_code), method, url, response.text))
return json
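# Hedged usage sketch (the endpoint path is an example, not a guaranteed API):
#
#   from dockercloud.api.http import send_request
#   services = send_request("GET", "/api/app/v1/service/")
#   # returns parsed JSON (None for HTTP 204), raises AuthError on 401 and
#   # ApiError for other failures or when no response arrives.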
| apache-2.0 | 2,378,145,631,790,428,000 | 30.095238 | 110 | 0.620597 | false |
rawdigits/wee-slack | wee_slack.py | 1 | 170611 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict
from functools import wraps
from itertools import islice, count
import textwrap
import time
import json
import pickle
import sha
import os
import re
import urllib
import sys
import traceback
import collections
import ssl
import random
import string
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from websocket import create_connection, WebSocketConnectionClosedException
# hack to make tests possible.. better way?
try:
import weechat
except:
pass
SCRIPT_NAME = "slack"
SCRIPT_AUTHOR = "Ryan Huber <[email protected]>"
SCRIPT_VERSION = "2.2.0"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Extends weechat for typing notification/search/etc on slack.com"
BACKLOG_SIZE = 200
SCROLLBACK_SIZE = 500
RECORD_DIR = "/tmp/weeslack-debug"
SLACK_API_TRANSLATOR = {
"channel": {
"history": "channels.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "channels.mark",
"info": "channels.info",
},
"im": {
"history": "im.history",
"join": "conversations.open",
"leave": "conversations.close",
"mark": "im.mark",
},
"mpim": {
"history": "mpim.history",
"join": "mpim.open", # conversations.open lacks unread_count_display
"leave": "conversations.close",
"mark": "mpim.mark",
"info": "groups.info",
},
"group": {
"history": "groups.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "groups.mark",
"info": "groups.info"
},
"shared": {
"history": "conversations.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "channels.mark",
"info": "conversations.info",
},
"thread": {
"history": None,
"join": None,
"leave": None,
"mark": None,
}
}
###### Decorators have to be up here
def slack_buffer_or_ignore(f):
"""
Only run this function if we're in a slack buffer, else ignore
"""
@wraps(f)
def wrapper(data, current_buffer, *args, **kwargs):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return w.WEECHAT_RC_OK
return f(data, current_buffer, *args, **kwargs)
return wrapper
def slack_buffer_required(f):
"""
Only run this function if we're in a slack buffer, else print error
"""
@wraps(f)
def wrapper(data, current_buffer, *args, **kwargs):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return w.WEECHAT_RC_ERROR
return f(data, current_buffer, *args, **kwargs)
return wrapper
def utf8_decode(f):
"""
Decode all arguments from byte strings to unicode strings. Use this for
functions called from outside of this script, e.g. callbacks from weechat.
"""
@wraps(f)
def wrapper(*args, **kwargs):
return f(*decode_from_utf8(args), **decode_from_utf8(kwargs))
return wrapper
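# Sketch of how these decorators are meant to stack on a weechat callback; the
# command name below is invented for illustration:
#
#   @slack_buffer_required
#   @utf8_decode
#   def command_example(data, current_buffer, args):
#       ...
#       return w.WEECHAT_RC_OK_EAT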
NICK_GROUP_HERE = "0|Here"
NICK_GROUP_AWAY = "1|Away"
NICK_GROUP_EXTERNAL = "2|External"
sslopt_ca_certs = {}
if hasattr(ssl, "get_default_verify_paths") and callable(ssl.get_default_verify_paths):
ssl_defaults = ssl.get_default_verify_paths()
if ssl_defaults.cafile is not None:
sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
EMOJI = []
###### Unicode handling
def encode_to_utf8(data):
if isinstance(data, unicode):
return data.encode('utf-8')
if isinstance(data, bytes):
return data
elif isinstance(data, collections.Mapping):
return type(data)(map(encode_to_utf8, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(encode_to_utf8, data))
else:
return data
def decode_from_utf8(data):
if isinstance(data, bytes):
return data.decode('utf-8')
if isinstance(data, unicode):
return data
elif isinstance(data, collections.Mapping):
return type(data)(map(decode_from_utf8, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(decode_from_utf8, data))
else:
return data
class WeechatWrapper(object):
def __init__(self, wrapped_class):
self.wrapped_class = wrapped_class
# Helper method used to encode/decode method calls.
def wrap_for_utf8(self, method):
def hooked(*args, **kwargs):
result = method(*encode_to_utf8(args), **encode_to_utf8(kwargs))
# Prevent wrapped_class from becoming unwrapped
if result == self.wrapped_class:
return self
return decode_from_utf8(result)
return hooked
# Encode and decode everything sent to/received from weechat. We use the
# unicode type internally in wee-slack, but has to send utf8 to weechat.
def __getattr__(self, attr):
orig_attr = self.wrapped_class.__getattribute__(attr)
if callable(orig_attr):
return self.wrap_for_utf8(orig_attr)
else:
return decode_from_utf8(orig_attr)
# Ensure all lines sent to weechat specifies a prefix. For lines after the
# first, we want to disable the prefix, which is done by specifying a space.
def prnt_date_tags(self, buffer, date, tags, message):
message = message.replace("\n", "\n \t")
return self.wrap_for_utf8(self.wrapped_class.prnt_date_tags)(buffer, date, tags, message)
class ProxyWrapper(object):
def __init__(self):
self.proxy_name = w.config_string(weechat.config_get('weechat.network.proxy_curl'))
self.proxy_string = ""
self.proxy_type = ""
self.proxy_address = ""
self.proxy_port = ""
self.proxy_user = ""
self.proxy_password = ""
self.has_proxy = False
if self.proxy_name:
self.proxy_string = "weechat.proxy.{}".format(self.proxy_name)
self.proxy_type = w.config_string(weechat.config_get("{}.type".format(self.proxy_string)))
if self.proxy_type == "http":
self.proxy_address = w.config_string(weechat.config_get("{}.address".format(self.proxy_string)))
self.proxy_port = w.config_integer(weechat.config_get("{}.port".format(self.proxy_string)))
self.proxy_user = w.config_string(weechat.config_get("{}.username".format(self.proxy_string)))
self.proxy_password = w.config_string(weechat.config_get("{}.password".format(self.proxy_string)))
self.has_proxy = True
else:
w.prnt("", "\nWarning: weechat.network.proxy_curl is set to {} type (name : {}, conf string : {}). Only HTTP proxy is supported.\n\n".format(self.proxy_type, self.proxy_name, self.proxy_string))
def curl(self):
if not self.has_proxy:
return ""
if self.proxy_user and self.proxy_password:
user = "{}:{}@".format(self.proxy_user, self.proxy_password)
else:
user = ""
if self.proxy_port:
port = ":{}".format(self.proxy_port)
else:
port = ""
return "--proxy {}{}{}".format(user, self.proxy_address, port)
##### Helpers
def format_exc_tb():
return decode_from_utf8(traceback.format_exc())
def format_exc_only():
etype, value, _ = sys.exc_info()
return ''.join(decode_from_utf8(traceback.format_exception_only(etype, value)))
def get_nick_color_name(nick):
info_name_prefix = "irc_" if int(weechat_version) < 0x1050000 else ""
return w.info_get(info_name_prefix + "nick_color_name", nick)
def get_functions_with_prefix(prefix):
return {name[len(prefix):]: ref for name, ref in globals().items()
if name.startswith(prefix)}
###### New central Event router
class EventRouter(object):
def __init__(self):
"""
complete
Eventrouter is the central hub we use to route:
1) incoming websocket data
2) outgoing http requests and incoming replies
3) local requests
It has a recorder that, when enabled, logs most events
to the location specified in RECORD_DIR.
"""
self.queue = []
self.slow_queue = []
self.slow_queue_timer = 0
self.teams = {}
self.context = {}
self.weechat_controller = WeechatController(self)
self.previous_buffer = ""
self.reply_buffer = {}
self.cmds = get_functions_with_prefix("command_")
self.proc = get_functions_with_prefix("process_")
self.handlers = get_functions_with_prefix("handle_")
self.local_proc = get_functions_with_prefix("local_process_")
self.shutting_down = False
self.recording = False
self.recording_path = "/tmp"
self.handle_next_hook = None
self.handle_next_hook_interval = -1
def record(self):
"""
complete
Toggles the event recorder and creates a directory for data if enabled.
"""
self.recording = not self.recording
if self.recording:
if not os.path.exists(RECORD_DIR):
os.makedirs(RECORD_DIR)
def record_event(self, message_json, file_name_field, subdir=None):
"""
complete
Called each time you want to record an event.
message_json is a json in dict form
file_name_field is the json key whose value you want to be part of the file name
"""
now = time.time()
if subdir:
directory = "{}/{}".format(RECORD_DIR, subdir)
else:
directory = RECORD_DIR
if not os.path.exists(directory):
os.makedirs(directory)
mtype = message_json.get(file_name_field, 'unknown')
f = open('{}/{}-{}.json'.format(directory, now, mtype), 'w')
f.write("{}".format(json.dumps(message_json)))
f.close()
def store_context(self, data):
"""
A place to store data and vars needed by callback returns. We need this because
weechat's "callback_data" has a limited size and weechat will crash if you exceed
this size.
"""
identifier = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(40))
self.context[identifier] = data
dbg("stored context {} {} ".format(identifier, data.url))
return identifier
def retrieve_context(self, identifier):
"""
A place to retrieve data and vars needed by callback returns. We need this because
weechat's "callback_data" has a limited size and weechat will crash if you exceed
this size.
"""
data = self.context.get(identifier, None)
if data:
# dbg("retrieved context {} ".format(identifier))
return data
def delete_context(self, identifier):
"""
Requests can span multiple requests, so we may need to delete this as a last step
"""
if identifier in self.context:
# dbg("deleted eontext {} ".format(identifier))
del self.context[identifier]
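# Rough shape of the store/retrieve cycle wrapped around weechat's hook_process
# (identifiers and urls below are invented for illustration):
#
#   context = eventrouter.store_context(request)   # random 40-char key
#   w.hook_process_hashtable(url, params, timeout,
#                            "receive_httprequest_callback", context)
#   # ...later, inside the callback:
#   request = eventrouter.retrieve_context(context)
#   eventrouter.delete_context(context)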
def shutdown(self):
"""
complete
This toggles shutdown mode. Shutdown mode tells us not to
talk to Slack anymore. Without this, typing /quit will trigger
a race with the buffer close callback and may result in you
leaving every slack channel.
"""
self.shutting_down = not self.shutting_down
def register_team(self, team):
"""
complete
Adds a team to the list of known teams for this EventRouter.
"""
if isinstance(team, SlackTeam):
self.teams[team.get_team_hash()] = team
else:
raise InvalidType(type(team))
def reconnect_if_disconnected(self):
for team_id, team in self.teams.iteritems():
if not team.connected:
team.connect()
dbg("reconnecting {}".format(team))
def receive_ws_callback(self, team_hash):
"""
This is called by the global method of the same name.
It is triggered when we have incoming data on a websocket,
which needs to be read. Once it is read, we will ensure
the data is valid JSON, add metadata, and place it back
on the queue for processing as JSON.
"""
team = self.teams[team_hash]
try:
# Read the data from the websocket associated with this team.
data = team.ws.recv()
except WebSocketConnectionClosedException:
w.prnt(team.channel_buffer,
'Lost connection to slack team {} (on receive), reconnecting.'.format(team.domain))
dbg('receive_ws_callback failed with exception:\n{}'.format(format_exc_tb()), level=5)
team.set_disconnected()
return w.WEECHAT_RC_OK
except ssl.SSLWantReadError:
# Expected to happen occasionally on SSL websockets.
return w.WEECHAT_RC_OK
message_json = json.loads(decode_from_utf8(data))
metadata = WeeSlackMetadata({
"team": team_hash,
}).jsonify()
message_json["wee_slack_metadata"] = metadata
if self.recording:
self.record_event(message_json, 'type', 'websocket')
self.receive_json(json.dumps(message_json))
def receive_httprequest_callback(self, data, command, return_code, out, err):
"""
complete
Receives the result of an http request we previously handed
off to weechat (weechat bundles libcurl). Weechat can fragment
replies, so it buffers them until the reply is complete.
It is then populated with metadata here so we can identify
where the request originated and route properly.
"""
request_metadata = self.retrieve_context(data)
try:
dbg("RECEIVED CALLBACK with request of {} id of {} and code {} of length {}".format(request_metadata.request, request_metadata.response_id, return_code, len(out)))
except:
dbg(request_metadata)
return
if return_code == 0:
if len(out) > 0:
if request_metadata.response_id not in self.reply_buffer:
self.reply_buffer[request_metadata.response_id] = StringIO()
self.reply_buffer[request_metadata.response_id].write(out)
try:
j = json.loads(self.reply_buffer[request_metadata.response_id].getvalue())
except:
pass
# dbg("Incomplete json, awaiting more", True)
try:
j["wee_slack_process_method"] = request_metadata.request_normalized
j["wee_slack_request_metadata"] = pickle.dumps(request_metadata)
self.reply_buffer.pop(request_metadata.response_id)
if self.recording:
self.record_event(j, 'wee_slack_process_method', 'http')
self.receive_json(json.dumps(j))
self.delete_context(data)
except:
dbg("HTTP REQUEST CALLBACK FAILED", True)
pass
# We got an empty reply and this is weird so just ditch it and retry
else:
dbg("length was zero, probably a bug..")
self.delete_context(data)
self.receive(request_metadata)
elif return_code != -1:
self.reply_buffer.pop(request_metadata.response_id, None)
self.delete_context(data)
if request_metadata.request == 'rtm.start':
w.prnt('', ('Failed connecting to slack team with token starting with {}, ' +
'retrying. If this persists, try increasing slack_timeout.')
.format(request_metadata.token[:15]))
dbg('rtm.start failed with return_code {}. stack:\n{}'
.format(return_code, ''.join(traceback.format_stack())), level=5)
self.receive(request_metadata)
else:
if request_metadata.response_id not in self.reply_buffer:
self.reply_buffer[request_metadata.response_id] = StringIO()
self.reply_buffer[request_metadata.response_id].write(out)
def receive_json(self, data):
"""
complete
Receives a raw JSON string from and unmarshals it
as dict, then places it back on the queue for processing.
"""
dbg("RECEIVED JSON of len {}".format(len(data)))
message_json = json.loads(data)
self.queue.append(message_json)
def receive(self, dataobj):
"""
complete
Receives a raw object and places it on the queue for
processing. Object must be known to handle_next or
be JSON.
"""
dbg("RECEIVED FROM QUEUE")
self.queue.append(dataobj)
def receive_slow(self, dataobj):
"""
complete
Receives a raw object and places it on the slow queue for
processing. Object must be known to handle_next or
be JSON.
"""
dbg("RECEIVED FROM QUEUE")
self.slow_queue.append(dataobj)
def handle_next(self):
"""
complete
Main handler of the EventRouter. This is called repeatedly
via callback to drain events from the queue. It also attaches
useful metadata and context to events as they are processed.
"""
wanted_interval = 100
if len(self.slow_queue) > 0 or len(self.queue) > 0:
wanted_interval = 10
if self.handle_next_hook is None or wanted_interval != self.handle_next_hook_interval:
if self.handle_next_hook:
w.unhook(self.handle_next_hook)
self.handle_next_hook = w.hook_timer(wanted_interval, 0, 0, "handle_next", "")
self.handle_next_hook_interval = wanted_interval
if len(self.slow_queue) > 0 and ((self.slow_queue_timer + 1) < time.time()):
# for q in self.slow_queue[0]:
dbg("from slow queue", 0)
self.queue.append(self.slow_queue.pop())
# self.slow_queue = []
self.slow_queue_timer = time.time()
if len(self.queue) > 0:
j = self.queue.pop(0)
# Reply is a special case of a json reply from websocket.
kwargs = {}
if isinstance(j, SlackRequest):
if j.should_try():
if j.retry_ready():
local_process_async_slack_api_request(j, self)
else:
self.slow_queue.append(j)
else:
dbg("Max retries for Slackrequest")
else:
if "reply_to" in j:
dbg("SET FROM REPLY")
function_name = "reply"
elif "type" in j:
dbg("SET FROM type")
function_name = j["type"]
elif "wee_slack_process_method" in j:
dbg("SET FROM META")
function_name = j["wee_slack_process_method"]
else:
dbg("SET FROM NADA")
function_name = "unknown"
# Here we are passing the actual objects. No more lookups.
meta = j.get("wee_slack_metadata", None)
if meta:
try:
if isinstance(meta, basestring):
dbg("string of metadata")
team = meta.get("team", None)
if team:
kwargs["team"] = self.teams[team]
if "user" in j:
kwargs["user"] = self.teams[team].users[j["user"]]
if "channel" in j:
kwargs["channel"] = self.teams[team].channels[j["channel"]]
except:
dbg("metadata failure")
dbg("running {}".format(function_name))
if function_name.startswith("local_") and function_name in self.local_proc:
self.local_proc[function_name](j, self, **kwargs)
elif function_name in self.proc:
self.proc[function_name](j, self, **kwargs)
elif function_name in self.handlers:
self.handlers[function_name](j, self, **kwargs)
else:
dbg("Callback not implemented for event: {}".format(function_name))
def handle_next(*args):
"""
complete
This is just a place to call the event router globally.
This is a dirty hack. There must be a better way.
"""
try:
EVENTROUTER.handle_next()
except:
if config.debug_mode:
traceback.print_exc()
else:
pass
return w.WEECHAT_RC_OK
class WeechatController(object):
"""
Encapsulates our interaction with weechat
"""
def __init__(self, eventrouter):
self.eventrouter = eventrouter
self.buffers = {}
self.previous_buffer = None
self.buffer_list_stale = False
def iter_buffers(self):
for b in self.buffers:
yield (b, self.buffers[b])
def register_buffer(self, buffer_ptr, channel):
"""
complete
Adds a weechat buffer to the list of handled buffers for this EventRouter
"""
if isinstance(buffer_ptr, basestring):
self.buffers[buffer_ptr] = channel
else:
raise InvalidType(type(buffer_ptr))
def unregister_buffer(self, buffer_ptr, update_remote=False, close_buffer=False):
"""
complete
Adds a weechat buffer to the list of handled buffers for this EventRouter
"""
channel = self.buffers.get(buffer_ptr)
if channel:
channel.destroy_buffer(update_remote)
del self.buffers[buffer_ptr]
if close_buffer:
w.buffer_close(buffer_ptr)
def get_channel_from_buffer_ptr(self, buffer_ptr):
return self.buffers.get(buffer_ptr, None)
def get_all(self, buffer_ptr):
return self.buffers
def get_previous_buffer_ptr(self):
return self.previous_buffer
def set_previous_buffer(self, data):
self.previous_buffer = data
def check_refresh_buffer_list(self):
return self.buffer_list_stale and self.last_buffer_list_update + 1 < time.time()
def set_refresh_buffer_list(self, setting):
self.buffer_list_stale = setting
###### New Local Processors
def local_process_async_slack_api_request(request, event_router):
"""
complete
Sends an API request to Slack. You'll need to give this a well formed SlackRequest object.
DEBUGGING!!! The context here cannot be very large. Weechat will crash.
"""
if not event_router.shutting_down:
weechat_request = 'url:{}'.format(request.request_string())
weechat_request += '&nonce={}'.format(''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(4)))
params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
request.tried()
context = event_router.store_context(request)
# TODO: let flashcode know about this bug - i have to 'clear' the hashtable or retry requests fail
w.hook_process_hashtable('url:', params, config.slack_timeout, "", context)
w.hook_process_hashtable(weechat_request, params, config.slack_timeout, "receive_httprequest_callback", context)
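# Illustrative sketch of how a request reaches this function (not part of the
# original flow; the token and channel id below are placeholders). A SlackRequest
# is normally queued with EVENTROUTER.receive() and dispatched from handle_next():
#
#   s = SlackRequest("xoxp-...", "channels.info", {"channel": "C012AB3CD"})
#   EVENTROUTER.receive(s)
#   # handle_next() eventually calls local_process_async_slack_api_request(s, EVENTROUTER),
#   # which stores a context for the reply and hands the URL to hook_process_hashtable.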
###### New Callbacks
@utf8_decode
def receive_httprequest_callback(data, command, return_code, out, err):
"""
complete
This is a dirty hack. There must be a better way.
"""
# def url_processor_cb(data, command, return_code, out, err):
EVENTROUTER.receive_httprequest_callback(data, command, return_code, out, err)
return w.WEECHAT_RC_OK
@utf8_decode
def receive_ws_callback(*args):
"""
complete
The first arg is all we want here. It contains the team
hash which is set when we _hook the descriptor.
This is a dirty hack. There must be a better way.
"""
EVENTROUTER.receive_ws_callback(args[0])
return w.WEECHAT_RC_OK
@utf8_decode
def ws_ping_cb(data, remaining_calls):
for team in EVENTROUTER.teams.values():
if team.ws:
team.ws.ping()
return w.WEECHAT_RC_OK
@utf8_decode
def reconnect_callback(*args):
EVENTROUTER.reconnect_if_disconnected()
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_closing_callback(signal, sig_type, data):
"""
Receives a callback from weechat when a buffer is being closed.
"""
EVENTROUTER.weechat_controller.unregister_buffer(data, True, False)
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_input_callback(signal, buffer_ptr, data):
"""
incomplete
Handles everything a user types in the input bar. In our case
this includes add/remove reactions, modifying messages, and
sending messages.
"""
eventrouter = eval(signal)
channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(buffer_ptr)
if not channel:
return w.WEECHAT_RC_ERROR
def get_id(message_id):
if not message_id:
return 1
elif message_id[0] == "$":
return message_id[1:]
else:
return int(message_id)
message_id_regex = "(\d*|\$[0-9a-fA-F]{3,})"
reaction = re.match("^{}(\+|-):(.*):\s*$".format(message_id_regex), data)
substitute = re.match("^{}s/".format(message_id_regex), data)
if reaction:
if reaction.group(2) == "+":
channel.send_add_reaction(get_id(reaction.group(1)), reaction.group(3))
elif reaction.group(2) == "-":
channel.send_remove_reaction(get_id(reaction.group(1)), reaction.group(3))
elif substitute:
msg_id = get_id(substitute.group(1))
try:
old, new, flags = re.split(r'(?<!\\)/', data)[1:]
except ValueError:
pass
else:
# Replacement string in re.sub() is a string, not a regex, so get
# rid of escapes.
new = new.replace(r'\/', '/')
old = old.replace(r'\/', '/')
channel.edit_nth_previous_message(msg_id, old, new, flags)
else:
if data.startswith(('//', ' ')):
data = data[1:]
channel.send_message(data)
# this is probably wrong channel.mark_read(update_remote=True, force=True)
return w.WEECHAT_RC_OK
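# Illustrative summary of the input syntax handled above (message ids count back
# from the newest message, or are a $-prefixed short hash of 3+ hex chars):
#
#   +:smile:          add a reaction to the most recent message
#   3+:smile:         add a reaction to the 3rd most recent message
#   $a1b-:smile:      remove a reaction from the message with hash $a1b
#   s/old/new/        edit your most recent message (first match)
#   2s/old/new/g      edit your 2nd most recent message (all matches)
#   s///              delete your most recent message
#   anything else     sent to the channel as a normal message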
# Workaround for supporting multiline messages. It intercepts before the input
# callback is called, as this is called with the whole message, while it is
# normally split on newline before being sent to buffer_input_callback
def input_text_for_buffer_cb(data, modifier, current_buffer, string):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return string
message = decode_from_utf8(string)
if not message.startswith("/") and "\n" in message:
buffer_input_callback("EVENTROUTER", current_buffer, message)
return ""
return string
@utf8_decode
def buffer_switch_callback(signal, sig_type, data):
"""
incomplete
Every time we change channels in weechat, we call this to:
1) set read marker 2) determine if we have already populated
channel history data
"""
eventrouter = eval(signal)
prev_buffer_ptr = eventrouter.weechat_controller.get_previous_buffer_ptr()
# this is to see if we need to gray out things in the buffer list
prev = eventrouter.weechat_controller.get_channel_from_buffer_ptr(prev_buffer_ptr)
if prev:
prev.mark_read()
new_channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(data)
if new_channel:
if not new_channel.got_history:
new_channel.get_history()
eventrouter.weechat_controller.set_previous_buffer(data)
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_list_update_callback(data, somecount):
"""
incomplete
A simple timer-based callback that will update the buffer list
if needed. We only do this max 1x per second, as otherwise it
uses a lot of cpu for minimal changes. We use buffer short names
to indicate typing via "#channel" <-> ">channel" and
user presence via " name" <-> "+name".
"""
eventrouter = eval(data)
# global buffer_list_update
for b in eventrouter.weechat_controller.iter_buffers():
b[1].refresh()
# buffer_list_update = True
# if eventrouter.weechat_controller.check_refresh_buffer_list():
# # gray_check = False
# # if len(servers) > 1:
# # gray_check = True
# eventrouter.weechat_controller.set_refresh_buffer_list(False)
return w.WEECHAT_RC_OK
def quit_notification_callback(signal, sig_type, data):
stop_talking_to_slack()
return w.WEECHAT_RC_OK
@utf8_decode
def typing_notification_cb(data, signal, current_buffer):
msg = w.buffer_get_string(current_buffer, "input")
if len(msg) > 8 and msg[:1] != "/":
global typing_timer
now = time.time()
if typing_timer + 4 < now:
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if channel and channel.type != "thread":
identifier = channel.identifier
request = {"type": "typing", "channel": identifier}
channel.team.send_to_websocket(request, expect_reply=False)
typing_timer = now
return w.WEECHAT_RC_OK
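# Illustrative: the typing notification above is throttled to one websocket frame
# every 4 seconds, and only fires when the input is longer than 8 characters and
# is not a command. The frame looks roughly like (channel id is a placeholder):
#
#   {"type": "typing", "channel": "D0123ABCD", "id": 7}
#
# where "id" is filled in by SlackTeam.send_to_websocket().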
@utf8_decode
def typing_update_cb(data, remaining_calls):
w.bar_item_update("slack_typing_notice")
return w.WEECHAT_RC_OK
@utf8_decode
def slack_never_away_cb(data, remaining_calls):
if config.never_away:
for t in EVENTROUTER.teams.values():
slackbot = t.get_channel_map()['Slackbot']
channel = t.channels[slackbot]
request = {"type": "typing", "channel": channel.identifier}
channel.team.send_to_websocket(request, expect_reply=False)
return w.WEECHAT_RC_OK
@utf8_decode
def typing_bar_item_cb(data, item, current_window, current_buffer, extra_info):
"""
    Provides a bar item indicating who is typing in the current channel AND
    who is typing a DM to you globally.
"""
typers = []
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
# first look for people typing in this channel
if current_channel:
        # this try is mostly because server buffers don't implement is_someone_typing
try:
if current_channel.type != 'im' and current_channel.is_someone_typing():
typers += current_channel.get_typing_list()
except:
pass
# here is where we notify you that someone is typing in DM
# regardless of which buffer you are in currently
for t in EVENTROUTER.teams.values():
for channel in t.channels.values():
if channel.type == "im":
if channel.is_someone_typing():
typers.append("D/" + channel.slack_name)
typing = ", ".join(typers)
if typing != "":
typing = w.color('yellow') + "typing: " + typing
return typing
@utf8_decode
def nick_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all @-prefixed nicks to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if current_channel is None or current_channel.members is None:
return w.WEECHAT_RC_OK
for m in current_channel.members:
u = current_channel.team.users.get(m, None)
if u:
w.hook_completion_list_add(completion, "@" + u.name, 1, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def emoji_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all :-prefixed emoji to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if current_channel is None:
return w.WEECHAT_RC_OK
for e in current_channel.team.emoji_completions:
w.hook_completion_list_add(completion, ":" + e + ":", 0, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def complete_next_cb(data, current_buffer, command):
"""Extract current word, if it is equal to a nick, prefix it with @ and
rely on nick_completion_cb adding the @-prefixed versions to the
completion lists, then let Weechat's internal completion do its
thing
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
# channel = channels.find(current_buffer)
if not hasattr(current_channel, 'members') or current_channel is None or current_channel.members is None:
return w.WEECHAT_RC_OK
line_input = w.buffer_get_string(current_buffer, "input")
current_pos = w.buffer_get_integer(current_buffer, "input_pos") - 1
input_length = w.buffer_get_integer(current_buffer, "input_length")
word_start = 0
word_end = input_length
# If we're on a non-word, look left for something to complete
while current_pos >= 0 and line_input[current_pos] != '@' and not line_input[current_pos].isalnum():
current_pos = current_pos - 1
if current_pos < 0:
current_pos = 0
for l in range(current_pos, 0, -1):
if line_input[l] != '@' and not line_input[l].isalnum():
word_start = l + 1
break
for l in range(current_pos, input_length):
if not line_input[l].isalnum():
word_end = l
break
word = line_input[word_start:word_end]
for m in current_channel.members:
u = current_channel.team.users.get(m, None)
if u and u.name == word:
# Here, we cheat. Insert a @ in front and rely in the @
# nicks being in the completion list
w.buffer_set(current_buffer, "input", line_input[:word_start] + "@" + line_input[word_start:])
w.buffer_set(current_buffer, "input_pos", str(w.buffer_get_integer(current_buffer, "input_pos") + 1))
return w.WEECHAT_RC_OK_EAT
return w.WEECHAT_RC_OK
def script_unloaded():
stop_talking_to_slack()
return w.WEECHAT_RC_OK
def stop_talking_to_slack():
"""
complete
    Prevents a race condition where quitting closes buffers,
    which would trigger leaving the channel because of how
    buffer closing is handled
"""
EVENTROUTER.shutdown()
return w.WEECHAT_RC_OK
##### New Classes
class SlackRequest(object):
"""
complete
    Encapsulates a Slack API request. Valuable as an object that we can add to the queue and/or retry.
    Makes a SHA of the request url and current time so we can re-tag this on the way back through.
"""
def __init__(self, token, request, post_data={}, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
self.tries = 0
self.start_time = time.time()
self.domain = 'api.slack.com'
self.request = request
self.request_normalized = re.sub(r'\W+', '', request)
self.token = token
post_data["token"] = token
self.post_data = post_data
self.params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
self.url = 'https://{}/api/{}?{}'.format(self.domain, request, urllib.urlencode(encode_to_utf8(post_data)))
self.response_id = sha.sha("{}{}".format(self.url, self.start_time)).hexdigest()
self.retries = kwargs.get('retries', 3)
# def __repr__(self):
# return "URL: {} Tries: {} ID: {}".format(self.url, self.tries, self.response_id)
def request_string(self):
return "{}".format(self.url)
def tried(self):
self.tries += 1
self.response_id = sha.sha("{}{}".format(self.url, time.time())).hexdigest()
def should_try(self):
return self.tries < self.retries
def retry_ready(self):
return (self.start_time + (self.tries**2)) < time.time()
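# Rough usage sketch for SlackRequest (illustrative only; the token and ids below
# are placeholders). Failed requests are retried with a quadratic backoff:
# retry_ready() returns True once start_time + tries**2 seconds have passed, and
# should_try() gives up after `retries` attempts (3 by default).
#
#   s = SlackRequest("xoxp-...", "users.info", {"user": "U0123ABCD"},
#                    team_hash="deadbeef", channel_identifier="D0123ABCD")
#   s.request_string()  # -> "https://api.slack.com/api/users.info?..." (token included)
#   s.tried()           # bump the try counter and refresh response_id
#   s.should_try()      # -> True while tries < retries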
class SlackTeam(object):
"""
incomplete
    Team object under which users and channels live. Does lots.
"""
def __init__(self, eventrouter, token, websocket_url, team_info, nick, myidentifier, users, bots, channels, **kwargs):
self.identifier = team_info["id"]
self.ws_url = websocket_url
self.connected = False
self.connecting = False
self.ws = None
self.ws_counter = 0
self.ws_replies = {}
self.eventrouter = eventrouter
self.token = token
self.team = self
self.subdomain = team_info["domain"]
self.domain = self.subdomain + ".slack.com"
self.preferred_name = self.domain
self.nick = nick
self.myidentifier = myidentifier
try:
if self.channels:
for c in channels.keys():
if not self.channels.get(c):
self.channels[c] = channels[c]
except:
self.channels = channels
self.users = users
self.bots = bots
self.team_hash = SlackTeam.generate_team_hash(self.nick, self.subdomain)
self.name = self.domain
self.channel_buffer = None
self.got_history = True
self.create_buffer()
self.set_muted_channels(kwargs.get('muted_channels', ""))
for c in self.channels.keys():
channels[c].set_related_server(self)
channels[c].check_should_open()
# self.channel_set_related_server(c)
# Last step is to make sure my nickname is the set color
self.users[self.myidentifier].force_color(w.config_string(w.config_get('weechat.color.chat_nick_self')))
# This highlight step must happen after we have set related server
self.set_highlight_words(kwargs.get('highlight_words', ""))
self.load_emoji_completions()
self.type = "team"
def __repr__(self):
return "domain={} nick={}".format(self.subdomain, self.nick)
def __eq__(self, compare_str):
if compare_str == self.token or compare_str == self.domain or compare_str == self.subdomain:
return True
else:
return False
@property
def members(self):
return self.users.viewkeys()
def load_emoji_completions(self):
self.emoji_completions = list(EMOJI)
if self.emoji_completions:
s = SlackRequest(self.token, "emoji.list", {}, team_hash=self.team_hash)
self.eventrouter.receive(s)
def add_channel(self, channel):
self.channels[channel["id"]] = channel
channel.set_related_server(self)
# def connect_request_generate(self):
# return SlackRequest(self.token, 'rtm.start', {})
# def close_all_buffers(self):
# for channel in self.channels:
# self.eventrouter.weechat_controller.unregister_buffer(channel.channel_buffer, update_remote=False, close_buffer=True)
# #also close this server buffer
# self.eventrouter.weechat_controller.unregister_buffer(self.channel_buffer, update_remote=False, close_buffer=True)
def create_buffer(self):
if not self.channel_buffer:
alias = config.server_aliases.get(self.subdomain)
if alias:
self.preferred_name = alias
elif config.short_buffer_names:
self.preferred_name = self.subdomain
else:
self.preferred_name = self.domain
self.channel_buffer = w.buffer_new(self.preferred_name, "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, "localvar_set_type", 'server')
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.nick)
w.buffer_set(self.channel_buffer, "localvar_set_server", self.preferred_name)
if w.config_string(w.config_get('irc.look.server_buffer')) == 'merge_with_core':
w.buffer_merge(self.channel_buffer, w.buffer_search_main())
def destroy_buffer(self, update_remote):
pass
def set_muted_channels(self, muted_str):
self.muted_channels = {x for x in muted_str.split(',') if x}
for channel in self.channels.itervalues():
channel.set_highlights()
def set_highlight_words(self, highlight_str):
self.highlight_words = {x for x in highlight_str.split(',') if x}
for channel in self.channels.itervalues():
channel.set_highlights()
def formatted_name(self, **kwargs):
return self.domain
def buffer_prnt(self, data, message=False):
tag_name = "team_message" if message else "team_info"
w.prnt_date_tags(self.channel_buffer, SlackTS().major, tag(tag_name), data)
def send_message(self, message, subtype=None, request_dict_ext={}):
w.prnt("", "ERROR: Sending a message in the team buffer is not supported")
def find_channel_by_members(self, members, channel_type=None):
for channel in self.channels.itervalues():
if channel.get_members() == members and (
channel_type is None or channel.type == channel_type):
return channel
def get_channel_map(self):
return {v.slack_name: k for k, v in self.channels.iteritems()}
def get_username_map(self):
return {v.name: k for k, v in self.users.iteritems()}
def get_team_hash(self):
return self.team_hash
@staticmethod
def generate_team_hash(nick, subdomain):
return str(sha.sha("{}{}".format(nick, subdomain)).hexdigest())
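    # Illustrative (hypothetical values): the team hash is the SHA1 hex digest of
    # nick + subdomain, e.g. SlackTeam.generate_team_hash("alice", "myteam").
    # It keys EVENTROUTER.teams and is attached to requests as team_hash so that
    # replies can be routed back to the right team.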
def refresh(self):
self.rename()
def rename(self):
pass
# def attach_websocket(self, ws):
# self.ws = ws
def is_user_present(self, user_id):
user = self.users.get(user_id)
if user and user.presence == 'active':
return True
else:
return False
def mark_read(self, ts=None, update_remote=True, force=False):
pass
def connect(self):
if not self.connected and not self.connecting:
self.connecting = True
if self.ws_url:
try:
# only http proxy is currently supported
proxy = ProxyWrapper()
                    if proxy.has_proxy:
ws = create_connection(self.ws_url, sslopt=sslopt_ca_certs, http_proxy_host=proxy.proxy_address, http_proxy_port=proxy.proxy_port, http_proxy_auth=(proxy.proxy_user, proxy.proxy_password))
else:
ws = create_connection(self.ws_url, sslopt=sslopt_ca_certs)
self.hook = w.hook_fd(ws.sock._sock.fileno(), 1, 0, 0, "receive_ws_callback", self.get_team_hash())
ws.sock.setblocking(0)
self.ws = ws
# self.attach_websocket(ws)
self.set_connected()
self.connecting = False
except:
w.prnt(self.channel_buffer,
'Failed connecting to slack team {}, retrying.'.format(self.domain))
dbg('connect failed with exception:\n{}'.format(format_exc_tb()), level=5)
self.connecting = False
return False
else:
# The fast reconnect failed, so start over-ish
for chan in self.channels:
self.channels[chan].got_history = False
s = initiate_connection(self.token, retries=999)
self.eventrouter.receive(s)
self.connecting = False
# del self.eventrouter.teams[self.get_team_hash()]
self.set_reconnect_url(None)
def set_connected(self):
self.connected = True
def set_disconnected(self):
w.unhook(self.hook)
self.connected = False
def set_reconnect_url(self, url):
self.ws_url = url
def next_ws_transaction_id(self):
self.ws_counter += 1
return self.ws_counter
def send_to_websocket(self, data, expect_reply=True):
data["id"] = self.next_ws_transaction_id()
message = json.dumps(data)
try:
if expect_reply:
self.ws_replies[data["id"]] = data
self.ws.send(encode_to_utf8(message))
dbg("Sent {}...".format(message[:100]))
except:
w.prnt(self.channel_buffer,
'Lost connection to slack team {} (on send), reconnecting.'.format(self.domain))
dbg('send_to_websocket failed with data: `{}` and exception:\n{}'
.format(message, format_exc_tb()), level=5)
self.set_disconnected()
def update_member_presence(self, user, presence):
user.presence = presence
for c in self.channels:
c = self.channels[c]
if user.id in c.members:
c.update_nicklist(user.id)
def subscribe_users_presence(self):
# FIXME: There is a limitation in the API to the size of the
# json we can send.
# We should try to be smarter to fetch the users whom we want to
# subscribe to.
users = self.users.keys()[0:750]
self.send_to_websocket({
"type": "presence_sub",
"ids": users,
}, expect_reply=False)
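# Illustrative shape of the presence subscription frame sent above (ids are
# placeholders and truncated to the first 750 users, per the FIXME):
#
#   {"type": "presence_sub", "ids": ["U0123ABCD", "U0456EFGH"], "id": 42}
#
# The "id" field is added by send_to_websocket() via next_ws_transaction_id().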
class SlackChannelCommon(object):
def send_add_reaction(self, msg_id, reaction):
self.send_change_reaction("reactions.add", msg_id, reaction)
def send_remove_reaction(self, msg_id, reaction):
self.send_change_reaction("reactions.remove", msg_id, reaction)
def send_change_reaction(self, method, msg_id, reaction):
if type(msg_id) is not int:
if msg_id in self.hashed_messages:
timestamp = str(self.hashed_messages[msg_id].ts)
else:
return
elif 0 < msg_id <= len(self.messages):
keys = self.main_message_keys_reversed()
timestamp = next(islice(keys, msg_id - 1, None))
else:
return
data = {"channel": self.identifier, "timestamp": timestamp, "name": reaction}
s = SlackRequest(self.team.token, method, data)
self.eventrouter.receive(s)
def edit_nth_previous_message(self, msg_id, old, new, flags):
message = self.my_last_message(msg_id)
if message is None:
return
if new == "" and old == "":
s = SlackRequest(self.team.token, "chat.delete", {"channel": self.identifier, "ts": message['ts']}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
else:
num_replace = 1
if 'g' in flags:
num_replace = 0
new_message = re.sub(old, new, message["text"], num_replace)
if new_message != message["text"]:
s = SlackRequest(self.team.token, "chat.update", {"channel": self.identifier, "ts": message['ts'], "text": new_message}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def my_last_message(self, msg_id):
if type(msg_id) is not int:
m = self.hashed_messages.get(msg_id)
if m is not None and m.message_json.get("user") == self.team.myidentifier:
return m.message_json
else:
for key in self.main_message_keys_reversed():
m = self.messages[key]
if m.message_json.get("user") == self.team.myidentifier:
msg_id -= 1
if msg_id == 0:
return m.message_json
def change_message(self, ts, message_json=None, text=None):
ts = SlackTS(ts)
m = self.messages.get(ts)
if not m:
return
if message_json:
m.message_json.update(message_json)
if text:
m.change_text(text)
if type(m) == SlackMessage or config.thread_messages_in_channel:
new_text = self.render(m, force=True)
modify_buffer_line(self.channel_buffer, ts, new_text)
if type(m) == SlackThreadMessage:
thread_channel = m.parent_message.thread_channel
if thread_channel and thread_channel.active:
new_text = thread_channel.render(m, force=True)
modify_buffer_line(thread_channel.channel_buffer, ts, new_text)
def hash_message(self, ts):
ts = SlackTS(ts)
def calc_hash(msg):
return sha.sha(str(msg.ts)).hexdigest()
if ts in self.messages and not self.messages[ts].hash:
message = self.messages[ts]
tshash = calc_hash(message)
hl = 3
shorthash = tshash[:hl]
while any(x.startswith(shorthash) for x in self.hashed_messages):
hl += 1
shorthash = tshash[:hl]
if shorthash[:-1] in self.hashed_messages:
col_msg = self.hashed_messages.pop(shorthash[:-1])
col_new_hash = calc_hash(col_msg)[:hl]
col_msg.hash = col_new_hash
self.hashed_messages[col_new_hash] = col_msg
self.change_message(str(col_msg.ts))
if col_msg.thread_channel:
col_msg.thread_channel.rename()
self.hashed_messages[shorthash] = message
message.hash = shorthash
return shorthash
elif ts in self.messages:
return self.messages[ts].hash
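# Illustrative behaviour of hash_message() (hypothetical values): each message is
# hashed with the SHA1 of its timestamp and keeps the shortest unique prefix,
# starting at 3 hex chars. On a prefix collision, the previously stored message is
# re-hashed with a longer prefix and re-rendered:
#
#   channel.hash_message("1530000000.000200")   # -> e.g. "a1b"
#   channel.hash_message("1530000100.000300")   # if this also starts with "a1b",
#                                               # both messages get 4-char hashes
#
# These short hashes are what users reference as "$a1b" in reaction/edit commands.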
class SlackChannel(SlackChannelCommon):
"""
Represents an individual slack channel.
"""
def __init__(self, eventrouter, **kwargs):
# We require these two things for a valid object,
# the rest we can just learn from slack
self.active = False
for key, value in kwargs.items():
setattr(self, key, value)
self.eventrouter = eventrouter
self.slack_name = kwargs["name"]
self.slack_purpose = kwargs.get("purpose", {"value": ""})
self.topic = kwargs.get("topic", {"value": ""})
self.identifier = kwargs["id"]
self.last_read = SlackTS(kwargs.get("last_read", SlackTS()))
self.channel_buffer = None
self.team = kwargs.get('team', None)
self.got_history = False
self.messages = OrderedDict()
self.hashed_messages = {}
self.new_messages = False
self.typing = {}
self.type = 'channel'
self.set_name(self.slack_name)
# short name relates to the localvar we change for typing indication
self.current_short_name = self.name
self.set_members(kwargs.get('members', []))
self.unread_count_display = 0
self.last_line_from = None
def __eq__(self, compare_str):
if compare_str == self.slack_name or compare_str == self.formatted_name() or compare_str == self.formatted_name(style="long_default"):
return True
else:
return False
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
@property
def muted(self):
return self.identifier in self.team.muted_channels
def set_name(self, slack_name):
self.name = "#" + slack_name
def refresh(self):
return self.rename()
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(typing=self.is_someone_typing(), style="sidebar")
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
def set_members(self, members):
self.members = set(members)
self.update_nicklist()
def get_members(self):
return self.members
def set_unread_count_display(self, count):
self.unread_count_display = count
self.new_messages = bool(self.unread_count_display)
if self.muted and config.muted_channels_activity != "all":
return
for c in range(self.unread_count_display):
if self.type in ["im", "mpim"]:
w.buffer_set(self.channel_buffer, "hotlist", "2")
else:
w.buffer_set(self.channel_buffer, "hotlist", "1")
def formatted_name(self, style="default", typing=False, **kwargs):
if typing and config.channel_name_typing_indicator:
prepend = ">"
elif self.type == "group":
prepend = config.group_name_prefix
elif self.type == "shared":
prepend = config.shared_name_prefix
else:
prepend = "#"
sidebar_color = w.color(config.color_buflist_muted_channels) if self.muted else ""
select = {
"default": prepend + self.slack_name,
"sidebar": sidebar_color + prepend + self.slack_name,
"base": self.slack_name,
"long_default": "{}.{}{}".format(self.team.preferred_name, prepend, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
return select[style]
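    # Illustrative output of formatted_name() for a channel named "general" on a
    # team whose preferred_name is "myteam.slack.com" (hypothetical values):
    #
    #   formatted_name()                      -> "#general"
    #   formatted_name(style="sidebar")       -> "#general" (">general" while someone is typing,
    #                                            if the typing indicator is enabled)
    #   formatted_name(style="long_default")  -> "myteam.slack.com.#general"
    #   formatted_name(style="base")          -> "general"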
def render_topic(self):
if self.channel_buffer:
topic = self.topic['value'] or self.slack_purpose['value']
topic = unhtmlescape(unfurl_refs(topic, ignore_alt_text=False))
w.buffer_set(self.channel_buffer, "title", topic)
def set_topic(self, value):
self.topic = {"value": value}
self.render_topic()
def update_from_message_json(self, message_json):
for key, value in message_json.items():
setattr(self, key, value)
def open(self, update_remote=True):
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
self.create_buffer()
self.active = True
self.get_history()
def check_should_open(self, force=False):
if hasattr(self, "is_archived") and self.is_archived:
return
if force:
self.create_buffer()
return
# Only check is_member if is_open is not set, because in some cases
# (e.g. group DMs), is_member should be ignored in favor of is_open.
is_open = self.is_open if hasattr(self, "is_open") else self.is_member
if is_open or self.unread_count_display:
self.create_buffer()
if config.background_load_all_history:
self.get_history(slow_queue=True)
def set_related_server(self, team):
self.team = team
def mentions(self):
return {'@' + self.team.nick, self.team.myidentifier}
def highlights(self):
personal_highlights = self.team.highlight_words.union(self.mentions())
if self.muted and config.muted_channels_activity == "personal_highlights":
return personal_highlights
else:
return personal_highlights.union({"!here", "!channel", "!everyone"})
def set_highlights(self):
# highlight my own name and any set highlights
if self.channel_buffer:
h_str = ",".join(self.highlights())
w.buffer_set(self.channel_buffer, "highlight_words", h_str)
def create_buffer(self):
"""
Creates the weechat buffer where the channel magic happens.
"""
if not self.channel_buffer:
self.active = True
self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
if self.type == "im":
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
else:
w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
self.render_topic()
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
if self.channel_buffer:
# if self.team.server_alias:
# w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.server_alias)
# else:
w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.preferred_name)
# else:
# self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
self.update_nicklist()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if self.type == "im":
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"users": self.user, "return_im": True}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def clear_messages(self):
w.buffer_clear(self.channel_buffer)
self.messages = OrderedDict()
self.hashed_messages = {}
self.got_history = False
def destroy_buffer(self, update_remote):
self.clear_messages()
self.channel_buffer = None
self.active = False
if update_remote and not self.eventrouter.shutting_down:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["leave"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
    # timestamp defaults to None so that SlackTS() picks the current time on each call
    def buffer_prnt(self, nick, text, timestamp=None, tagset=None, tag_nick=None, **kwargs):
data = "{}\t{}".format(format_nick(nick, self.last_line_from), text)
self.last_line_from = nick
ts = SlackTS(timestamp)
last_read = SlackTS(self.last_read)
# without this, DMs won't open automatically
if not self.channel_buffer and ts > last_read:
self.open(update_remote=False)
if self.channel_buffer:
# backlog messages - we will update the read marker as we print these
backlog = True if ts <= last_read else False
if tagset:
self.new_messages = True
# we have to infer the tagset because we weren't told
elif ts <= last_read:
tagset = "backlog"
elif self.type in ["im", "mpim"]:
if tag_nick != self.team.nick:
tagset = "dm"
self.new_messages = True
else:
tagset = "dmfromme"
else:
tagset = "default"
self.new_messages = True
tags = tag(tagset, user=tag_nick, muted=self.muted)
try:
if (config.unhide_buffers_with_activity
and not self.is_visible() and not self.muted):
w.buffer_set(self.channel_buffer, "hidden", "0")
w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
modify_last_print_time(self.channel_buffer, ts.minor)
if backlog or tag_nick == self.team.nick:
self.mark_read(ts, update_remote=False, force=True)
except:
dbg("Problem processing buffer_prnt")
def send_message(self, message, subtype=None, request_dict_ext={}):
message = linkify_text(message, self.team, self)
dbg(message)
if subtype == 'me_message':
s = SlackRequest(self.team.token, "chat.meMessage",
{"channel": self.identifier, "text": message},
team_hash=self.team.team_hash,
channel_identifier=self.identifier)
self.eventrouter.receive(s)
else:
request = {"type": "message", "channel": self.identifier,
"text": message, "user": self.team.myidentifier}
request.update(request_dict_ext)
self.team.send_to_websocket(request)
def store_message(self, message, team, from_me=False):
if not self.active:
return
if from_me:
message.message_json["user"] = team.myidentifier
self.messages[SlackTS(message.ts)] = message
sorted_messages = sorted(self.messages.items())
messages_to_delete = sorted_messages[:-SCROLLBACK_SIZE]
messages_to_keep = sorted_messages[-SCROLLBACK_SIZE:]
for message_hash in [m[1].hash for m in messages_to_delete]:
if message_hash in self.hashed_messages:
del self.hashed_messages[message_hash]
self.messages = OrderedDict(messages_to_keep)
def is_visible(self):
return w.buffer_get_integer(self.channel_buffer, "hidden") == 0
def get_history(self, slow_queue=False):
if not self.got_history:
# we have probably reconnected. flush the buffer
if self.team.connected:
self.clear_messages()
self.buffer_prnt('', 'getting channel history...', tagset='backlog')
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["history"], {"channel": self.identifier, "count": BACKLOG_SIZE}, team_hash=self.team.team_hash, channel_identifier=self.identifier, clear=True)
if not slow_queue:
self.eventrouter.receive(s)
else:
self.eventrouter.receive_slow(s)
self.got_history = True
def main_message_keys_reversed(self):
return (key for key in reversed(self.messages)
if type(self.messages[key]) == SlackMessage)
# Typing related
def set_typing(self, user):
if self.channel_buffer and self.is_visible():
self.typing[user] = time.time()
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def unset_typing(self, user):
if self.channel_buffer and self.is_visible():
u = self.typing.get(user, None)
if u:
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def is_someone_typing(self):
"""
        Walks through the dict of typing users in a channel and returns
        True quickly if any of them is actively typing. If none are,
        empties the dict and returns False.
"""
for user, timestamp in self.typing.iteritems():
if timestamp + 4 > time.time():
return True
if len(self.typing) > 0:
self.typing = {}
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
return False
def get_typing_list(self):
"""
Returns the names of everyone in the channel who is currently typing.
"""
typing = []
        # use items() (a list copy in Python 2) since entries may be deleted while looping
        for user, timestamp in self.typing.items():
if timestamp + 4 > time.time():
typing.append(user)
else:
del self.typing[user]
return typing
def mark_read(self, ts=None, update_remote=True, force=False):
if self.new_messages or force:
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "unread", "")
w.buffer_set(self.channel_buffer, "hotlist", "-1")
if not ts:
ts = next(reversed(self.messages), SlackTS())
if ts > self.last_read:
self.last_read = ts
if update_remote:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["mark"], {"channel": self.identifier, "ts": ts}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
self.new_messages = False
def user_joined(self, user_id):
# ugly hack - for some reason this gets turned into a list
self.members = set(self.members)
self.members.add(user_id)
self.update_nicklist(user_id)
def user_left(self, user_id):
self.members.discard(user_id)
self.update_nicklist(user_id)
def update_nicklist(self, user=None):
if not self.channel_buffer:
return
if self.type not in ["channel", "group", "mpim", "shared"]:
return
w.buffer_set(self.channel_buffer, "nicklist", "1")
# create nicklists for the current channel if they don't exist
# if they do, use the existing pointer
here = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_HERE)
if not here:
here = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_HERE, "weechat.color.nicklist_group", 1)
afk = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_AWAY)
if not afk:
afk = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_AWAY, "weechat.color.nicklist_group", 1)
# Add External nicklist group only for shared channels
if self.type == 'shared':
external = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_EXTERNAL)
if not external:
external = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_EXTERNAL, 'weechat.color.nicklist_group', 2)
if user and len(self.members) < 1000:
user = self.team.users.get(user)
# External users that have left shared channels won't exist
if not user or user.deleted:
return
nick = w.nicklist_search_nick(self.channel_buffer, "", user.name)
# since this is a change just remove it regardless of where it is
w.nicklist_remove_nick(self.channel_buffer, nick)
# now add it back in to whichever..
nick_group = afk
if user.is_external:
nick_group = external
elif self.team.is_user_present(user.identifier):
nick_group = here
if user.identifier in self.members:
w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
# if we didn't get a user, build a complete list. this is expensive.
else:
if len(self.members) < 1000:
try:
for user in self.members:
user = self.team.users.get(user)
if user.deleted:
continue
nick_group = afk
if user.is_external:
nick_group = external
elif self.team.is_user_present(user.identifier):
nick_group = here
w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
except:
dbg("DEBUG: {} {} {}".format(self.identifier, self.name, format_exc_only()))
else:
w.nicklist_remove_all(self.channel_buffer)
for fn in ["1| too", "2| many", "3| users", "4| to", "5| show"]:
w.nicklist_add_group(self.channel_buffer, '', fn, w.color('white'), 1)
def render(self, message, force=False):
text = message.render(force)
if isinstance(message, SlackThreadMessage):
return '{}[{}]{} {}'.format(
w.color(config.color_thread_suffix),
message.parent_message.hash or message.parent_message.ts,
w.color('reset'),
text)
return text
class SlackDMChannel(SlackChannel):
"""
Subclass of a normal channel for person-to-person communication, which
has some important differences.
"""
def __init__(self, eventrouter, users, **kwargs):
dmuser = kwargs["user"]
kwargs["name"] = users[dmuser].name if dmuser in users else dmuser
super(SlackDMChannel, self).__init__(eventrouter, **kwargs)
self.type = 'im'
self.update_color()
self.set_name(self.slack_name)
if dmuser in users:
self.set_topic(create_user_status_string(users[dmuser].profile))
def set_related_server(self, team):
super(SlackDMChannel, self).set_related_server(team)
if self.user not in self.team.users:
s = SlackRequest(self.team.token, 'users.info', {'user': self.slack_name}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def set_name(self, slack_name):
self.name = slack_name
def get_members(self):
return {self.user}
def create_buffer(self):
if not self.channel_buffer:
super(SlackDMChannel, self).create_buffer()
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
def update_color(self):
if config.colorize_private_chats:
self.color_name = get_nick_color_name(self.name)
self.color = w.color(self.color_name)
else:
self.color = ""
self.color_name = ""
def formatted_name(self, style="default", typing=False, present=True, enable_color=False, **kwargs):
if config.colorize_private_chats and enable_color:
print_color = self.color
else:
print_color = ""
prepend = ""
if config.show_buflist_presence:
prepend = "+" if present else " "
select = {
"default": self.slack_name,
"sidebar": prepend + self.slack_name,
"base": self.slack_name,
"long_default": "{}.{}".format(self.team.preferred_name, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
return print_color + select[style]
def open(self, update_remote=True):
self.create_buffer()
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"name": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"users": self.user, "return_im": True}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(style="sidebar", present=self.team.is_user_present(self.user), enable_color=config.colorize_private_chats)
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
def refresh(self):
return self.rename()
class SlackGroupChannel(SlackChannel):
"""
A group channel is a private discussion group.
"""
def __init__(self, eventrouter, **kwargs):
super(SlackGroupChannel, self).__init__(eventrouter, **kwargs)
self.type = "group"
self.set_name(self.slack_name)
def set_name(self, slack_name):
self.name = config.group_name_prefix + slack_name
# def formatted_name(self, prepend="#", enable_color=True, basic=False):
# return prepend + self.slack_name
class SlackMPDMChannel(SlackChannel):
"""
An MPDM channel is a special instance of a 'group' channel.
We change the name to look less terrible in weechat.
"""
def __init__(self, eventrouter, **kwargs):
super(SlackMPDMChannel, self).__init__(eventrouter, **kwargs)
n = kwargs.get('name')
self.set_name(n)
self.type = "mpim"
def open(self, update_remote=True):
self.create_buffer()
self.active = True
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if update_remote and 'join' in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]['join'], {'users': ','.join(self.members)}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
@staticmethod
def adjust_name(n):
return "|".join("-".join(n.split("-")[1:-1]).split("--"))
def set_name(self, n):
self.name = self.adjust_name(n)
def formatted_name(self, style="default", typing=False, **kwargs):
adjusted_name = self.adjust_name(self.slack_name)
if typing and config.channel_name_typing_indicator:
prepend = ">"
else:
prepend = "@"
select = {
"default": adjusted_name,
"sidebar": prepend + adjusted_name,
"base": adjusted_name,
"long_default": "{}.{}".format(self.team.preferred_name, adjusted_name),
"long_base": "{}.{}".format(self.team.preferred_name, adjusted_name),
}
return select[style]
def rename(self):
pass
class SlackSharedChannel(SlackChannel):
def __init__(self, eventrouter, **kwargs):
super(SlackSharedChannel, self).__init__(eventrouter, **kwargs)
self.type = 'shared'
def set_related_server(self, team):
super(SlackSharedChannel, self).set_related_server(team)
# Fetch members here (after the team is known) since they aren't
# included in rtm.start
s = SlackRequest(team.token, 'conversations.members', {'channel': self.identifier}, team_hash=team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def get_history(self, slow_queue=False):
# Get info for external users in the channel
for user in self.members - set(self.team.users.keys()):
s = SlackRequest(self.team.token, 'users.info', {'user': user}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
super(SlackSharedChannel, self).get_history(slow_queue)
def set_name(self, slack_name):
self.name = config.shared_name_prefix + slack_name
class SlackThreadChannel(SlackChannelCommon):
"""
A thread channel is a virtual channel. We don't inherit from
SlackChannel, because most of how it operates will be different.
"""
def __init__(self, eventrouter, parent_message):
self.eventrouter = eventrouter
self.parent_message = parent_message
self.hashed_messages = {}
self.channel_buffer = None
# self.identifier = ""
# self.name = "#" + kwargs['name']
self.type = "thread"
self.got_history = False
self.label = None
self.members = self.parent_message.channel.members
self.team = self.parent_message.team
self.last_line_from = None
# self.set_name(self.slack_name)
# def set_name(self, slack_name):
# self.name = "#" + slack_name
@property
def identifier(self):
return self.parent_message.channel.identifier
@property
def messages(self):
return self.parent_message.channel.messages
@property
def muted(self):
return self.parent_message.channel.muted
def formatted_name(self, style="default", **kwargs):
hash_or_ts = self.parent_message.hash or self.parent_message.ts
styles = {
"default": " +{}".format(hash_or_ts),
"long_default": "{}.{}".format(self.parent_message.channel.formatted_name(style="long_default"), hash_or_ts),
"sidebar": " +{}".format(hash_or_ts),
}
return styles[style]
def refresh(self):
self.rename()
def mark_read(self, ts=None, update_remote=True, force=False):
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "unread", "")
w.buffer_set(self.channel_buffer, "hotlist", "-1")
def buffer_prnt(self, nick, text, timestamp, tag_nick=None, **kwargs):
data = "{}\t{}".format(format_nick(nick, self.last_line_from), text)
self.last_line_from = nick
ts = SlackTS(timestamp)
if self.channel_buffer:
# backlog messages - we will update the read marker as we print these
# backlog = False
# if ts <= SlackTS(self.last_read):
# tags = tag("backlog")
# backlog = True
# elif self.type in ["im", "mpdm"]:
# tags = tag("dm")
# self.new_messages = True
# else:
tags = tag("default", thread=True, muted=self.muted)
# self.new_messages = True
w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
modify_last_print_time(self.channel_buffer, ts.minor)
if tag_nick == self.team.nick:
self.mark_read(ts, update_remote=False, force=True)
def get_history(self):
self.got_history = True
for message in self.parent_message.submessages:
text = self.render(message)
self.buffer_prnt(message.sender, text, message.ts)
def main_message_keys_reversed(self):
return (message.ts for message in reversed(self.parent_message.submessages))
def send_message(self, message, subtype=None):
if subtype == 'me_message':
w.prnt("", "ERROR: /me is not supported in threads")
return w.WEECHAT_RC_ERROR
message = linkify_text(message, self.team, self)
dbg(message)
request = {"type": "message", "text": message,
"channel": self.parent_message.channel.identifier,
"thread_ts": str(self.parent_message.ts),
"user": self.team.myidentifier}
self.team.send_to_websocket(request)
def open(self, update_remote=True):
self.create_buffer()
self.active = True
self.get_history()
# if "info" in SLACK_API_TRANSLATOR[self.type]:
# s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"name": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
# self.eventrouter.receive(s)
# if update_remote:
# if "join" in SLACK_API_TRANSLATOR[self.type]:
# s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"name": self.name}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
# self.eventrouter.receive(s)
def rename(self):
if self.channel_buffer and not self.label:
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
def create_buffer(self):
"""
Creates the weechat buffer where the thread magic happens.
"""
if not self.channel_buffer:
self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
time_format = w.config_string(w.config_get("weechat.look.buffer_time_format"))
parent_time = time.localtime(SlackTS(self.parent_message.ts).major)
topic = '{} {} | {}'.format(time.strftime(time_format, parent_time), self.parent_message.sender, self.render(self.parent_message) )
w.buffer_set(self.channel_buffer, "title", topic)
# self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
# try:
# if self.unread_count != 0:
# for c in range(1, self.unread_count):
# if self.type == "im":
# w.buffer_set(self.channel_buffer, "hotlist", "2")
# else:
# w.buffer_set(self.channel_buffer, "hotlist", "1")
# else:
# pass
# #dbg("no unread in {}".format(self.name))
# except:
# pass
# dbg("exception no unread count")
# if self.unread_count != 0 and not self.muted:
# w.buffer_set(self.channel_buffer, "hotlist", "1")
def destroy_buffer(self, update_remote):
self.channel_buffer = None
self.got_history = False
self.active = False
def render(self, message, force=False):
return message.render(force)
class SlackUser(object):
"""
    Represents an individual slack user. Also where you set their name formatting.
"""
def __init__(self, originating_team_id, **kwargs):
self.identifier = kwargs["id"]
# These attributes may be missing in the response, so we have to make
# sure they're set
self.profile = {}
self.presence = kwargs.get("presence", "unknown")
self.deleted = kwargs.get("deleted", False)
self.is_external = (not kwargs.get("is_bot") and
kwargs.get("team_id") != originating_team_id)
for key, value in kwargs.items():
setattr(self, key, value)
if self.profile.get("display_name"):
self.slack_name = self.profile["display_name"]
self.name = self.profile["display_name"].replace(' ', '')
else:
# No display name set. Fall back to the deprecated username field.
self.slack_name = kwargs["name"]
self.name = self.slack_name
self.update_color()
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
def force_color(self, color_name):
self.color_name = color_name
self.color = w.color(self.color_name)
def update_color(self):
# This will automatically be none/"" if the user has disabled nick
# colourization.
self.color_name = get_nick_color_name(self.name)
self.color = w.color(self.color_name)
def update_status(self, status_emoji, status_text):
self.profile["status_emoji"] = status_emoji
self.profile["status_text"] = status_text
def formatted_name(self, prepend="", enable_color=True):
if enable_color:
return self.color + prepend + self.name + w.color("reset")
else:
return prepend + self.name
class SlackBot(SlackUser):
"""
    Basically the same as a user, but split out so bots can be identified
    separately and to allow for future needs
"""
def __init__(self, originating_team_id, **kwargs):
super(SlackBot, self).__init__(originating_team_id, is_bot=True, **kwargs)
class SlackMessage(object):
"""
Represents a single slack message and associated context/metadata.
These are modifiable and can be rerendered to change a message,
delete a message, add a reaction, add a thread.
Note: these can't be tied to a SlackUser object because users
can be deleted, so we have to store sender in each one.
"""
def __init__(self, message_json, team, channel, override_sender=None):
self.team = team
self.channel = channel
self.message_json = message_json
self.submessages = []
self.thread_channel = None
self.hash = None
if override_sender:
self.sender = override_sender
self.sender_plain = override_sender
else:
senders = self.get_sender()
self.sender, self.sender_plain = senders[0], senders[1]
self.ts = SlackTS(message_json['ts'])
def __hash__(self):
return hash(self.ts)
def open_thread(self, switch=False):
if not self.thread_channel or not self.thread_channel.active:
self.thread_channel = SlackThreadChannel(EVENTROUTER, self)
self.thread_channel.open()
if switch:
w.buffer_set(self.thread_channel.channel_buffer, "display", "1")
def render(self, force=False):
text = render(self.message_json, self.team, force)
if (self.message_json.get('subtype') == 'me_message' and
not self.message_json['text'].startswith(self.sender)):
text = "{} {}".format(self.sender, text)
if (self.message_json.get('subtype') in ('channel_join', 'group_join') and
self.message_json.get('inviter')):
inviter_id = self.message_json.get('inviter')
inviter_nick = unfurl_refs("<@{}>".format(inviter_id))
text += " by invitation from {}".format(inviter_nick)
if len(self.submessages) > 0:
text += " {}[ Thread: {} Replies: {} ]".format(
w.color(config.color_thread_suffix),
self.hash or self.ts,
len(self.submessages))
return text
def change_text(self, new_text):
self.message_json["text"] = new_text
dbg(self.message_json)
def get_sender(self):
name = ""
name_plain = ""
user = self.team.users.get(self.message_json.get('user'))
if user:
name = "{}".format(user.formatted_name())
name_plain = "{}".format(user.formatted_name(enable_color=False))
if user.is_external:
name += config.external_user_suffix
name_plain += config.external_user_suffix
elif 'username' in self.message_json:
username = self.message_json["username"]
if self.message_json.get("subtype") == "bot_message":
name = "{} :]".format(username)
name_plain = "{}".format(username)
else:
name = "-{}-".format(username)
name_plain = "{}".format(username)
elif 'service_name' in self.message_json:
name = "-{}-".format(self.message_json["service_name"])
name_plain = "{}".format(self.message_json["service_name"])
elif self.message_json.get('bot_id') in self.team.bots:
name = "{} :]".format(self.team.bots[self.message_json["bot_id"]].formatted_name())
name_plain = "{}".format(self.team.bots[self.message_json["bot_id"]].formatted_name(enable_color=False))
return (name, name_plain)
def add_reaction(self, reaction, user):
m = self.message_json.get('reactions', None)
if m:
found = False
for r in m:
if r["name"] == reaction and user not in r["users"]:
r["users"].append(user)
found = True
if not found:
self.message_json["reactions"].append({"name": reaction, "users": [user]})
else:
self.message_json["reactions"] = [{"name": reaction, "users": [user]}]
def remove_reaction(self, reaction, user):
m = self.message_json.get('reactions', None)
if m:
for r in m:
if r["name"] == reaction and user in r["users"]:
r["users"].remove(user)
else:
pass
def has_mention(self):
return w.string_has_highlight(self.message_json.get('text'), ",".join(self.channel.mentions()))
def notify_thread(self, action=None, sender_id=None):
if config.auto_open_threads:
self.open_thread()
elif sender_id != self.team.myidentifier:
if action == "mention":
template = "You were mentioned in thread {hash}, channel {channel}"
elif action == "participant":
template = "New message in thread {hash}, channel {channel} in which you participated"
elif action == "response":
template = "New message in thread {hash} in response to own message in {channel}"
else:
template = "Notification for message in thread {hash}, channel {channel}"
message = template.format(hash=self.hash, channel=self.channel.formatted_name())
self.team.buffer_prnt(message, message=True)
class SlackThreadMessage(SlackMessage):
def __init__(self, parent_message, *args):
super(SlackThreadMessage, self).__init__(*args)
self.parent_message = parent_message
class WeeSlackMetadata(object):
"""
A simple container that we pickle/unpickle to hold data.
"""
def __init__(self, meta):
self.meta = meta
def jsonify(self):
return self.meta
class Hdata(object):
def __init__(self, w):
self.buffer = w.hdata_get('buffer')
self.line = w.hdata_get('line')
self.line_data = w.hdata_get('line_data')
self.lines = w.hdata_get('lines')
class SlackTS(object):
def __init__(self, ts=None):
if ts:
self.major, self.minor = [int(x) for x in ts.split('.', 1)]
else:
self.major = int(time.time())
self.minor = 0
def __cmp__(self, other):
if isinstance(other, SlackTS):
if self.major < other.major:
return -1
elif self.major > other.major:
return 1
elif self.major == other.major:
if self.minor < other.minor:
return -1
elif self.minor > other.minor:
return 1
else:
return 0
else:
s = self.__str__()
if s < other:
return -1
elif s > other:
return 1
elif s == other:
return 0
def __hash__(self):
return hash("{}.{}".format(self.major, self.minor))
def __repr__(self):
return str("{0}.{1:06d}".format(self.major, self.minor))
def split(self, *args, **kwargs):
return [self.major, self.minor]
def majorstr(self):
return str(self.major)
def minorstr(self):
return str(self.minor)
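# Illustrative use of SlackTS, which wraps Slack's "seconds.sequence" timestamps
# (the timestamp below is a placeholder):
#
#   ts = SlackTS("1530000000.000200")
#   ts.major, ts.minor          # -> 1530000000, 200
#   repr(ts)                    # -> "1530000000.000200"
#   SlackTS() > ts              # a bare SlackTS() is "now" with minor == 0
#   ts == "1530000000.000200"   # __cmp__ also compares against plain strings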
###### New handlers
def handle_rtmstart(login_data, eventrouter):
"""
This handles the main entry call to slack, rtm.start
"""
metadata = pickle.loads(login_data["wee_slack_request_metadata"])
if not login_data["ok"]:
w.prnt("", "ERROR: Failed connecting to Slack with token starting with {}: {}"
.format(metadata.token[:15], login_data["error"]))
if not re.match(r"^xo\w\w(-\d+){3}-[0-9a-f]+$", metadata.token):
w.prnt("", "ERROR: Token does not look like a valid Slack token. "
"Ensure it is a valid token and not just a OAuth code.")
return
# Let's reuse a team if we have it already.
th = SlackTeam.generate_team_hash(login_data['self']['name'], login_data['team']['domain'])
if not eventrouter.teams.get(th):
users = {}
for item in login_data["users"]:
users[item["id"]] = SlackUser(login_data['team']['id'], **item)
bots = {}
for item in login_data["bots"]:
bots[item["id"]] = SlackBot(login_data['team']['id'], **item)
channels = {}
for item in login_data["channels"]:
if item["is_shared"]:
channels[item["id"]] = SlackSharedChannel(eventrouter, **item)
else:
channels[item["id"]] = SlackChannel(eventrouter, **item)
for item in login_data["ims"]:
channels[item["id"]] = SlackDMChannel(eventrouter, users, **item)
for item in login_data["groups"]:
if item["name"].startswith('mpdm-'):
channels[item["id"]] = SlackMPDMChannel(eventrouter, **item)
else:
channels[item["id"]] = SlackGroupChannel(eventrouter, **item)
t = SlackTeam(
eventrouter,
metadata.token,
login_data['url'],
login_data["team"],
login_data["self"]["name"],
login_data["self"]["id"],
users,
bots,
channels,
muted_channels=login_data["self"]["prefs"]["muted_channels"],
highlight_words=login_data["self"]["prefs"]["highlight_words"],
)
eventrouter.register_team(t)
else:
t = eventrouter.teams.get(th)
t.set_reconnect_url(login_data['url'])
t.connect()
t.buffer_prnt('Connected to Slack team {} ({}) with username {}'.format(
login_data["team"]["name"], t.domain, t.nick))
dbg("connected to {}".format(t.domain))
def handle_emojilist(emoji_json, eventrouter, **kwargs):
if emoji_json["ok"]:
request_metadata = pickle.loads(emoji_json["wee_slack_request_metadata"])
team = eventrouter.teams[request_metadata.team_hash]
team.emoji_completions.extend(emoji_json["emoji"].keys())
def handle_channelsinfo(channel_json, eventrouter, **kwargs):
request_metadata = pickle.loads(channel_json["wee_slack_request_metadata"])
team = eventrouter.teams[request_metadata.team_hash]
channel = team.channels[request_metadata.channel_identifier]
channel.set_unread_count_display(channel_json['channel'].get('unread_count_display', 0))
channel.set_members(channel_json['channel']['members'])
def handle_groupsinfo(group_json, eventrouter, **kwargs):
request_metadata = pickle.loads(group_json["wee_slack_request_metadata"])
team = eventrouter.teams[request_metadata.team_hash]
group = team.channels[request_metadata.channel_identifier]
group.set_unread_count_display(group_json['group'].get('unread_count_display', 0))
def handle_conversationsopen(conversation_json, eventrouter, object_name='channel', **kwargs):
request_metadata = pickle.loads(conversation_json["wee_slack_request_metadata"])
# Set unread count if the channel isn't new (channel_identifier exists)
if hasattr(request_metadata, 'channel_identifier'):
team = eventrouter.teams[request_metadata.team_hash]
conversation = team.channels[request_metadata.channel_identifier]
unread_count_display = conversation_json[object_name].get('unread_count_display', 0)
conversation.set_unread_count_display(unread_count_display)
def handle_mpimopen(mpim_json, eventrouter, object_name='group', **kwargs):
handle_conversationsopen(mpim_json, eventrouter, object_name, **kwargs)
def handle_groupshistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_channelshistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_imhistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_mpimhistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_conversationshistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_history(message_json, eventrouter, **kwargs):
request_metadata = pickle.loads(message_json["wee_slack_request_metadata"])
kwargs['team'] = eventrouter.teams[request_metadata.team_hash]
kwargs['channel'] = kwargs['team'].channels[request_metadata.channel_identifier]
if getattr(request_metadata, 'clear', False):
kwargs['channel'].clear_messages()
kwargs['channel'].got_history = True
for message in reversed(message_json["messages"]):
# Don't download historical files, considering that
# background_load_all_history might be on.
process_message(message, eventrouter, download=False, **kwargs)
def handle_conversationsmembers(members_json, eventrouter, **kwargs):
request_metadata = pickle.loads(members_json['wee_slack_request_metadata'])
team = eventrouter.teams[request_metadata.team_hash]
channel = team.channels[request_metadata.channel_identifier]
channel.members = set(members_json['members'])
def handle_usersinfo(user_json, eventrouter, **kwargs):
request_metadata = pickle.loads(user_json['wee_slack_request_metadata'])
team = eventrouter.teams[request_metadata.team_hash]
channel = team.channels[request_metadata.channel_identifier]
user_info = user_json['user']
user = SlackUser(team.identifier, **user_info)
team.users[user_info['id']] = user
if channel.type == 'shared':
channel.update_nicklist(user_info['id'])
elif channel.type == 'im':
channel.slack_name = user.name
channel.set_topic(create_user_status_string(user.profile))
###### New/converted process_ and subprocess_ methods
def process_hello(message_json, eventrouter, **kwargs):
kwargs['team'].subscribe_users_presence()
def process_reconnect_url(message_json, eventrouter, **kwargs):
kwargs['team'].set_reconnect_url(message_json['url'])
def process_manual_presence_change(message_json, eventrouter, **kwargs):
process_presence_change(message_json, eventrouter, **kwargs)
def process_presence_change(message_json, eventrouter, **kwargs):
if "user" in kwargs:
# TODO: remove once it's stable
user = kwargs["user"]
team = kwargs["team"]
team.update_member_presence(user, message_json["presence"])
if "users" in message_json:
team = kwargs["team"]
for user_id in message_json["users"]:
user = team.users[user_id]
team.update_member_presence(user, message_json["presence"])
def process_pref_change(message_json, eventrouter, **kwargs):
team = kwargs["team"]
if message_json['name'] == 'muted_channels':
team.set_muted_channels(message_json['value'])
elif message_json['name'] == 'highlight_words':
team.set_highlight_words(message_json['value'])
else:
dbg("Preference change not implemented: {}\n".format(message_json['name']))
def process_user_change(message_json, eventrouter, **kwargs):
"""
Currently only used to update status, but lots here we could do.
"""
user = message_json['user']
profile = user.get('profile')
team = kwargs['team']
team_user = team.users.get(user['id'])
if team_user:
team_user.update_status(profile.get('status_emoji'), profile.get('status_text'))
dmchannel = team.find_channel_by_members({user['id']}, channel_type='im')
if dmchannel:
dmchannel.set_topic(create_user_status_string(profile))
def process_user_typing(message_json, eventrouter, **kwargs):
channel = kwargs["channel"]
team = kwargs["team"]
if channel:
channel.set_typing(team.users.get(message_json["user"]).name)
w.bar_item_update("slack_typing_notice")
def process_team_join(message_json, eventrouter, **kwargs):
user = message_json['user']
team = kwargs["team"]
team.users[user["id"]] = SlackUser(team.identifier, **user)
def process_pong(message_json, eventrouter, **kwargs):
pass
def process_message(message_json, eventrouter, store=True, download=True, **kwargs):
channel = kwargs["channel"]
team = kwargs["team"]
if SlackTS(message_json["ts"]) in channel.messages:
return
if "thread_ts" in message_json and "reply_count" not in message_json:
message_json["subtype"] = "thread_message"
subtype = message_json.get("subtype")
subtype_functions = get_functions_with_prefix("subprocess_")
if subtype in subtype_functions:
subtype_functions[subtype](message_json, eventrouter, channel, team)
else:
message = SlackMessage(message_json, team, channel)
text = channel.render(message)
dbg("Rendered message: %s" % text)
dbg("Sender: %s (%s)" % (message.sender, message.sender_plain))
if subtype == 'me_message':
prefix = w.prefix("action").rstrip()
else:
prefix = message.sender
channel.buffer_prnt(prefix, text, message.ts,
tag_nick=message.sender_plain, **kwargs)
channel.unread_count_display += 1
if store:
channel.store_message(message, team)
dbg("NORMAL REPLY {}".format(message_json))
if download:
download_files(message_json, **kwargs)
def download_files(message_json, **kwargs):
team = kwargs["team"]
download_location = config.files_download_location
if not download_location:
return
if not os.path.exists(download_location):
try:
os.makedirs(download_location)
        except Exception:
w.prnt('', 'ERROR: Failed to create directory at files_download_location: {}'
.format(format_exc_only()))
def fileout_iter(path):
yield path
main, ext = os.path.splitext(path)
for i in count(start=1):
yield main + "-{}".format(i) + ext
for f in message_json.get('files', []):
if f.get('mode') == 'tombstone':
continue
filetype = '' if f['title'].endswith(f['filetype']) else '.' + f['filetype']
filename = '{}_{}{}'.format(team.preferred_name, f['title'], filetype)
for fileout in fileout_iter(os.path.join(download_location, filename)):
if os.path.isfile(fileout):
continue
weechat.hook_process_hashtable(
"url:" + f['url_private'],
{
'file_out': fileout,
'httpheader': 'Authorization: Bearer ' + team.token
},
config.slack_timeout, "", "")
break
def subprocess_thread_message(message_json, eventrouter, channel, team):
# print ("THREADED: " + str(message_json))
parent_ts = message_json.get('thread_ts', None)
if parent_ts:
parent_message = channel.messages.get(SlackTS(parent_ts), None)
if parent_message:
message = SlackThreadMessage(
parent_message, message_json, team, channel)
parent_message.submessages.append(message)
channel.hash_message(parent_ts)
channel.store_message(message, team)
channel.change_message(parent_ts)
if parent_message.thread_channel and parent_message.thread_channel.active:
parent_message.thread_channel.buffer_prnt(message.sender, parent_message.thread_channel.render(message), message.ts, tag_nick=message.sender_plain)
elif message.ts > channel.last_read and message.has_mention():
parent_message.notify_thread(action="mention", sender_id=message_json["user"])
if config.thread_messages_in_channel:
channel.buffer_prnt(
message.sender, channel.render(message), message.ts, tag_nick=message.sender_plain)
# channel = channels.find(message_json["channel"])
# server = channel.server
# #threadinfo = channel.get_message(message_json["thread_ts"])
# message = Message(message_json, server=server, channel=channel)
# dbg(message, main_buffer=True)
#
# orig = channel.get_message(message_json['thread_ts'])
# if orig[0]:
# channel.get_message(message_json['thread_ts'])[2].add_thread_message(message)
# else:
# dbg("COULDN'T find orig message {}".format(message_json['thread_ts']), main_buffer=True)
# if threadinfo[0]:
# channel.messages[threadinfo[1]].become_thread()
# message_json["item"]["ts"], message_json)
# channel.change_message(message_json["thread_ts"], None, message_json["text"])
# channel.become_thread(message_json["item"]["ts"], message_json)
def subprocess_channel_join(message_json, eventrouter, channel, team):
joinprefix = w.prefix("join").strip()
message = SlackMessage(message_json, team, channel, override_sender=joinprefix)
channel.buffer_prnt(joinprefix, channel.render(message), message_json["ts"], tagset='joinleave')
channel.user_joined(message_json['user'])
def subprocess_channel_leave(message_json, eventrouter, channel, team):
leaveprefix = w.prefix("quit").strip()
message = SlackMessage(message_json, team, channel, override_sender=leaveprefix)
channel.buffer_prnt(leaveprefix, channel.render(message), message_json["ts"], tagset='joinleave')
channel.user_left(message_json['user'])
# channel.update_nicklist(message_json['user'])
# channel.update_nicklist()
subprocess_group_join = subprocess_channel_join
subprocess_group_leave = subprocess_channel_leave
def subprocess_message_replied(message_json, eventrouter, channel, team):
parent_ts = message_json["message"].get("thread_ts")
parent_message = channel.messages.get(SlackTS(parent_ts))
# Thread exists but is not open yet
if parent_message is not None \
and not (parent_message.thread_channel and parent_message.thread_channel.active):
channel.hash_message(parent_ts)
last_message = max(message_json["message"]["replies"], key=lambda x: x["ts"])
if message_json["message"].get("user") == team.myidentifier:
parent_message.notify_thread(action="response", sender_id=last_message["user"])
elif any(team.myidentifier == r["user"] for r in message_json["message"]["replies"]):
parent_message.notify_thread(action="participant", sender_id=last_message["user"])
def subprocess_message_changed(message_json, eventrouter, channel, team):
new_message = message_json.get("message", None)
channel.change_message(new_message["ts"], message_json=new_message)
def subprocess_message_deleted(message_json, eventrouter, channel, team):
message = "{}{}{}".format(
w.color("red"), '(deleted)', w.color("reset"))
channel.change_message(message_json["deleted_ts"], text=message)
def subprocess_channel_topic(message_json, eventrouter, channel, team):
text = unhtmlescape(unfurl_refs(message_json["text"], ignore_alt_text=False))
channel.buffer_prnt(w.prefix("network").rstrip(), text, message_json["ts"], tagset="topic")
channel.set_topic(message_json["topic"])
def process_reply(message_json, eventrouter, **kwargs):
team = kwargs["team"]
reply_to = int(message_json["reply_to"])
original_message_json = team.ws_replies.pop(reply_to, None)
if original_message_json:
original_message_json.update(message_json)
channel = team.channels[original_message_json.get('channel')]
process_message(original_message_json, eventrouter,
channel=channel, team=team)
dbg("REPLY {}".format(message_json))
else:
dbg("Unexpected reply {}".format(message_json))
def process_channel_marked(message_json, eventrouter, **kwargs):
"""
complete
"""
channel = kwargs["channel"]
ts = message_json.get("ts", None)
if ts:
channel.mark_read(ts=ts, force=True, update_remote=False)
else:
dbg("tried to mark something weird {}".format(message_json))
def process_group_marked(message_json, eventrouter, **kwargs):
process_channel_marked(message_json, eventrouter, **kwargs)
def process_im_marked(message_json, eventrouter, **kwargs):
process_channel_marked(message_json, eventrouter, **kwargs)
def process_mpim_marked(message_json, eventrouter, **kwargs):
process_channel_marked(message_json, eventrouter, **kwargs)
def process_channel_joined(message_json, eventrouter, **kwargs):
item = message_json["channel"]
kwargs['team'].channels[item["id"]].update_from_message_json(item)
kwargs['team'].channels[item["id"]].open()
def process_channel_created(message_json, eventrouter, **kwargs):
item = message_json["channel"]
c = SlackChannel(eventrouter, team=kwargs["team"], **item)
kwargs['team'].channels[item["id"]] = c
kwargs['team'].buffer_prnt('Channel created: {}'.format(c.slack_name))
def process_channel_rename(message_json, eventrouter, **kwargs):
item = message_json["channel"]
channel = kwargs['team'].channels[item["id"]]
channel.slack_name = message_json['channel']['name']
def process_im_created(message_json, eventrouter, **kwargs):
team = kwargs['team']
item = message_json["channel"]
c = SlackDMChannel(eventrouter, team=team, users=team.users, **item)
team.channels[item["id"]] = c
kwargs['team'].buffer_prnt('IM channel created: {}'.format(c.name))
def process_im_open(message_json, eventrouter, **kwargs):
channel = kwargs['channel']
item = message_json
kwargs['team'].channels[item["channel"]].check_should_open(True)
w.buffer_set(channel.channel_buffer, "hotlist", "2")
def process_im_close(message_json, eventrouter, **kwargs):
channel = kwargs['team'].channels[message_json["channel"]]
if channel.channel_buffer:
w.prnt(kwargs['team'].channel_buffer,
'IM {} closed by another client or the server'.format(channel.name))
eventrouter.weechat_controller.unregister_buffer(channel.channel_buffer, False, True)
def process_group_joined(message_json, eventrouter, **kwargs):
item = message_json["channel"]
if item["name"].startswith("mpdm-"):
c = SlackMPDMChannel(eventrouter, team=kwargs["team"], **item)
else:
c = SlackGroupChannel(eventrouter, team=kwargs["team"], **item)
kwargs['team'].channels[item["id"]] = c
kwargs['team'].channels[item["id"]].open()
def process_reaction_added(message_json, eventrouter, **kwargs):
channel = kwargs['team'].channels.get(message_json["item"].get("channel"))
if message_json["item"].get("type") == "message":
ts = SlackTS(message_json['item']["ts"])
message = channel.messages.get(ts, None)
if message:
message.add_reaction(message_json["reaction"], message_json["user"])
channel.change_message(ts)
else:
dbg("reaction to item type not supported: " + str(message_json))
def process_reaction_removed(message_json, eventrouter, **kwargs):
channel = kwargs['team'].channels.get(message_json["item"].get("channel"))
if message_json["item"].get("type") == "message":
ts = SlackTS(message_json['item']["ts"])
message = channel.messages.get(ts, None)
if message:
message.remove_reaction(message_json["reaction"], message_json["user"])
channel.change_message(ts)
else:
dbg("Reaction to item type not supported: " + str(message_json))
def process_emoji_changed(message_json, eventrouter, **kwargs):
team = kwargs['team']
team.load_emoji_completions()
###### New module/global methods
def render_formatting(text):
text = re.sub(r'(^| )\*([^*\n`]+)\*(?=[^\w]|$)',
r'\1{}*\2*{}'.format(w.color(config.render_bold_as),
w.color('-' + config.render_bold_as)),
text,
flags=re.UNICODE)
text = re.sub(r'(^| )_([^_\n`]+)_(?=[^\w]|$)',
r'\1{}_\2_{}'.format(w.color(config.render_italic_as),
w.color('-' + config.render_italic_as)),
text,
flags=re.UNICODE)
return text
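# Illustrative example: with render_bold_as = "bold", render_formatting("say *hi* now")
# keeps the literal asterisks but wraps "*hi*" in WeeChat bold color codes; _underscores_
# are handled the same way using render_italic_as.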
def render(message_json, team, force=False):
# If we already have a rendered version in the object, just return that.
if not force and message_json.get("_rendered_text", ""):
return message_json["_rendered_text"]
else:
# server = servers.find(message_json["_server"])
if "fallback" in message_json:
text = message_json["fallback"]
elif "text" in message_json:
if message_json['text'] is not None:
text = message_json["text"]
else:
text = ""
else:
text = ""
text = unfurl_refs(text)
if "edited" in message_json:
text += "{}{}{}".format(
w.color(config.color_edited_suffix), ' (edited)', w.color("reset"))
text += unfurl_refs(unwrap_attachments(message_json, text))
text += unfurl_refs(unwrap_files(message_json, text))
text = text.lstrip()
text = unhtmlescape(text.replace("\t", " "))
if message_json.get('mrkdwn', True):
text = render_formatting(text)
text += create_reaction_string(message_json.get("reactions", ""))
message_json["_rendered_text"] = text
return text
def linkify_text(message, team, channel):
# The get_username_map function is a bit heavy, but this whole
    # function is only called on message send.
usernames = team.get_username_map()
channels = team.get_channel_map()
message = (message
# Replace IRC formatting chars with Slack formatting chars.
.replace('\x02', '*')
.replace('\x1D', '_')
.replace('\x1F', config.map_underline_to)
# Escape chars that have special meaning to Slack. Note that we do not
# (and should not) perform full HTML entity-encoding here.
# See https://api.slack.com/docs/message-formatting for details.
.replace('&', '&')
.replace('<', '<')
.replace('>', '>')
.split(' '))
for item in enumerate(message):
        targets = re.match(r'^\s*([@#])([\w\(\)\'.-]+)(\W*)', item[1], re.UNICODE)
if targets and targets.groups()[0] == '@':
named = targets.groups()
if named[1] in ["group", "channel", "here"]:
message[item[0]] = "<!{}>".format(named[1])
else:
                try:
                    if usernames[named[1]]:
                        message[item[0]] = "<@{}>{}".format(usernames[named[1]], named[2])
                except KeyError:
                    message[item[0]] = "@{}{}".format(named[1], named[2])
if targets and targets.groups()[0] == '#':
named = targets.groups()
            try:
                if channels[named[1]]:
                    message[item[0]] = "<#{}|{}>{}".format(channels[named[1]], named[1], named[2])
            except KeyError:
                message[item[0]] = "#{}{}".format(named[1], named[2])
# dbg(message)
return " ".join(message)
def unfurl_refs(text, ignore_alt_text=None, auto_link_display=None):
"""
input : <@U096Q7CQM|someuser> has joined the channel
    output : someuser has joined the channel
"""
# Find all strings enclosed by <>
# - <https://example.com|example with spaces>
# - <#C2147483705|#otherchannel>
# - <@U2147483697|@othernick>
    # Test patterns live in ./_pytest/test_unfurl.py
if ignore_alt_text is None:
ignore_alt_text = config.unfurl_ignore_alt_text
if auto_link_display is None:
auto_link_display = config.unfurl_auto_link_display
matches = re.findall(r"(<[@#]?(?:[^>]*)>)", text)
for m in matches:
# Replace them with human readable strings
text = text.replace(
m, unfurl_ref(m[1:-1], ignore_alt_text, auto_link_display))
return text
def unfurl_ref(ref, ignore_alt_text, auto_link_display):
id = ref.split('|')[0]
display_text = ref
if ref.find('|') > -1:
if ignore_alt_text:
display_text = resolve_ref(id)
else:
if id.startswith("#C"):
display_text = "#{}".format(ref.split('|')[1])
elif id.startswith("@U"):
display_text = ref.split('|')[1]
else:
url, desc = ref.split('|', 1)
match_url = r"^\w+:(//)?{}$".format(re.escape(desc))
url_matches_desc = re.match(match_url, url)
if url_matches_desc and auto_link_display == "text":
display_text = desc
elif url_matches_desc and auto_link_display == "url":
display_text = url
else:
display_text = "{} ({})".format(url, desc)
else:
display_text = resolve_ref(ref)
return display_text
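# Illustrative examples (IDs made up): unfurl_ref("@U024BE7LH|alice", False, "both")
# returns "alice", while unfurl_ref("https://example.com|example text", False, "both")
# returns "https://example.com (example text)" because the description does not match
# the URL.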
def unhtmlescape(text):
return text.replace("<", "<") \
.replace(">", ">") \
.replace("&", "&")
def unwrap_attachments(message_json, text_before):
text_before_unescaped = unhtmlescape(text_before)
attachment_texts = []
a = message_json.get("attachments", None)
if a:
if text_before:
attachment_texts.append('')
for attachment in a:
# Attachments should be rendered roughly like:
#
# $pretext
# $author: (if rest of line is non-empty) $title ($title_link) OR $from_url
# $author: (if no $author on previous line) $text
# $fields
t = []
prepend_title_text = ''
if 'author_name' in attachment:
prepend_title_text = attachment['author_name'] + ": "
if 'pretext' in attachment:
t.append(attachment['pretext'])
title = attachment.get('title', None)
title_link = attachment.get('title_link', '')
if title_link in text_before_unescaped:
title_link = ''
if title and title_link:
t.append('%s%s (%s)' % (prepend_title_text, title, title_link,))
prepend_title_text = ''
elif title and not title_link:
t.append('%s%s' % (prepend_title_text, title,))
prepend_title_text = ''
from_url = attachment.get('from_url', '')
if from_url not in text_before_unescaped and from_url != title_link:
t.append(from_url)
atext = attachment.get("text", None)
if atext:
tx = re.sub(r' *\n[\n ]+', '\n', atext)
t.append(prepend_title_text + tx)
prepend_title_text = ''
image_url = attachment.get('image_url', '')
if image_url not in text_before_unescaped and image_url != title_link:
t.append(image_url)
fields = attachment.get("fields", None)
if fields:
for f in fields:
if f['title'] != '':
t.append('%s %s' % (f['title'], f['value'],))
else:
t.append(f['value'])
fallback = attachment.get("fallback", None)
if t == [] and fallback:
t.append(fallback)
attachment_texts.append("\n".join([x.strip() for x in t if x]))
return "\n".join(attachment_texts)
def unwrap_files(message_json, text_before):
files_texts = []
for f in message_json.get('files', []):
if f.get('mode', '') != 'tombstone':
text = '{} ({})'.format(f['url_private'], f['title'])
else:
text = '{}(This file was deleted.){}'.format(
w.color("red"),
w.color("reset"))
files_texts.append(text)
if text_before:
files_texts.insert(0, '')
return "\n".join(files_texts)
def resolve_ref(ref):
# TODO: This hack to use eventrouter needs to go
# this resolver should probably move to the slackteam or eventrouter itself
# global EVENTROUTER
if 'EVENTROUTER' in globals():
e = EVENTROUTER
if ref.startswith('@U') or ref.startswith('@W'):
for t in e.teams.keys():
user = e.teams[t].users.get(ref[1:])
if user:
name = '@{}'.format(user.name)
if user.is_external:
name += config.external_user_suffix
return name
elif ref.startswith('#C'):
for t in e.teams.keys():
if ref[1:] in e.teams[t].channels:
# try:
return "{}".format(e.teams[t].channels[ref[1:]].name)
# except:
# dbg("CHANNEL: {}".format(ref))
# Something else, just return as-is
return ref
def create_user_status_string(profile):
real_name = profile.get("real_name")
status_emoji = profile.get("status_emoji")
status_text = profile.get("status_text")
if status_emoji or status_text:
return "{} | {} {}".format(real_name, status_emoji, status_text)
else:
return real_name
def create_reaction_string(reactions):
count = 0
if not isinstance(reactions, list):
reaction_string = " {}[{}]{}".format(
w.color(config.color_reaction_suffix), reactions, w.color("reset"))
else:
reaction_string = ' {}['.format(w.color(config.color_reaction_suffix))
for r in reactions:
if len(r["users"]) > 0:
count += 1
if config.show_reaction_nicks:
nicks = [resolve_ref("@{}".format(user)) for user in r["users"]]
users = "({})".format(",".join(nicks))
else:
users = len(r["users"])
reaction_string += ":{}:{} ".format(r["name"], users)
reaction_string = reaction_string[:-1] + ']'
if count == 0:
reaction_string = ''
return reaction_string
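# Illustrative example: with show_reaction_nicks disabled,
# create_reaction_string([{"name": "wave", "users": ["U1", "U2"]}]) returns
# " [:wave:2]" wrapped in the color_reaction_suffix color; a reaction list whose
# entries have no users produces an empty string.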
def hdata_line_ts(line_pointer):
data = w.hdata_pointer(hdata.line, line_pointer, 'data')
ts_major = w.hdata_time(hdata.line_data, data, 'date')
ts_minor = w.hdata_time(hdata.line_data, data, 'date_printed')
return (ts_major, ts_minor)
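# Note: wee-slack stores the sub-second part of a message's Slack ts in the line's
# "date_printed" field (see modify_last_print_time below), so the (date, date_printed)
# pair returned here corresponds to a SlackTS (major, minor) pair.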
def modify_buffer_line(buffer_pointer, ts, new_text):
own_lines = w.hdata_pointer(hdata.buffer, buffer_pointer, 'own_lines')
line_pointer = w.hdata_pointer(hdata.lines, own_lines, 'last_line')
# Find the last line with this ts
while line_pointer and hdata_line_ts(line_pointer) != (ts.major, ts.minor):
line_pointer = w.hdata_move(hdata.line, line_pointer, -1)
# Find all lines for the message
pointers = []
while line_pointer and hdata_line_ts(line_pointer) == (ts.major, ts.minor):
pointers.append(line_pointer)
line_pointer = w.hdata_move(hdata.line, line_pointer, -1)
pointers.reverse()
# Split the message into at most the number of existing lines as we can't insert new lines
lines = new_text.split('\n', len(pointers) - 1)
# Replace newlines to prevent garbled lines in bare display mode
lines = [line.replace('\n', ' | ') for line in lines]
# Extend lines in case the new message is shorter than the old as we can't delete lines
lines += [''] * (len(pointers) - len(lines))
for pointer, line in zip(pointers, lines):
data = w.hdata_pointer(hdata.line, pointer, 'data')
w.hdata_update(hdata.line_data, data, {"message": line})
return w.WEECHAT_RC_OK
def modify_last_print_time(buffer_pointer, ts_minor):
"""
    This overloads the line's "date_printed" field to store the per-message
    unique id that comes after the "." in a Slack ts.
"""
own_lines = w.hdata_pointer(hdata.buffer, buffer_pointer, 'own_lines')
line_pointer = w.hdata_pointer(hdata.lines, own_lines, 'last_line')
while line_pointer:
data = w.hdata_pointer(hdata.line, line_pointer, 'data')
w.hdata_update(hdata.line_data, data, {"date_printed": str(ts_minor)})
if w.hdata_string(hdata.line_data, data, 'prefix'):
# Reached the first line of the message, so stop here
break
# Move one line backwards so all lines of the message are set
line_pointer = w.hdata_move(hdata.line, line_pointer, -1)
return w.WEECHAT_RC_OK
def format_nick(nick, previous_nick=None):
if nick == previous_nick:
nick = w.config_string(w.config_get('weechat.look.prefix_same_nick')) or nick
nick_prefix = w.config_string(w.config_get('weechat.look.nick_prefix'))
nick_prefix_color_name = w.config_string(w.config_get('weechat.color.chat_nick_prefix'))
nick_prefix_color = w.color(nick_prefix_color_name)
nick_suffix = w.config_string(w.config_get('weechat.look.nick_suffix'))
    nick_suffix_color_name = w.config_string(w.config_get('weechat.color.chat_nick_suffix'))
nick_suffix_color = w.color(nick_suffix_color_name)
return nick_prefix_color + nick_prefix + w.color("reset") + nick + nick_suffix_color + nick_suffix + w.color("reset")
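# Illustrative example: with weechat.look.nick_prefix "<" and weechat.look.nick_suffix ">",
# format_nick("alice") returns "<alice>" wrapped in the configured prefix/suffix colors;
# if the nick equals previous_nick it is replaced by weechat.look.prefix_same_nick
# (when that option is set).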
def tag(tagset, user=None, thread=False, muted=False):
tagsets = {
# messages in the team/server buffer, e.g. "new channel created"
"team_info": {"no_highlight", "log3"},
"team_message": {"irc_privmsg", "notify_message", "log1"},
# when replaying something old
"backlog": {"irc_privmsg", "no_highlight", "notify_none", "logger_backlog"},
# when receiving a direct message
"dm": {"irc_privmsg", "notify_private", "log1"},
"dmfromme": {"irc_privmsg", "no_highlight", "notify_none", "log1"},
# when this is a join/leave, attach for smart filter ala:
# if user in [x.strip() for x in w.prefix("join"), w.prefix("quit")]
"joinleave": {"irc_smart_filter", "no_highlight", "log4"},
"topic": {"irc_topic", "no_highlight", "log3"},
# catchall ?
"default": {"irc_privmsg", "notify_message", "log1"},
}
nick_tag = {"nick_{}".format(user or "unknown").replace(" ", "_")}
slack_tag = {"slack_{}".format(tagset)}
tags = nick_tag | slack_tag | tagsets[tagset]
if muted:
tags.add("slack_muted_channel")
if not thread and config.muted_channels_activity != "all":
tags -= {"notify_highlight", "notify_message", "notify_private"}
tags.add("notify_none")
if config.muted_channels_activity == "none":
tags.add("no_highlight")
return ",".join(tags)
###### New/converted command_ commands
@slack_buffer_or_ignore
@utf8_decode
def part_command_cb(data, current_buffer, args):
e = EVENTROUTER
args = args.split()
if len(args) > 1:
team = e.weechat_controller.buffers[current_buffer].team
cmap = team.get_channel_map()
channel = "".join(args[1:])
if channel in cmap:
buffer_ptr = team.channels[cmap[channel]].channel_buffer
e.weechat_controller.unregister_buffer(buffer_ptr, update_remote=True, close_buffer=True)
else:
e.weechat_controller.unregister_buffer(current_buffer, update_remote=True, close_buffer=True)
return w.WEECHAT_RC_OK_EAT
def parse_topic_command(command):
args = command.split()[1:]
channel_name = None
topic = None
if args:
if args[0].startswith('#'):
channel_name = args[0][1:]
topic = args[1:]
else:
topic = args
if topic == []:
topic = None
if topic:
topic = ' '.join(topic)
if topic == '-delete':
topic = ''
return channel_name, topic
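# Illustrative examples: parse_topic_command("/topic #general hello world") returns
# ("general", "hello world"), parse_topic_command("/topic -delete") returns (None, ""),
# and parse_topic_command("/topic") returns (None, None).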
@slack_buffer_or_ignore
@utf8_decode
def topic_command_cb(data, current_buffer, command):
"""
Change the topic of a channel
/topic [<channel>] [<topic>|-delete]
"""
channel_name, topic = parse_topic_command(command)
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
if channel_name:
channel = team.channels.get(team.get_channel_map().get(channel_name))
else:
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if not channel:
w.prnt(team.channel_buffer, "#{}: No such channel".format(channel_name))
return w.WEECHAT_RC_OK_EAT
if topic is None:
w.prnt(channel.channel_buffer, 'Topic for {} is "{}"'.format(channel.name, channel.topic))
else:
s = SlackRequest(team.token, "channels.setTopic", {"channel": channel.identifier, "topic": topic}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def whois_command_cb(data, current_buffer, command):
"""
Get real name of user
/whois <display_name>
"""
args = command.split()
if len(args) < 2:
w.prnt(current_buffer, "Not enough arguments")
return w.WEECHAT_RC_OK_EAT
user = args[1]
if (user.startswith('@')):
user = user[1:]
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
u = team.users.get(team.get_username_map().get(user))
if u:
team.buffer_prnt("[{}]: {}".format(user, u.real_name))
if u.profile.get("status_text"):
team.buffer_prnt("[{}]: {} {}".format(user, u.profile.get('status_emoji', ''), u.profile.get('status_text', '')))
team.buffer_prnt("[{}]: Real name: {}".format(user, u.profile.get('real_name_normalized', '')))
team.buffer_prnt("[{}]: Title: {}".format(user, u.profile.get('title', '')))
team.buffer_prnt("[{}]: Email: {}".format(user, u.profile.get('email', '')))
team.buffer_prnt("[{}]: Phone: {}".format(user, u.profile.get('phone', '')))
else:
team.buffer_prnt("[{}]: No such user".format(user))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def me_command_cb(data, current_buffer, args):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
message = args.split(' ', 1)[1]
channel.send_message(message, subtype='me_message')
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def command_register(data, current_buffer, args):
"""
/slack register
Register a Slack team in wee-slack.
"""
CLIENT_ID = "2468770254.51917335286"
CLIENT_SECRET = "dcb7fe380a000cba0cca3169a5fe8d70" # Not really a secret.
if not args:
message = textwrap.dedent("""
#### Retrieving a Slack token via OAUTH ####
1) Paste this into a browser: https://slack.com/oauth/authorize?client_id=2468770254.51917335286&scope=client
2) Select the team you wish to access from wee-slack in your browser.
3) Click "Authorize" in the browser **IMPORTANT: the redirect will fail, this is expected**
If you get a message saying you are not authorized to install wee-slack, the team has restricted Slack app installation and you will have to request it from an admin. To do that, go to https://my.slack.com/apps/A1HSZ9V8E-wee-slack and click "Request to Install".
4) Copy the "code" portion of the URL to your clipboard
5) Return to weechat and run `/slack register [code]`
""").strip()
w.prnt("", message)
return w.WEECHAT_RC_OK_EAT
uri = (
"https://slack.com/api/oauth.access?"
"client_id={}&client_secret={}&code={}"
).format(CLIENT_ID, CLIENT_SECRET, args)
params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
w.hook_process_hashtable('url:', params, config.slack_timeout, "", "")
w.hook_process_hashtable("url:{}".format(uri), params, config.slack_timeout, "register_callback", "")
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def register_callback(data, command, return_code, out, err):
if return_code != 0:
w.prnt("", "ERROR: problem when trying to get Slack OAuth token. Got return code {}. Err: ".format(return_code, err))
w.prnt("", "Check the network or proxy settings")
return w.WEECHAT_RC_OK_EAT
if len(out) <= 0:
w.prnt("", "ERROR: problem when trying to get Slack OAuth token. Got 0 length answer. Err: ".format(err))
w.prnt("", "Check the network or proxy settings")
return w.WEECHAT_RC_OK_EAT
d = json.loads(out)
if not d["ok"]:
w.prnt("",
"ERROR: Couldn't get Slack OAuth token: {}".format(d['error']))
return w.WEECHAT_RC_OK_EAT
if config.is_default('slack_api_token'):
w.config_set_plugin('slack_api_token', d['access_token'])
else:
# Add new token to existing set, joined by comma.
tok = config.get_string('slack_api_token')
w.config_set_plugin('slack_api_token',
','.join([tok, d['access_token']]))
w.prnt("", "Success! Added team \"%s\"" % (d['team_name'],))
w.prnt("", "Please reload wee-slack with: /python reload slack")
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def msg_command_cb(data, current_buffer, args):
aargs = args.split(None, 2)
who = aargs[1]
if who == "*":
who = EVENTROUTER.weechat_controller.buffers[current_buffer].slack_name
else:
join_query_command_cb(data, current_buffer, '/query ' + who)
if len(aargs) > 2:
message = aargs[2]
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
cmap = team.get_channel_map()
if who in cmap:
channel = team.channels[cmap[who]]
channel.send_message(message)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_channels(data, current_buffer, args):
"""
/slack channels
List the channels in the current team.
"""
e = EVENTROUTER
team = e.weechat_controller.buffers[current_buffer].team
team.buffer_prnt("Channels:")
for channel in team.get_channel_map():
team.buffer_prnt(" {}".format(channel))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_users(data, current_buffer, args):
"""
/slack users
List the users in the current team.
"""
e = EVENTROUTER
team = e.weechat_controller.buffers[current_buffer].team
team.buffer_prnt("Users:")
for user in team.users.values():
team.buffer_prnt(" {:<25}({})".format(user.name, user.presence))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_talk(data, current_buffer, args):
"""
/slack talk <user>[,<user2>[,<user3>...]]
Open a chat with the specified user(s).
"""
if not args:
w.prnt('', 'Usage: /slack talk <user>[,<user2>[,<user3>...]]')
return w.WEECHAT_RC_ERROR
return join_query_command_cb(data, current_buffer, '/query ' + args)
@slack_buffer_or_ignore
@utf8_decode
def join_query_command_cb(data, current_buffer, args):
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
split_args = args.split(' ', 1)
if len(split_args) < 2 or not split_args[1]:
w.prnt('', 'Too few arguments for command "{}" (help on command: /help {})'
.format(split_args[0], split_args[0].lstrip('/')))
return w.WEECHAT_RC_OK_EAT
query = split_args[1]
# Try finding the channel by name
channel = team.channels.get(team.get_channel_map().get(query.lstrip('#')))
# If the channel doesn't exist, try finding a DM or MPDM instead
if not channel:
if query.startswith('#'):
w.prnt('', 'ERROR: Unknown channel: {}'.format(query))
return w.WEECHAT_RC_OK_EAT
# Get the IDs of the users
all_users = team.get_username_map()
users = set()
for username in query.split(','):
user = all_users.get(username.lstrip('@'))
if not user:
w.prnt('', 'ERROR: Unknown user: {}'.format(username))
return w.WEECHAT_RC_OK_EAT
users.add(user)
if users:
if len(users) > 1:
channel_type = 'mpim'
# Add the current user since MPDMs include them as a member
users.add(team.myidentifier)
else:
channel_type = 'im'
channel = team.find_channel_by_members(users, channel_type=channel_type)
# If the DM or MPDM doesn't exist, create it
if not channel:
s = SlackRequest(team.token, SLACK_API_TRANSLATOR[channel_type]['join'],
{'users': ','.join(users)}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
if channel:
channel.open()
if config.switch_buffer_on_join:
w.buffer_set(channel.channel_buffer, "display", "1")
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_showmuted(data, current_buffer, args):
"""
/slack showmuted
List the muted channels in the current team.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
muted_channels = [team.channels[key].name
for key in team.muted_channels if key in team.channels]
team.buffer_prnt("Muted channels: {}".format(', '.join(muted_channels)))
return w.WEECHAT_RC_OK_EAT
def get_msg_from_id(channel, msg_id):
if msg_id[0] == '$':
msg_id = msg_id[1:]
return channel.hashed_messages.get(msg_id)
@slack_buffer_required
@utf8_decode
def command_thread(data, current_buffer, args):
"""
/thread [message_id]
Open the thread for the message.
    If no message id is specified, the last thread in the channel will be opened.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if args:
msg = get_msg_from_id(channel, args)
if not msg:
w.prnt('', 'ERROR: Invalid id given, must be an existing id')
return w.WEECHAT_RC_OK_EAT
else:
for message in reversed(channel.messages.values()):
if type(message) == SlackMessage and len(message.submessages) > 0:
msg = message
break
else:
w.prnt('', 'ERROR: No threads found in channel')
return w.WEECHAT_RC_OK_EAT
msg.open_thread(switch=config.switch_buffer_on_join)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_reply(data, current_buffer, args):
"""
/reply <count/message_id> <text>
    Reply in a thread on the message. Specify either the message id
    or a count upwards from the most recent message.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
try:
msg_id, text = args.split(None, 1)
except ValueError:
w.prnt('', 'Usage: /reply <count/id> <message>')
return w.WEECHAT_RC_OK_EAT
msg = get_msg_from_id(channel, msg_id)
if msg:
parent_id = str(msg.ts)
elif msg_id.isdigit() and int(msg_id) >= 1:
mkeys = channel.main_message_keys_reversed()
parent_id = str(next(islice(mkeys, int(msg_id) - 1, None)))
else:
w.prnt('', 'ERROR: Invalid id given, must be a number greater than 0 or an existing id')
return w.WEECHAT_RC_OK_EAT
channel.send_message(text, request_dict_ext={'thread_ts': parent_id})
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_rehistory(data, current_buffer, args):
"""
/rehistory
Reload the history in the current channel.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
channel.clear_messages()
channel.get_history()
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_hide(data, current_buffer, args):
"""
/hide
Hide the current channel if it is marked as distracting.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
name = channel.formatted_name(style='long_default')
if name in config.distracting_channels:
w.buffer_set(channel.channel_buffer, "hidden", "1")
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def slack_command_cb(data, current_buffer, args):
split_args = args.split(' ', 1)
cmd_name = split_args[0]
cmd_args = split_args[1] if len(split_args) > 1 else ''
cmd = EVENTROUTER.cmds.get(cmd_name or 'help')
if not cmd:
w.prnt('', 'Command not found: ' + cmd_name)
return w.WEECHAT_RC_OK
return cmd(data, current_buffer, cmd_args)
@utf8_decode
def command_help(data, current_buffer, args):
"""
/slack help
Print help for /slack commands.
"""
if args:
cmd = EVENTROUTER.cmds.get(args)
if cmd:
cmds = {args: cmd}
else:
w.prnt('', 'Command not found: ' + args)
return w.WEECHAT_RC_OK
else:
cmds = EVENTROUTER.cmds
w.prnt('', 'Slack commands:')
for name, cmd in sorted(cmds.items()):
helptext = (cmd.__doc__ or '').rstrip()
w.prnt('', '{}:{}'.format(name, helptext))
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_distracting(data, current_buffer, args):
"""
/slack distracting
Add or remove the current channel from distracting channels. You can hide
or unhide these channels with /slack nodistractions.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
fullname = channel.formatted_name(style="long_default")
if fullname in config.distracting_channels:
config.distracting_channels.remove(fullname)
else:
config.distracting_channels.append(fullname)
w.config_set_plugin('distracting_channels', ','.join(config.distracting_channels))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_slash(data, current_buffer, args):
"""
/slack slash /customcommand arg1 arg2 arg3
Run a custom slack command.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
team = channel.team
split_args = args.split(' ', 1)
command = split_args[0]
text = split_args[1] if len(split_args) > 1 else ""
s = SlackRequest(team.token, "chat.command",
{"command": command, "text": text, 'channel': channel.identifier},
team_hash=team.team_hash, channel_identifier=channel.identifier)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_mute(data, current_buffer, args):
"""
/slack mute
Toggle mute on the current channel.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
team = channel.team
team.muted_channels ^= {channel.identifier}
muted_str = "Muted" if channel.identifier in team.muted_channels else "Unmuted"
team.buffer_prnt("{} channel {}".format(muted_str, channel.name))
s = SlackRequest(team.token, "users.prefs.set",
{"name": "muted_channels", "value": ",".join(team.muted_channels)},
team_hash=team.team_hash, channel_identifier=channel.identifier)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_linkarchive(data, current_buffer, args):
"""
/slack linkarchive [message_id]
Place a link to the channel or message in the input bar.
Use cursor or mouse mode to get the id.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
url = 'https://{}/'.format(channel.team.domain)
if isinstance(channel, SlackChannelCommon):
url += 'archives/{}/'.format(channel.identifier)
if args:
if args[0] == '$':
message_id = args[1:]
else:
message_id = args
message = channel.hashed_messages.get(message_id)
if message:
url += 'p{}{:0>6}'.format(message.ts.majorstr(), message.ts.minorstr())
if isinstance(message, SlackThreadMessage):
url += "?thread_ts={}&cid={}".format(message.parent_message.ts, channel.identifier)
else:
w.prnt('', 'ERROR: Invalid id given, must be an existing id')
return w.WEECHAT_RC_OK_EAT
w.command(current_buffer, "/input insert {}".format(url))
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def command_nodistractions(data, current_buffer, args):
"""
/slack nodistractions
Hide or unhide all channels marked as distracting.
"""
global hide_distractions
hide_distractions = not hide_distractions
channels = [channel for channel in EVENTROUTER.weechat_controller.buffers.itervalues()
if channel in config.distracting_channels]
for channel in channels:
w.buffer_set(channel.channel_buffer, "hidden", str(int(hide_distractions)))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_upload(data, current_buffer, args):
"""
/slack upload <filename>
Uploads a file to the current buffer.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
url = 'https://slack.com/api/files.upload'
file_path = os.path.expanduser(args)
if ' ' in file_path:
        file_path = file_path.replace(' ', '\\ ')
    # only an HTTP proxy is currently supported
proxy = ProxyWrapper()
proxy_string = proxy.curl()
form_fields = {
'file': '@' + file_path,
'channels': channel.identifier,
'token': channel.team.token,
}
if isinstance(channel, SlackThreadChannel):
form_fields['thread_ts'] = channel.parent_message.ts
curl_options = ' '.join(
'-F {}={}'.format(*field) for field in form_fields.iteritems())
command = 'curl {} {} {}'.format(curl_options, proxy_string, url)
w.hook_process(command, config.slack_timeout, '', '')
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def away_command_cb(data, current_buffer, args):
all_servers, message = re.match('^/away( -all)? ?(.*)', args).groups()
if all_servers:
team_buffers = [team.channel_buffer for team in EVENTROUTER.teams.values()]
else:
team_buffers = [current_buffer]
for team_buffer in team_buffers:
if message:
command_away(data, team_buffer, args)
else:
command_back(data, team_buffer, args)
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_away(data, current_buffer, args):
"""
/slack away
Sets your status as 'away'.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
s = SlackRequest(team.token, "users.setPresence", {"presence": "away"}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_status(data, current_buffer, args):
"""
/slack status [emoji [status_message]]
Lets you set your Slack Status (not to be confused with away/here).
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
split_args = args.split(' ', 1)
emoji = split_args[0]
text = split_args[1] if len(split_args) > 1 else ""
profile = {"status_text": text, "status_emoji": emoji}
s = SlackRequest(team.token, "users.profile.set", {"profile": profile}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK
@utf8_decode
def line_event_cb(data, signal, hashtable):
buffer_pointer = hashtable["_buffer"]
line_timestamp = hashtable["_chat_line_date"]
line_time_id = hashtable["_chat_line_date_printed"]
channel = EVENTROUTER.weechat_controller.buffers.get(buffer_pointer)
if line_timestamp and line_time_id and isinstance(channel, SlackChannelCommon):
ts = SlackTS("{}.{}".format(line_timestamp, line_time_id))
message_hash = channel.hash_message(ts)
if message_hash is None:
return w.WEECHAT_RC_OK
message_hash = "$" + message_hash
if data == "message":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/input insert {}".format(message_hash))
elif data == "delete":
w.command(buffer_pointer, "/input send {}s///".format(message_hash))
elif data == "linkarchive":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/slack linkarchive {}".format(message_hash[1:]))
elif data == "reply":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/input insert /reply {}\\x20".format(message_hash))
elif data == "thread":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/thread {}".format(message_hash))
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_back(data, current_buffer, args):
"""
/slack back
Sets your status as 'back'.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
s = SlackRequest(team.token, "users.setPresence", {"presence": "auto"}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_label(data, current_buffer, args):
"""
/label <name>
Rename a thread buffer. Note that this is not permanent. It will only last
as long as you keep the buffer and wee-slack open.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if channel.type == 'thread':
new_name = " +" + args
channel.label = new_name
w.buffer_set(channel.channel_buffer, "short_name", new_name)
return w.WEECHAT_RC_OK
@utf8_decode
def set_unread_cb(data, current_buffer, command):
for channel in EVENTROUTER.weechat_controller.buffers.values():
channel.mark_read()
return w.WEECHAT_RC_OK
@slack_buffer_or_ignore
@utf8_decode
def set_unread_current_buffer_cb(data, current_buffer, command):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
channel.mark_read()
return w.WEECHAT_RC_OK
###### NEW EXCEPTIONS
class InvalidType(Exception):
"""
Raised when we do type checking to ensure objects of the wrong
type are not used improperly.
"""
def __init__(self, type_str):
super(InvalidType, self).__init__(type_str)
###### New but probably old and need to migrate
def closed_slack_debug_buffer_cb(data, buffer):
global slack_debug
slack_debug = None
return w.WEECHAT_RC_OK
def create_slack_debug_buffer():
global slack_debug, debug_string
if slack_debug is not None:
w.buffer_set(slack_debug, "display", "1")
else:
debug_string = None
slack_debug = w.buffer_new("slack-debug", "", "", "closed_slack_debug_buffer_cb", "")
w.buffer_set(slack_debug, "notify", "0")
def load_emoji():
try:
DIR = w.info_get("weechat_dir", "")
with open('{}/weemoji.json'.format(DIR), 'r') as ef:
return json.loads(ef.read())["emoji"]
    except Exception:
dbg("Couldn't load emoji list: {}".format(format_exc_only()), 5)
return []
def setup_hooks():
w.bar_item_new('slack_typing_notice', '(extra)typing_bar_item_cb', '')
w.hook_timer(5000, 0, 0, "ws_ping_cb", "")
w.hook_timer(1000, 0, 0, "typing_update_cb", "")
w.hook_timer(1000, 0, 0, "buffer_list_update_callback", "EVENTROUTER")
w.hook_timer(3000, 0, 0, "reconnect_callback", "EVENTROUTER")
w.hook_timer(1000 * 60 * 5, 0, 0, "slack_never_away_cb", "")
w.hook_signal('buffer_closing', "buffer_closing_callback", "")
w.hook_signal('buffer_switch', "buffer_switch_callback", "EVENTROUTER")
w.hook_signal('window_switch', "buffer_switch_callback", "EVENTROUTER")
w.hook_signal('quit', "quit_notification_callback", "")
if config.send_typing_notice:
w.hook_signal('input_text_changed', "typing_notification_cb", "")
w.hook_command(
# Command name and description
'slack', 'Plugin to allow typing notification and sync of read markers for slack.com',
# Usage
'[command] [command options]',
# Description of arguments
'Commands:\n' +
'\n'.join(sorted(EVENTROUTER.cmds.keys())) +
'\nUse /slack help [command] to find out more\n',
# Completions
'|'.join(EVENTROUTER.cmds.keys()),
# Function name
'slack_command_cb', '')
w.hook_command_run('/me', 'me_command_cb', '')
w.hook_command_run('/query', 'join_query_command_cb', '')
w.hook_command_run('/join', 'join_query_command_cb', '')
w.hook_command_run('/part', 'part_command_cb', '')
w.hook_command_run('/topic', 'topic_command_cb', '')
w.hook_command_run('/msg', 'msg_command_cb', '')
w.hook_command_run("/input complete_next", "complete_next_cb", "")
w.hook_command_run("/input set_unread", "set_unread_cb", "")
w.hook_command_run("/input set_unread_current_buffer", "set_unread_current_buffer_cb", "")
w.hook_command_run('/away', 'away_command_cb', '')
w.hook_command_run('/whois', 'whois_command_cb', '')
for cmd in ['hide', 'label', 'rehistory', 'reply', 'thread']:
doc = EVENTROUTER.cmds[cmd].__doc__.strip().split('\n', 1)
args = ' '.join(doc[0].split()[1:])
description = textwrap.dedent(doc[1])
w.hook_command(cmd, description, args, '', '', 'command_' + cmd, '')
w.hook_completion("nicks", "complete @-nicks for slack", "nick_completion_cb", "")
w.hook_completion("emoji", "complete :emoji: for slack", "emoji_completion_cb", "")
w.key_bind("mouse", {
"@chat(python.*):button2": "hsignal:slack_mouse",
})
w.key_bind("cursor", {
"@chat(python.*):D": "hsignal:slack_cursor_delete",
"@chat(python.*):L": "hsignal:slack_cursor_linkarchive",
"@chat(python.*):M": "hsignal:slack_cursor_message",
"@chat(python.*):R": "hsignal:slack_cursor_reply",
"@chat(python.*):T": "hsignal:slack_cursor_thread",
})
w.hook_hsignal("slack_mouse", "line_event_cb", "message")
w.hook_hsignal("slack_cursor_delete", "line_event_cb", "delete")
w.hook_hsignal("slack_cursor_linkarchive", "line_event_cb", "linkarchive")
w.hook_hsignal("slack_cursor_message", "line_event_cb", "message")
w.hook_hsignal("slack_cursor_reply", "line_event_cb", "reply")
w.hook_hsignal("slack_cursor_thread", "line_event_cb", "thread")
# Hooks to fix/implement
# w.hook_signal('buffer_opened', "buffer_opened_cb", "")
# w.hook_signal('window_scrolled', "scrolled_cb", "")
# w.hook_timer(3000, 0, 0, "slack_connection_persistence_cb", "")
##### END NEW
def dbg(message, level=0, main_buffer=False, fout=False):
"""
send debug output to the slack-debug buffer and optionally write to a file.
"""
# TODO: do this smarter
# return
if level >= config.debug_level:
global debug_string
message = "DEBUG: {}".format(message)
if fout:
            with open('/tmp/debug.log', 'a+') as log_file:
                log_file.writelines(message + '\n')
if main_buffer:
# w.prnt("", "---------")
w.prnt("", "slack: " + message)
else:
if slack_debug and (not debug_string or debug_string in message):
# w.prnt(slack_debug, "---------")
w.prnt(slack_debug, message)
###### Config code
class PluginConfig(object):
Setting = collections.namedtuple('Setting', ['default', 'desc'])
# Default settings.
# These are, initially, each a (default, desc) tuple; the former is the
# default value of the setting, in the (string) format that weechat
# expects, and the latter is the user-friendly description of the setting.
# At __init__ time these values are extracted, the description is used to
# set or update the setting description for use with /help, and the default
# value is used to set the default for any settings not already defined.
# Following this procedure, the keys remain the same, but the values are
# the real (python) values of the settings.
default_settings = {
'auto_open_threads': Setting(
default='false',
desc='Automatically open threads when mentioned or in'
                 ' response to own messages.'),
'background_load_all_history': Setting(
default='false',
desc='Load history for each channel in the background as soon as it'
' opens, rather than waiting for the user to look at it.'),
'channel_name_typing_indicator': Setting(
default='true',
desc='Change the prefix of a channel from # to > when someone is'
' typing in it. Note that this will (temporarily) affect the sort'
' order if you sort buffers by name rather than by number.'),
'color_buflist_muted_channels': Setting(
default='darkgray',
desc='Color to use for muted channels in the buflist'),
'color_edited_suffix': Setting(
default='095',
desc='Color to use for (edited) suffix on messages that have been edited.'),
'color_reaction_suffix': Setting(
default='darkgray',
desc='Color to use for the [:wave:(@user)] suffix on messages that'
' have reactions attached to them.'),
'color_thread_suffix': Setting(
default='lightcyan',
desc='Color to use for the [thread: XXX] suffix on messages that'
' have threads attached to them.'),
'colorize_private_chats': Setting(
default='false',
desc='Whether to use nick-colors in DM windows.'),
'debug_mode': Setting(
default='false',
desc='Open a dedicated buffer for debug messages and start logging'
' to it. How verbose the logging is depends on log_level.'),
'debug_level': Setting(
default='3',
desc='Show only this level of debug info (or higher) when'
' debug_mode is on. Lower levels -> more messages.'),
'distracting_channels': Setting(
default='',
desc='List of channels to hide.'),
'external_user_suffix': Setting(
default='*',
desc='The suffix appended to nicks to indicate external users.'),
'files_download_location': Setting(
default='',
desc='If set, file attachments will be automatically downloaded'
' to this location.'),
'group_name_prefix': Setting(
default='&',
desc='The prefix of buffer names for groups (private channels).'),
'map_underline_to': Setting(
default='_',
desc='When sending underlined text to slack, use this formatting'
' character for it. The default ("_") sends it as italics. Use'
' "*" to send bold instead.'),
'muted_channels_activity': Setting(
default='personal_highlights',
desc="Control which activity you see from muted channels, either"
" none, personal_highlights, all_highlights or all. none: Don't"
" show any activity. personal_highlights: Only show personal"
" highlights, i.e. not @channel and @here. all_highlights: Show"
" all highlights, but not other messages. all: Show all activity,"
" like other channels."),
'never_away': Setting(
default='false',
desc='Poke Slack every five minutes so that it never marks you "away".'),
'record_events': Setting(
default='false',
desc='Log all traffic from Slack to disk as JSON.'),
'render_bold_as': Setting(
default='bold',
desc='When receiving bold text from Slack, render it as this in weechat.'),
'render_italic_as': Setting(
default='italic',
            desc='When receiving italic text from Slack, render it as this in weechat.'
' If your terminal lacks italic support, consider using "underline" instead.'),
'send_typing_notice': Setting(
default='true',
desc='Alert Slack users when you are typing a message in the input bar '
'(Requires reload)'),
'server_aliases': Setting(
default='',
desc='A comma separated list of `subdomain:alias` pairs. The alias'
' will be used instead of the actual name of the slack (in buffer'
' names, logging, etc). E.g `work:no_fun_allowed` would make your'
' work slack show up as `no_fun_allowed` rather than `work.slack.com`.'),
'shared_name_prefix': Setting(
default='%',
desc='The prefix of buffer names for shared channels.'),
'short_buffer_names': Setting(
default='false',
desc='Use `foo.#channel` rather than `foo.slack.com.#channel` as the'
' internal name for Slack buffers.'),
'show_buflist_presence': Setting(
default='true',
desc='Display a `+` character in the buffer list for present users.'),
'show_reaction_nicks': Setting(
default='false',
desc='Display the name of the reacting user(s) alongside each reactji.'),
'slack_api_token': Setting(
default='INSERT VALID KEY HERE!',
desc='List of Slack API tokens, one per Slack instance you want to'
' connect to. See the README for details on how to get these.'),
'slack_timeout': Setting(
default='20000',
desc='How long (ms) to wait when communicating with Slack.'),
'switch_buffer_on_join': Setting(
default='true',
desc='When /joining a channel, automatically switch to it as well.'),
'thread_messages_in_channel': Setting(
default='false',
            desc='When enabled, show thread messages in the parent channel.'),
'unfurl_ignore_alt_text': Setting(
default='false',
desc='When displaying ("unfurling") links to channels/users/etc,'
' ignore the "alt text" present in the message and instead use the'
' canonical name of the thing being linked to.'),
'unfurl_auto_link_display': Setting(
default='both',
desc='When displaying ("unfurling") links to channels/users/etc,'
' determine what is displayed when the text matches the url'
' without the protocol. This happens when Slack automatically'
' creates links, e.g. from words separated by dots or email'
' addresses. Set it to "text" to only display the text written by'
' the user, "url" to only display the url or "both" (the default)'
' to display both.'),
'unhide_buffers_with_activity': Setting(
default='false',
desc='When activity occurs on a buffer, unhide it even if it was'
' previously hidden (whether by the user or by the'
' distracting_channels setting).'),
}
# Set missing settings to their defaults. Load non-missing settings from
# weechat configs.
def __init__(self):
self.settings = {}
# Set all descriptions, replace the values in the dict with the
# default setting value rather than the (setting,desc) tuple.
# Use items() rather than iteritems() so we don't need to worry about
# invalidating the iterator.
for key, (default, desc) in self.default_settings.items():
w.config_set_desc_plugin(key, desc)
self.settings[key] = default
# Migrate settings from old versions of Weeslack...
self.migrate()
# ...and then set anything left over from the defaults.
for key, default in self.settings.iteritems():
if not w.config_get_plugin(key):
w.config_set_plugin(key, default)
self.config_changed(None, None, None)
def __str__(self):
return "".join([x + "\t" + str(self.settings[x]) + "\n" for x in self.settings.keys()])
def config_changed(self, data, key, value):
for key in self.settings:
self.settings[key] = self.fetch_setting(key)
if self.debug_mode:
create_slack_debug_buffer()
return w.WEECHAT_RC_OK
def fetch_setting(self, key):
if hasattr(self, 'get_' + key):
try:
return getattr(self, 'get_' + key)(key)
except:
return self.settings[key]
else:
# Most settings are on/off, so make get_boolean the default
return self.get_boolean(key)
def __getattr__(self, key):
return self.settings[key]
def get_boolean(self, key):
return w.config_string_to_boolean(w.config_get_plugin(key))
def get_string(self, key):
return w.config_get_plugin(key)
def get_int(self, key):
return int(w.config_get_plugin(key))
def is_default(self, key):
default = self.default_settings.get(key).default
return w.config_get_plugin(key) == default
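    # Aliases for settings whose values are not plain booleans; fetch_setting()
    # finds these dynamically via getattr(self, 'get_' + key).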
get_color_buflist_muted_channels = get_string
get_color_edited_suffix = get_string
get_color_reaction_suffix = get_string
get_color_thread_suffix = get_string
get_debug_level = get_int
get_external_user_suffix = get_string
get_files_download_location = get_string
get_group_name_prefix = get_string
get_map_underline_to = get_string
get_muted_channels_activity = get_string
get_render_bold_as = get_string
get_render_italic_as = get_string
get_shared_name_prefix = get_string
get_slack_timeout = get_int
get_unfurl_auto_link_display = get_string
def get_distracting_channels(self, key):
return [x.strip() for x in w.config_get_plugin(key).split(',') if x]
def get_server_aliases(self, key):
alias_list = w.config_get_plugin(key)
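        # e.g. "work:no_fun_allowed" parses to {'work': 'no_fun_allowed'}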
return dict(item.split(":") for item in alias_list.split(",") if ':' in item)
def get_slack_api_token(self, key):
token = w.config_get_plugin("slack_api_token")
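        # A token stored in weechat's secure data appears as "${sec.data.<name>}"
        # and must be evaluated to obtain the real token.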
if token.startswith('${sec.data'):
return w.string_eval_expression(token, {}, {}, {})
else:
return token
def migrate(self):
"""
This is to migrate the extension name from slack_extension to slack
"""
if not w.config_get_plugin("migrated"):
for k in self.settings.keys():
if not w.config_is_set_plugin(k):
p = w.config_get("plugins.var.python.slack_extension.{}".format(k))
data = w.config_string(p)
if data != "":
w.config_set_plugin(k, data)
w.config_set_plugin("migrated", "true")
old_thread_color_config = w.config_get_plugin("thread_suffix_color")
new_thread_color_config = w.config_get_plugin("color_thread_suffix")
if old_thread_color_config and not new_thread_color_config:
w.config_set_plugin("color_thread_suffix", old_thread_color_config)
# To trace execution, call `setup_trace()` at startup
# and add `sys.settrace(trace_calls)` to the function you want traced.
def setup_trace():
global f
now = time.time()
f = open('{}/{}-trace.json'.format(RECORD_DIR, now), 'w')
def trace_calls(frame, event, arg):
global f
if event != 'call':
return
co = frame.f_code
func_name = co.co_name
if func_name == 'write':
# Ignore write() calls from print statements
return
func_line_no = frame.f_lineno
func_filename = co.co_filename
caller = frame.f_back
caller_line_no = caller.f_lineno
caller_filename = caller.f_code.co_filename
print >> f, 'Call to %s on line %s of %s from line %s of %s' % \
(func_name, func_line_no, func_filename,
caller_line_no, caller_filename)
f.flush()
return
def initiate_connection(token, retries=3):
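    # batch_presence_aware=1 asks Slack to batch presence_change events rather
    # than sending one event per user.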
return SlackRequest(token,
'rtm.start',
{"batch_presence_aware": 1},
retries=retries)
if __name__ == "__main__":
w = WeechatWrapper(weechat)
if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "script_unloaded", ""):
weechat_version = w.info_get("version_number", "") or 0
if int(weechat_version) < 0x1030000:
w.prnt("", "\nERROR: Weechat version 1.3+ is required to use {}.\n\n".format(SCRIPT_NAME))
else:
global EVENTROUTER
EVENTROUTER = EventRouter()
# setup_trace()
# WEECHAT_HOME = w.info_get("weechat_dir", "")
# Global var section
slack_debug = None
config = PluginConfig()
config_changed_cb = config.config_changed
typing_timer = time.time()
# domain = None
# previous_buffer = None
# slack_buffer = None
# never_away = False
hide_distractions = False
# hotlist = w.infolist_get("hotlist", "", "")
# main_weechat_buffer = w.info_get("irc_buffer", "{}.{}".format(domain, "DOESNOTEXIST!@#$"))
w.hook_config("plugins.var.python." + SCRIPT_NAME + ".*", "config_changed_cb", "")
w.hook_modifier("input_text_for_buffer", "input_text_for_buffer_cb", "")
EMOJI.extend(load_emoji())
setup_hooks()
# attach to the weechat hooks we need
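            # slack_api_token may hold several comma-separated tokens, one per team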
tokens = map(string.strip, config.slack_api_token.split(','))
w.prnt('', 'Connecting to {} slack team{}.'
.format(len(tokens), '' if len(tokens) == 1 else 's'))
for t in tokens:
s = initiate_connection(t)
EVENTROUTER.receive(s)
if config.record_events:
EVENTROUTER.record()
EVENTROUTER.handle_next()
# END attach to the weechat hooks we need
hdata = Hdata(w)
| mit | -8,762,472,178,675,239,000 | 37.339551 | 277 | 0.599475 | false |
Brocade-OpenSource/OpenStack-DNRM-Nova | nova/compute/api.py | 1 | 152761 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import functools
import re
import string
import uuid
from oslo.config import cfg
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova import db
from nova.db import base
from nova import exception
from nova import hooks
from nova.image import glance
from nova import network
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova.openstack.common import excutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import servicegroup
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
def publisher_id(aggregate_identify=None):
return notifier.publisher_id("aggregate", aggregate_identify)
compute_opts = [
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.BoolOpt('allow_migrate_to_same_host',
default=False,
help='Allow migrate machine to the same host. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('default_schedule_zone',
default=None,
help='availability zone to use when user doesn\'t specify one'),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova',
'bittorrent'],
help='These are image properties which a snapshot should not'
' inherit from an instance'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('multi_instance_display_name_template',
default='%(name)s-%(uuid)s',
help='When creating multiple instances with a single request '
'using the os-multiple-create API extension, this '
'template will be used to build the display name for '
'each instance. The benefit is that the instances '
'end up with different hostnames. To restore legacy '
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
RO_SECURITY_GROUPS = ['default']
SM_IMAGE_PROP_PREFIX = "image_"
def check_instance_state(vm_state=None, task_state=(None,),
must_have_launched=True):
"""Decorator to check VM and/or task state before entry to API functions.
    If the instance is in the wrong state, or has not been successfully started
at least once the wrapper will raise an exception.
"""
if vm_state is not None and not isinstance(vm_state, set):
vm_state = set(vm_state)
if task_state is not None and not isinstance(task_state, set):
task_state = set(task_state)
def outer(f):
@functools.wraps(f)
def inner(self, context, instance, *args, **kw):
if vm_state is not None and instance['vm_state'] not in vm_state:
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
if (task_state is not None and
instance['task_state'] not in task_state):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method=f.__name__)
if must_have_launched and not instance['launched_at']:
raise exception.InstanceInvalidState(
attr=None,
not_launched=True,
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
return f(self, context, instance, *args, **kw)
return inner
return outer
def check_instance_host(function):
@functools.wraps(function)
def wrapped(self, context, instance, *args, **kwargs):
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return wrapped
def check_instance_lock(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
if instance['locked'] and not context.is_admin:
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return inner
def policy_decorator(scope):
"""Check corresponding policy prior of wrapped method to execution."""
def outer(func):
@functools.wraps(func)
def wrapped(self, context, target, *args, **kwargs):
check_policy(context, func.__name__, target, scope)
return func(self, context, target, *args, **kwargs)
return wrapped
return outer
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
scope='compute:security_groups')
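# These decorators enforce the 'compute:<method_name>' and
# 'compute:security_groups:<method_name>' policy rules, respectively.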
def check_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
def check_instance_cell(fn):
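    # When running as an API cell, refuse to act on instances whose cell is
    # unknown or currently read-only.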
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance, fn.__name__)
return fn(self, context, instance, *args, **kwargs)
_wrapped.__name__ = fn.__name__
return _wrapped
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_service=None, network_api=None, volume_api=None,
security_group_api=None, **kwargs):
self.image_service = (image_service or
glance.get_default_image_service())
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
self.security_group_api = (security_group_api or
openstack_driver.get_openstack_security_group_driver())
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self._compute_task_api = None
self.servicegroup_api = servicegroup.API()
super(API, self).__init__(**kwargs)
@property
def compute_task_api(self):
if self._compute_task_api is None:
# TODO(alaski): Remove calls into here from conductor manager so
# that this isn't necessary. #1180540
from nova import conductor
self._compute_task_api = conductor.ComputeTaskAPI()
return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
def _cell_read_only(self, cell_name):
"""Is the target cell in a read-only mode?"""
# FIXME(comstud): Add support for this.
return False
def _validate_cell(self, instance, method):
if self.cell_type != 'api':
return
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance['uuid'])
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method)
def _instance_update(self, context, instance_uuid, **kwargs):
"""Update an instance in the database using kwargs as value."""
(old_ref, instance_ref) = self.db.instance_update_and_get_original(
context, instance_uuid, kwargs)
notifications.send_update(context, old_ref, instance_ref, 'api')
return instance_ref
def _record_action_start(self, context, instance, action):
act = compute_utils.pack_action_start(context, instance['uuid'],
action)
self.db.action_start(context, act)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
QUOTAS.limit_check(context, injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
def _check_num_instances_quota(self, context, instance_type, min_count,
max_count):
"""Enforce quota limits on number of instances created."""
# Determine requested cores and ram
req_cores = max_count * instance_type['vcpus']
req_ram = max_count * instance_type['memory_mb']
# Check the quota
try:
reservations = QUOTAS.reserve(context, instances=max_count,
cores=req_cores, ram=req_ram)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
overs = exc.kwargs['overs']
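            # Headroom per resource = quota limit - (in use + reserved)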
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
allowed = headroom['instances']
# Reduce 'allowed' instances in line with the cores & ram headroom
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // instance_type['memory_mb'])
# Convert to the appropriate exception message
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
allowed = 0
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else:
msg = (_("Can only run %s more instances of this type.") %
allowed)
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)s instances. %(msg)s"),
{'overs': overs, 'pid': context.project_id,
'min_count': min_count, 'msg': msg})
requested = dict(instances=min_count, cores=req_cores, ram=req_ram)
raise exception.TooManyInstances(overs=overs,
req=requested[resource],
used=used, allowed=total_allowed,
resource=resource)
return max_count, reservations
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
num_metadata = len(metadata)
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
LOG.warn(_("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties"),
{'pid': context.project_id,
'num_metadata': num_metadata})
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warn(msg)
raise exception.InvalidMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""
Check if the security group requested exists and belongs to
the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
def _check_requested_networks(self, context, requested_networks):
"""
        Check if the networks requested belong to the project
        and the fixed IP address for each network provided is within
        the same network block.
"""
self.network_api.validate_networks(context, requested_networks)
@staticmethod
def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
1. Passed in with create-instance request.
2. Inherited from image.
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
image_service, kernel_id = glance.get_remote_image_service(
context, kernel_id)
image_service.show(context, kernel_id)
if ramdisk_id is not None:
image_service, ramdisk_id = glance.get_remote_image_service(
context, ramdisk_id)
image_service.show(context, ramdisk_id)
return kernel_id, ramdisk_id
@staticmethod
def _handle_availability_zone(availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
return availability_zone, forced_host, forced_node
@staticmethod
def _inherit_properties_from_image(image, auto_disk_config):
image_properties = image.get('properties', {})
def prop(prop_, prop_type=None):
"""Return the value of an image property."""
value = image_properties.get(prop_)
if value is not None:
if prop_type == 'bool':
value = strutils.bool_from_string(value)
return value
options_from_image = {'os_type': prop('os_type'),
'architecture': prop('architecture'),
'vm_mode': prop('vm_mode')}
# If instance doesn't have auto_disk_config overridden by request, use
# whatever the image indicates
if auto_disk_config is None:
auto_disk_config = prop('auto_disk_config', prop_type='bool')
options_from_image['auto_disk_config'] = auto_disk_config
return options_from_image
def _apply_instance_name_template(self, context, instance, index):
params = {
'uuid': instance['uuid'],
'name': instance['display_name'],
'count': index + 1,
}
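        # With the default template '%(name)s-%(uuid)s' this yields
        # '<display_name>-<uuid>'.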
try:
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
LOG.exception(_('Failed to set instance name using '
'multi_instance_display_name_template.'))
new_name = instance['display_name']
updates = {'display_name': new_name}
if not instance.get('hostname'):
updates['hostname'] = utils.sanitize_hostname(new_name)
instance = self.db.instance_update(context,
instance['uuid'], updates)
return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
# Image checks don't apply when building from volume
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.InstanceTypeMemoryTooSmall()
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
root_gb = instance_type['root_gb']
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.InstanceTypeDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.InstanceTypeDiskTooSmall()
def _get_image(self, context, image_href):
if not image_href:
return None, {}
(image_service, image_id) = glance.get_remote_image_service(
context, image_href)
image = image_service.show(context, image_id)
return image_id, image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
if image_id is not None:
self._check_requested_image(context, image_id,
image, instance_type)
def _validate_and_build_base_options(self, context, instance_type,
boot_meta, image_href, image_id,
kernel_id, ramdisk_id, min_count,
max_count, display_name,
display_description, key_name,
key_data, security_groups,
availability_zone, user_data,
metadata, injected_files,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping,
auto_disk_config, reservation_id):
"""Verify all the input parameters regardless of the provisioning
strategy being performed.
"""
if min_count > 1 or max_count > 1:
if any(map(lambda bdm: 'volume_id' in bdm, block_device_mapping)):
msg = _('Cannot attach one or more volumes to multiple'
' instances')
raise exception.InvalidRequest(msg)
if instance_type['disabled']:
raise exception.InstanceTypeNotFound(
instance_type_id=instance_type['id'])
if user_data:
l = len(user_data)
if l > MAX_USERDATA_SIZE:
# NOTE(mikal): user_data is stored in a text column, and
# the database might silently truncate if its over length.
raise exception.InstanceUserDataTooLarge(
length=l, maxsize=MAX_USERDATA_SIZE)
try:
base64.decodestring(user_data)
except base64.binascii.Error:
raise exception.InstanceUserDataMalformed()
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
instance_type, metadata, injected_files)
self._check_requested_secgroups(context, security_groups)
self._check_requested_networks(context, requested_networks)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, kernel_id, ramdisk_id, boot_meta)
config_drive = self._check_config_drive(config_drive)
if key_data is None and key_name:
key_pair = self.db.key_pair_get(context, context.user_id,
key_name)
key_data = key_pair['public_key']
root_device_name = block_device.properties_root_device_name(
boot_meta.get('properties', {}))
system_metadata = flavors.save_flavor_info(
dict(), instance_type)
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'power_state': power_state.NOSTATE,
'vm_state': vm_states.BUILDING,
'config_drive': config_drive,
'user_id': context.user_id,
'project_id': context.project_id,
'instance_type_id': instance_type['id'],
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'root_gb': instance_type['root_gb'],
'ephemeral_gb': instance_type['ephemeral_gb'],
'display_name': display_name,
'display_description': display_description or '',
'user_data': user_data,
'key_name': key_name,
'key_data': key_data,
'locked': False,
'metadata': metadata,
'access_ip_v4': access_ip_v4,
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
'progress': 0,
'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
boot_meta, auto_disk_config)
base_options.update(options_from_image)
return base_options
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
check_policy(context, 'create:forced_host', {})
filter_properties['force_hosts'] = [forced_host]
if forced_node:
check_policy(context, 'create:forced_host', {})
filter_properties['force_nodes'] = [forced_node]
return filter_properties
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping):
# Reserve quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
LOG.debug(_("Going to run %s instances...") % num_instances)
instances = []
try:
for i in xrange(num_instances):
options = base_options.copy()
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, options,
security_groups, block_device_mapping,
num_instances, i)
instances.append(instance)
self._validate_bdm(context, instance)
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
except Exception:
with excutils.save_and_reraise_exception():
try:
for instance in instances:
self.db.instance_destroy(context, instance['uuid'])
finally:
QUOTAS.rollback(context, quota_reservations)
# Commit the reservations
QUOTAS.commit(context, quota_reservations)
return instances
def _get_volume_image_metadata(self, context, block_device_mapping):
"""If we are booting from a volume, we need to get the
volume details from Cinder and make sure we pass the
metadata back accordingly.
"""
if not block_device_mapping:
return {}
for bdm in block_device_mapping:
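            # Only the boot device (mapped to 'vda') is checked for image metadata.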
if bdm.get('device_name') == "vda":
volume_id = bdm.get('volume_id')
if volume_id is not None:
try:
volume = self.volume_api.get(context,
volume_id)
return volume['volume_image_metadata']
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
return None
def _create_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
reservation_id=None, scheduler_hints=None):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation.
"""
# Normalize and setup some parameters
if reservation_id is None:
reservation_id = utils.generate_uid('r')
security_groups = security_groups or ['default']
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
if not instance_type:
instance_type = flavors.get_default_flavor()
if image_href:
image_id, boot_meta = self._get_image(context, image_href)
else:
image_id = None
boot_meta = {}
boot_meta['properties'] = \
self._get_volume_image_metadata(context,
block_device_mapping)
handle_az = self._handle_availability_zone
availability_zone, forced_host, forced_node = handle_az(
availability_zone)
base_options = self._validate_and_build_base_options(context,
instance_type, boot_meta, image_href, image_id, kernel_id,
ramdisk_id, min_count, max_count, display_name,
display_description, key_name, key_data, security_groups,
availability_zone, user_data, metadata, injected_files,
access_ip_v4, access_ip_v6, requested_networks, config_drive,
block_device_mapping, auto_disk_config, reservation_id)
instances = self._provision_instances(context, instance_type,
min_count, max_count, base_options, boot_meta, security_groups,
block_device_mapping)
filter_properties = self._build_filter_properties(context,
scheduler_hints, forced_host, forced_node, instance_type)
for instance in instances:
self._record_action_start(context, instance,
instance_actions.CREATE)
self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping)
return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, virtual_name):
size = 0
if virtual_name == 'swap':
size = instance_type.get('swap', 0)
elif block_device.is_ephemeral(virtual_name):
num = block_device.ephemeral_num(virtual_name)
# TODO(yamahata): ephemeralN where N > 0
# Only ephemeral0 is allowed for now because InstanceTypes
# table only allows single local disk, ephemeral_gb.
# In order to enhance it, we need to add a new columns to
# instance_types table.
if num > 0:
return 0
size = instance_type.get('ephemeral_gb')
return size
def _update_image_block_device_mapping(self, elevated_context,
instance_type, instance_uuid,
mappings):
"""tell vm driver to create ephemeral/swap device at boot time by
updating BlockDeviceMapping
"""
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug(_("bdm %s"), bdm, instance_uuid=instance_uuid)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
size = self._volume_size(instance_type, virtual_name)
if size == 0:
continue
values = {
'instance_uuid': instance_uuid,
'device_name': bdm['device'],
'virtual_name': virtual_name,
'volume_size': size}
self.db.block_device_mapping_update_or_create(elevated_context,
values)
def _update_block_device_mapping(self, elevated_context,
instance_type, instance_uuid,
block_device_mapping):
"""tell vm driver to attach volume at boot time by updating
BlockDeviceMapping
"""
LOG.debug(_("block_device_mapping %s"), block_device_mapping,
instance_uuid=instance_uuid)
for bdm in block_device_mapping:
assert 'device_name' in bdm
values = {'instance_uuid': instance_uuid}
for key in ('device_name', 'delete_on_termination', 'virtual_name',
'snapshot_id', 'volume_id', 'volume_size',
'no_device'):
values[key] = bdm.get(key)
virtual_name = bdm.get('virtual_name')
if (virtual_name is not None and
block_device.is_swap_or_ephemeral(virtual_name)):
size = self._volume_size(instance_type, virtual_name)
if size == 0:
continue
values['volume_size'] = size
# NOTE(yamahata): NoDevice eliminates devices defined in image
# files by command line option.
# (--block-device-mapping)
if virtual_name == 'NoDevice':
values['no_device'] = True
for k in ('delete_on_termination', 'virtual_name',
'snapshot_id', 'volume_id', 'volume_size'):
values[k] = None
self.db.block_device_mapping_update_or_create(elevated_context,
values)
def _validate_bdm(self, context, instance):
for bdm in block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid'])):
# NOTE(vish): For now, just make sure the volumes are accessible.
# Additionally, check that the volume can be attached to this
# instance.
snapshot_id = bdm.get('snapshot_id')
volume_id = bdm.get('volume_id')
if volume_id is not None:
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context,
volume,
instance=instance)
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
elif snapshot_id is not None:
try:
self.volume_api.get_snapshot(context, snapshot_id)
except Exception:
raise exception.InvalidBDMSnapshot(id=snapshot_id)
def _populate_instance_for_bdm(self, context, instance, instance_type,
image, block_device_mapping):
"""Populate instance block device mapping information."""
instance_uuid = instance['uuid']
image_properties = image.get('properties', {})
mappings = image_properties.get('mappings', [])
if mappings:
self._update_image_block_device_mapping(context,
instance_type, instance_uuid, mappings)
image_bdm = image_properties.get('block_device_mapping', [])
for mapping in (image_bdm, block_device_mapping):
if not mapping:
continue
self._update_block_device_mapping(context,
instance_type, instance_uuid, mapping)
# NOTE(ndipanov): Create an image bdm - at the moment
# this is not used but is done for easier transition
# in the future.
if (instance['image_ref'] and not
self.is_volume_backed_instance(context, instance, None)):
image_bdm = block_device.create_image_bdm(instance['image_ref'])
image_bdm['instance_uuid'] = instance_uuid
self.db.block_device_mapping_update_or_create(context,
image_bdm,
legacy=False)
def _populate_instance_shutdown_terminate(self, instance, image,
block_device_mapping):
"""Populate instance shutdown_terminate information."""
image_properties = image.get('properties', {})
if (block_device_mapping or
image_properties.get('mappings') or
image_properties.get('block_device_mapping')):
instance['shutdown_terminate'] = False
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
hostname = instance.get('hostname')
if display_name is None:
display_name = self._default_display_name(instance['uuid'])
instance['display_name'] = display_name
if hostname is None and num_instances == 1:
# NOTE(russellb) In the multi-instance case, we're going to
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
# Otherwise, it will be built after the template based
# display_name.
hostname = display_name
instance['hostname'] = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
def _populate_instance_for_create(self, base_options, image,
security_groups):
"""Build the beginning of a new instance."""
image_properties = image.get('properties', {})
instance = base_options
if not instance.get('uuid'):
# Generate the instance_uuid here so we can use it
# for additional setup before creating the DB entry.
instance['uuid'] = str(uuid.uuid4())
instance['launch_index'] = 0
instance['vm_state'] = vm_states.BUILDING
instance['task_state'] = task_states.SCHEDULING
instance['info_cache'] = {'network_info': '[]'}
# Store image properties so we can use them later
# (for notifications, etc). Only store what we can.
instance.setdefault('system_metadata', {})
# Make sure we have the dict form that we need for instance_update.
instance['system_metadata'] = utils.instance_sys_meta(instance)
prefix_format = SM_IMAGE_PROP_PREFIX + '%s'
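        # Each image property is stored in system_metadata under an 'image_<prop>'
        # key, truncated to 255 characters to fit the column.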
for key, value in image_properties.iteritems():
new_value = unicode(value)[:255]
instance['system_metadata'][prefix_format % key] = new_value
# Keep a record of the original base image that this
# image's instance is derived from:
base_image_ref = image_properties.get('base_image_ref')
if not base_image_ref:
# base image ref property not previously set through a snapshot.
# default to using the image ref as the base:
base_image_ref = base_options['image_ref']
instance['system_metadata']['image_base_image_ref'] = base_image_ref
self.security_group_api.populate_security_groups(instance,
security_groups)
return instance
#NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
base_options, security_group, block_device_mapping, num_instances,
index):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
This is called by the scheduler after a location for the
instance has been determined.
"""
instance = self._populate_instance_for_create(base_options,
image, security_group)
self._populate_instance_names(instance, num_instances)
self._populate_instance_shutdown_terminate(instance, image,
block_device_mapping)
self.security_group_api.ensure_default(context)
instance = self.db.instance_create(context, instance)
if num_instances > 1:
# NOTE(russellb) We wait until this spot to handle
# multi_instance_display_name_template, because we need
# the UUID from the instance.
instance = self._apply_instance_name_template(context, instance,
index)
self._populate_instance_for_bdm(context, instance,
instance_type, image, block_device_mapping)
return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
check_policy(context, 'create', target)
if requested_networks:
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
display_name=None, display_description=None,
key_name=None, key_data=None, security_group=None,
availability_zone=None, user_data=None, metadata=None,
injected_files=None, admin_password=None,
block_device_mapping=None, access_ip_v4=None,
access_ip_v6=None, requested_networks=None, config_drive=None,
auto_disk_config=None, scheduler_hints=None):
"""
Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
Returns a tuple of (instances, reservation_id)
"""
self._check_create_policies(context, availability_zone,
requested_networks, block_device_mapping)
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_group,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
scheduler_hints=scheduler_hints)
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
for service in self.db.service_get_all_by_topic(context,
CONF.compute_topic):
host_name = service['host']
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
def update_state(self, context, instance, new_state):
"""Updates the state of a compute instance.
For example to 'active' or 'error'.
Also sets 'task_state' to None.
        Used by the admin_actions API.
:param context: The security context
:param instance: The instance to update
:param new_state: A member of vm_state, eg. 'active'
"""
self.update(context, instance,
vm_state=new_state,
task_state=None)
@wrap_check_policy
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance: The instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:returns: None
"""
_, updated = self._update(context, instance, **kwargs)
return updated
def _update(self, context, instance, **kwargs):
# Update the instance record and send a state update notification
# if task or vm state changed
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance['uuid'], kwargs)
notifications.send_update(context, old_ref, instance_ref,
service="api")
return dict(old_ref.iteritems()), dict(instance_ref.iteritems())
def _delete(self, context, instance, cb, **instance_attrs):
if instance['disable_terminate']:
LOG.info(_('instance termination disabled'),
instance=instance)
return
host = instance['host']
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid']))
reservations = None
if context.is_admin and context.project_id != instance['project_id']:
project_id = instance['project_id']
else:
project_id = context.project_id
try:
# NOTE(maoy): no expected_task_state needs to be set
attrs = {'progress': 0}
attrs.update(instance_attrs)
old, updated = self._update(context,
instance,
**attrs)
# NOTE(comstud): If we delete the instance locally, we'll
# commit the reservations here. Otherwise, the manager side
# will commit or rollback the reservations based on success.
reservations = self._create_reservations(context,
old,
updated,
project_id)
if not host:
# Just update database, nothing else we can do
constraint = self.db.constraint(host=self.db.equal_any(host))
try:
self.db.instance_destroy(context, instance['uuid'],
constraint)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id)
return
except exception.ConstraintNotMet:
# Refresh to get new host information
instance = self.get(context, instance['uuid'])
if instance['vm_state'] == vm_states.RESIZED:
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
get_migration = self.db.migration_get_by_instance_and_status
try:
migration_ref = get_migration(context.elevated(),
instance['uuid'], 'finished')
except exception.MigrationNotFoundByStatus:
migration_ref = None
if migration_ref:
src_host = migration_ref['source_compute']
# Call since this can race with the terminate_instance.
# The resize is done but awaiting confirmation/reversion,
# so there are two cases:
# 1. up-resize: here -instance['vcpus'/'memory_mb'] match
# the quota usages accounted for this instance,
# so no further quota adjustment is needed
# 2. down-resize: here -instance['vcpus'/'memory_mb'] are
# shy by delta(old, new) from the quota usages accounted
# for this instance, so we must adjust
deltas = self._downsize_quota_delta(context, instance)
downsize_reservations = self._reserve_quota_delta(context,
deltas)
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance, migration_ref,
host=src_host, cast=False,
reservations=downsize_reservations)
is_up = False
try:
service = self.db.service_get_by_compute_host(
context.elevated(), instance['host'])
if self.servicegroup_api.service_is_up(service):
is_up = True
self._record_action_start(context, instance,
instance_actions.DELETE)
cb(context, instance, bdms, reservations=reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id)
reservations = None
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id)
def _create_reservations(self, context, old_instance, new_instance,
project_id):
instance_vcpus = old_instance['vcpus']
instance_memory_mb = old_instance['memory_mb']
# NOTE(wangpan): if the instance is resizing, and the resources
# are updated to new instance type, we should use
# the old instance type to create reservation.
# see https://bugs.launchpad.net/nova/+bug/1099729 for more details
if old_instance['task_state'] in (task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
get_migration = self.db.migration_get_by_instance_and_status
try:
migration_ref = get_migration(context.elevated(),
old_instance['uuid'], 'post-migrating')
except exception.MigrationNotFoundByStatus:
migration_ref = None
if (migration_ref and
new_instance['instance_type_id'] ==
migration_ref['new_instance_type_id']):
old_inst_type_id = migration_ref['old_instance_type_id']
try:
old_inst_type = flavors.get_flavor(old_inst_type_id)
except exception.InstanceTypeNotFound:
LOG.warning(_("instance type %d not found"),
old_inst_type_id)
pass
else:
instance_vcpus = old_inst_type['vcpus']
instance_memory_mb = old_inst_type['memory_mb']
LOG.debug(_("going to delete a resizing instance"))
reservations = QUOTAS.reserve(context,
project_id=project_id,
instances=-1,
cores=-instance_vcpus,
ram=-instance_memory_mb)
return reservations
def _local_delete(self, context, instance, bdms):
LOG.warning(_("instance's host %s is down, deleting from "
"database") % instance['host'], instance=instance)
instance_uuid = instance['uuid']
self.db.instance_info_cache_delete(context, instance_uuid)
compute_utils.notify_about_instance_usage(
context, instance, "delete.start")
elevated = context.elevated()
self.network_api.deallocate_for_instance(elevated,
instance)
system_meta = self.db.instance_system_metadata_get(context,
instance_uuid)
# cleanup volumes
for bdm in bdms:
if bdm['volume_id']:
# NOTE(vish): We don't have access to correct volume
# connector info, so just pass a fake
# connector. This can be improved when we
# expose get_volume_connector to rpc.
connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
self.volume_api.terminate_connection(context,
bdm['volume_id'],
connector)
self.volume_api.detach(elevated, bdm['volume_id'])
if bdm['delete_on_termination']:
self.volume_api.delete(context, bdm['volume_id'])
self.db.block_device_mapping_destroy(context, bdm['id'])
instance = self._instance_update(context,
instance_uuid,
vm_state=vm_states.DELETED,
task_state=None,
terminated_at=timeutils.utcnow())
self.db.instance_destroy(context, instance_uuid)
compute_utils.notify_about_instance_usage(
context, instance, "delete.end", system_metadata=system_meta)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_('Going to try to soft delete instance'),
instance=instance)
def soft_delete(context, instance, bdms, reservations=None):
self.compute_rpcapi.soft_delete_instance(context, instance,
reservations=reservations)
self._delete(context, instance, soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
def terminate(context, instance, bdms, reservations=None):
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations)
self._delete(context, instance, terminate,
task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate instance"), instance=instance)
self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
# Reserve quotas
instance_type = flavors.extract_flavor(instance)
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, 1, 1)
self._record_action_start(context, instance, instance_actions.RESTORE)
try:
if instance['host']:
instance = self.update(context, instance,
task_state=task_states.RESTORING,
expected_task_state=None,
deleted_at=None)
self.compute_rpcapi.restore_instance(context, instance)
else:
self.update(context,
instance,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=None,
deleted_at=None)
QUOTAS.commit(context, quota_reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, quota_reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED],
must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR],
task_state=[None])
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
LOG.debug(_("Going to try to stop instance"), instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug(_("Going to try to start instance"), instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.START)
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
#NOTE(bcwaldon): no policy check here since it should be rolled in to
# search_opts in get_all
def get_active_by_window(self, context, begin, end=None, project_id=None):
"""Get instances that were continuously active over a window."""
return self.db.instance_get_active_by_window_joined(context, begin,
end, project_id)
#NOTE(bcwaldon): this doesn't really belong in this class
def get_instance_type(self, context, instance_type_id):
"""Get an instance type by instance type id."""
return flavors.get_flavor(instance_type_id, ctxt=context)
def get(self, context, instance_id, want_objects=False):
"""Get a single instance with the given instance_id."""
# NOTE(ameade): we still need to support integer ids for ec2
expected_attrs = ['metadata', 'system_metadata',
'security_groups', 'info_cache']
try:
if uuidutils.is_uuid_like(instance_id):
instance = instance_obj.Instance.get_by_uuid(
context, instance_id, expected_attrs=expected_attrs)
elif utils.is_int_like(instance_id):
instance = instance_obj.Instance.get_by_id(
context, instance_id, expected_attrs=expected_attrs)
else:
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
raise exception.InstanceNotFound(instance_id=instance_id)
check_policy(context, 'get', instance)
if not want_objects:
instance = obj_base.obj_to_primitive(instance)
return instance
def get_all(self, context, search_opts=None, sort_key='created_at',
sort_dir='desc', limit=None, marker=None, want_objects=False):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
Deleted instances will be returned by default, unless there is a
search option that says otherwise.
The results will be returned sorted in the order specified by the
'sort_dir' parameter using the key specified in the 'sort_key'
parameter.
"""
#TODO(bcwaldon): determine the best argument for target here
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
check_policy(context, "get_all", target)
if search_opts is None:
search_opts = {}
if 'all_tenants' in search_opts:
check_policy(context, "get_all_tenants", target)
LOG.debug(_("Searching by: %s") % str(search_opts))
# Fixups for the DB call
filters = {}
def _remap_flavor_filter(flavor_id):
instance_type = flavors.get_flavor_by_flavor_id(
flavor_id)
filters['instance_type_id'] = instance_type['id']
def _remap_fixed_ip_filter(fixed_ip):
# Turn fixed_ip into a regexp match. Since '.' matches
# any character, we need to use regexp escaping for it.
filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
# search_option to filter_name mapping.
filter_mapping = {
'image': 'image_ref',
'name': 'display_name',
'tenant_id': 'project_id',
'flavor': _remap_flavor_filter,
'fixed_ip': _remap_fixed_ip_filter}
# copy from search_opts, doing various remappings as necessary
for opt, value in search_opts.iteritems():
# Do remappings.
# Values not in the filter_mapping table are copied as-is.
# If remapping is None, option is not copied
# If the remapping is a string, it is the filter_name to use
try:
remap_object = filter_mapping[opt]
except KeyError:
filters[opt] = value
else:
# Remaps are strings to translate to, or functions to call
# to do the translating as defined by the table above.
if isinstance(remap_object, basestring):
filters[remap_object] = value
else:
try:
remap_object(value)
# We already know we can't match the filter, so
# return an empty list
except ValueError:
return []
inst_models = self._get_instances_by_filters(context, filters,
sort_key, sort_dir,
limit=limit,
marker=marker)
if want_objects:
return inst_models
# Convert the models to dictionaries
instances = []
for inst_model in inst_models:
instances.append(obj_base.obj_to_primitive(inst_model))
return instances
def _get_instances_by_filters(self, context, filters,
sort_key, sort_dir,
limit=None,
marker=None):
if 'ip6' in filters or 'ip' in filters:
res = self.network_api.get_instance_uuids_by_ip_filter(context,
filters)
# NOTE(jkoelker) It is possible that we will get the same
# instance uuid twice (one for ipv4 and ipv6)
uuids = set([r['instance_uuid'] for r in res])
filters['uuid'] = uuids
fields = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
return instance_obj.InstanceList.get_by_filters(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
limit=limit, marker=marker, expected_attrs=fields)
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED])
def live_snapshot(self, context, instance, name, extra_properties=None,
image_id=None):
"""Live Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: A dict containing image metadata
"""
if image_id:
# The image entry has already been created, so just pull the
# metadata.
image_meta = self.image_service.show(context, image_id)
else:
image_meta = self._create_image(context, instance, name,
'snapshot', extra_properties=extra_properties)
instance = self.update(context, instance,
task_state=task_states.IMAGE_LIVE_SNAPSHOT,
expected_task_state=None)
self.compute_rpcapi.live_snapshot_instance(context, instance=instance,
image_id=image_meta['id'])
return image_meta
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None, image_id=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup or snapshot
:param backup_type: type of the backup; daily backups are called 'daily'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
"""
if image_id:
# The image entry has already been created, so just pull the
# metadata.
image_meta = self.image_service.show(context, image_id)
else:
image_meta = self._create_image(context, instance, name,
'backup', backup_type=backup_type,
rotation=rotation, extra_properties=extra_properties)
instance = self.update(context, instance,
task_state=task_states.IMAGE_BACKUP,
expected_task_state=None)
self.compute_rpcapi.snapshot_instance(context, instance=instance,
image_id=image_meta['id'], image_type='backup',
backup_type=backup_type, rotation=rotation)
return image_meta
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None,
image_id=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: A dict containing image metadata
"""
if image_id:
# The image entry has already been created, so just pull the
# metadata.
image_meta = self.image_service.show(context, image_id)
else:
image_meta = self._create_image(context, instance, name,
'snapshot', extra_properties=extra_properties)
instance = self.update(context, instance,
task_state=task_states.IMAGE_SNAPSHOT,
expected_task_state=None)
self.compute_rpcapi.snapshot_instance(context, instance=instance,
image_id=image_meta['id'], image_type='snapshot')
return image_meta
def _create_image(self, context, instance, name, image_type,
backup_type=None, rotation=None, extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
"""
instance_uuid = instance['uuid']
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
sent_meta = {
'name': name,
'is_public': False,
'properties': properties,
}
# Persist base image ref as a Glance image property
system_meta = self.db.instance_system_metadata_get(
context, instance_uuid)
base_image_ref = system_meta.get('image_base_image_ref')
if base_image_ref:
properties['base_image_ref'] = base_image_ref
if image_type == 'backup':
properties['backup_type'] = backup_type
elif image_type == 'snapshot':
min_ram, min_disk = self._get_minram_mindisk_params(context,
instance)
if min_ram is not None:
sent_meta['min_ram'] = min_ram
if min_disk is not None:
sent_meta['min_disk'] = min_disk
properties.update(extra_properties or {})
# Now inherit image properties from the base image
for key, value in system_meta.items():
# Trim off the image_ prefix
if key.startswith(SM_IMAGE_PROP_PREFIX):
key = key[len(SM_IMAGE_PROP_PREFIX):]
# Skip properties that are non-inheritable
if key in CONF.non_inheritable_image_properties:
continue
# By using setdefault, we ensure that the properties set
# up above will not be overwritten by inherited values
properties.setdefault(key, value)
return self.image_service.create(context, sent_meta)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
extra_properties=None):
"""Snapshot the given volume-backed instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param image_meta: metadata for the new image
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: the new image metadata
"""
image_meta['name'] = name
properties = image_meta['properties']
if instance['root_device_name']:
properties['root_device_name'] = instance['root_device_name']
properties.update(extra_properties or {})
bdms = self.get_instance_bdms(context, instance)
mapping = []
for bdm in bdms:
if bdm['no_device']:
continue
volume_id = bdm.get('volume_id')
if volume_id:
# create snapshot based on volume_id
volume = self.volume_api.get(context, volume_id)
# NOTE(yamahata): Should we wait for snapshot creation?
# Linux LVM snapshot creation completes in a
# short time, so it doesn't matter for now.
name = _('snapshot for %s') % image_meta['name']
snapshot = self.volume_api.create_snapshot_force(
context, volume['id'], name, volume['display_description'])
bdm['snapshot_id'] = snapshot['id']
bdm['volume_id'] = None
mapping.append(bdm)
for m in block_device.mappings_prepend_dev(properties.get('mappings',
[])):
virtual_name = m['virtual']
if virtual_name in ('ami', 'root'):
continue
assert block_device.is_swap_or_ephemeral(virtual_name)
device_name = m['device']
if device_name in [b['device_name'] for b in mapping
if not b.get('no_device', False)]:
continue
# NOTE(yamahata): swap and ephemeral devices are specified in
# AMI, but disabled for this instance by the user.
# So disable those devices by setting no_device.
mapping.append({'device_name': device_name, 'no_device': True})
if mapping:
properties['block_device_mapping'] = mapping
for attr in ('status', 'location', 'id'):
image_meta.pop(attr, None)
# the new image is simply a bucket of properties (particularly the
# block device mapping, kernel and ramdisk IDs) with no image data,
# hence the zero size
image_meta['size'] = 0
return self.image_service.create(context, image_meta, data='')
def _get_minram_mindisk_params(self, context, instance):
try:
#try to get source image of the instance
orig_image = self.image_service.show(context,
instance['image_ref'])
except exception.ImageNotFound:
return None, None
#disk format of vhd is non-shrinkable
if orig_image.get('disk_format') == 'vhd':
instance_type = flavors.extract_flavor(instance)
min_disk = instance_type['root_gb']
else:
#set new image values to the original image values
min_disk = orig_image.get('min_disk')
min_ram = orig_image.get('min_ram')
return min_ram, min_disk
def _get_block_device_info(self, context, instance_uuid):
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(context,
instance_uuid))
block_device_mapping = []
for bdm in bdms:
if not bdm['volume_id']:
continue
try:
cinfo = jsonutils.loads(bdm['connection_info'])
if cinfo and 'serial' not in cinfo:
cinfo['serial'] = bdm['volume_id']
bdmap = {'connection_info': cinfo,
'mount_device': bdm['device_name'],
'delete_on_termination': bdm['delete_on_termination']}
block_device_mapping.append(bdmap)
except TypeError:
# if the block_device_mapping has no value in connection_info
# (returned as None), don't include in the mapping
pass
return {'block_device_mapping': block_device_mapping}
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED,
vm_states.ERROR],
task_state=[None, task_states.REBOOTING,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.PAUSING,
task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if ((reboot_type == 'SOFT' and
instance['task_state'] == task_states.REBOOTING) or
(reboot_type == 'HARD' and
instance['task_state'] == task_states.REBOOTING_HARD)):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method='reboot')
state = {'SOFT': task_states.REBOOTING,
'HARD': task_states.REBOOTING_HARD}[reboot_type]
instance = self.update(context, instance,
task_state=state,
expected_task_state=[None,
task_states.REBOOTING])
elevated = context.elevated()
block_info = self._get_block_device_info(elevated,
instance['uuid'])
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=block_info,
reboot_type=reboot_type)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR],
task_state=[None])
def rebuild(self, context, instance, image_href, admin_password, **kwargs):
"""Rebuild the given instance with the provided attributes."""
orig_image_ref = instance['image_ref'] or ''
files_to_inject = kwargs.pop('files_to_inject', [])
metadata = kwargs.get('metadata', {})
instance_type = flavors.extract_flavor(instance)
image_id, image = self._get_image(context, image_href)
self._checks_for_create_and_rebuild(context, image_id, image,
instance_type, metadata, files_to_inject)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, None, None, image)
def _reset_image_metadata():
"""
Remove old image properties that we're storing as instance
system metadata. These properties start with 'image_'.
Then add the properties for the new image.
"""
# FIXME(comstud): There's a race condition here in that
# if the system_metadata for this instance is updated
# after we do the get and before we update.. those other
# updates will be lost. Since this problem exists in a lot
# of other places, I think it should be addressed in a DB
# layer overhaul.
sys_metadata = self.db.instance_system_metadata_get(context,
instance['uuid'])
orig_sys_metadata = dict(sys_metadata)
# Remove the old keys
for key in sys_metadata.keys():
if key.startswith(SM_IMAGE_PROP_PREFIX):
del sys_metadata[key]
# Add the new ones
for key, value in image.get('properties', {}).iteritems():
new_value = unicode(value)[:255]
sys_metadata[(SM_IMAGE_PROP_PREFIX + '%s') % key] = new_value
self.db.instance_system_metadata_update(context,
instance['uuid'], sys_metadata, True)
return orig_sys_metadata
instance = self.update(context, instance,
task_state=task_states.REBUILDING,
expected_task_state=None,
# Unfortunately we need to set image_ref early,
# so API users can see it.
image_ref=image_href, kernel_id=kernel_id or "",
ramdisk_id=ramdisk_id or "",
progress=0, **kwargs)
# On a rebuild, since we're potentially changing images, we need to
# wipe out the old image properties that we're storing as instance
# system metadata... and copy in the properties for the new image.
orig_sys_metadata = _reset_image_metadata()
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context,
instance['uuid']))
self._record_action_start(context, instance, instance_actions.REBUILD)
self.compute_rpcapi.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
elevated = context.elevated()
migration_ref = self.db.migration_get_by_instance_and_status(elevated,
instance['uuid'], 'finished')
# reverse quota reservation for increased resource usage
deltas = self._reverse_upsize_quota_delta(context, migration_ref)
reservations = self._reserve_quota_delta(context, deltas)
instance = self.update(context, instance,
task_state=task_states.RESIZE_REVERTING,
expected_task_state=None)
self.db.migration_update(elevated, migration_ref['id'],
{'status': 'reverting'})
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
self.compute_rpcapi.revert_resize(context,
instance=instance, migration=migration_ref,
host=migration_ref['dest_compute'], reservations=reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration_ref=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
if migration_ref is None:
migration_ref = self.db.migration_get_by_instance_and_status(
elevated, instance['uuid'], 'finished')
# reserve quota only for any decrease in resource usage
deltas = self._downsize_quota_delta(context, instance)
reservations = self._reserve_quota_delta(context, deltas)
self.db.migration_update(elevated, migration_ref['id'],
{'status': 'confirming'})
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance=instance, migration=migration_ref,
host=migration_ref['source_compute'],
reservations=reservations)
@staticmethod
def _resize_quota_delta(context, new_instance_type,
old_instance_type, sense, compare):
"""
Calculate any quota adjustment required at a particular point
in the resize cycle.
:param context: the request context
:param new_instance_type: the target instance type
:param old_instance_type: the original instance type
:param sense: the sense of the adjustment, 1 indicates a
forward adjustment, whereas -1 indicates a
reversal of a prior adjustment
:param compare: the direction of the comparison, 1 indicates
we're checking for positive deltas, whereas
-1 indicates negative deltas
"""
def _quota_delta(resource):
return sense * (new_instance_type[resource] -
old_instance_type[resource])
deltas = {}
if compare * _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if compare * _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
@staticmethod
def _upsize_quota_delta(context, new_instance_type, old_instance_type):
"""
Calculate deltas required to adjust quota for an instance upsize.
"""
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, 1, 1)
@staticmethod
def _reverse_upsize_quota_delta(context, migration_ref):
"""
Calculate deltas required to reverse a prior upsizing
quota adjustment.
"""
old_instance_type = flavors.get_flavor(
migration_ref['old_instance_type_id'])
new_instance_type = flavors.get_flavor(
migration_ref['new_instance_type_id'])
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, -1, -1)
@staticmethod
def _downsize_quota_delta(context, instance):
"""
Calculate deltas required to adjust quota for an instance downsize.
"""
old_instance_type = flavors.extract_flavor(instance,
'old_')
new_instance_type = flavors.extract_flavor(instance,
'new_')
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, 1, -1)
@staticmethod
def _reserve_quota_delta(context, deltas, project_id=None):
if not deltas:
return
return QUOTAS.reserve(context, project_id=project_id, **deltas)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def resize(self, context, instance, flavor_id=None, **kwargs):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
current_instance_type = flavors.extract_flavor(instance)
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
LOG.debug(_("flavor_id is None. Assuming migration."),
instance=instance)
new_instance_type = current_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s"),
{'current_instance_type_name': current_instance_type_name,
'new_instance_type_name': new_instance_type_name},
instance=instance)
# FIXME(sirp): both of these should raise InstanceTypeNotFound instead
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
if not same_instance_type and new_instance_type['disabled']:
raise exception.FlavorNotFound(flavor_id=flavor_id)
# NOTE(markwash): look up the image early to avoid auth problems later
image_ref = instance.get('image_ref')
if image_ref:
image = self.image_service.show(context, image_ref)
else:
image = {}
if same_instance_type and flavor_id:
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
deltas = self._upsize_quota_delta(context, new_instance_type,
current_instance_type)
try:
reservations = self._reserve_quota_delta(context, deltas,
project_id=instance[
'project_id'])
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
overs = exc.kwargs['overs']
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
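# Worked example with assumed numbers: if the 'cores' quota is 20 with
# 16 in use and 2 reserved, headroom['cores'] is 2; a resize needing 4
# more cores ends up here and reports used=18, total_allowed=20.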
overs = ','.join(overs)
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance."),
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=deltas[resource],
used=used, allowed=total_allowed,
resource=resource)
instance = self.update(context, instance,
task_state=task_states.RESIZE_PREP,
expected_task_state=None,
progress=0, **kwargs)
request_spec = {
'instance_type': new_instance_type,
'instance_uuids': [instance['uuid']],
'instance_properties': instance,
'image': image}
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
# When flavor_id is None, the operation is treated as a migration.
if (not flavor_id and not CONF.allow_migrate_to_same_host):
filter_properties['ignore_hosts'].append(instance['host'])
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations,
project_id=instance['project_id'])
reservations = []
args = {
"instance": instance,
"instance_type": new_instance_type,
"image": image,
"request_spec": jsonutils.to_primitive(request_spec),
"filter_properties": filter_properties,
"reservations": reservations,
}
self._record_action_start(context, instance, instance_actions.RESIZE)
self.scheduler_rpcapi.prep_resize(context, **args)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED],
task_state=[None])
def shelve(self, context, instance):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
hypervisor.
"""
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.SHELVE)
image_id = None
bdms = self.get_instance_bdms(context, instance)
if not self.is_volume_backed_instance(context, instance, bdms):
name = '%s-shelved' % instance['name']
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED], task_state=[None])
def shelve_offload(self, context, instance):
"""Remove a shelved instance from the hypervisor."""
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=None)
self.compute_rpcapi.shelve_offload_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED], task_state=[None])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNSHELVE)
self.compute_task_api.unshelve_instance(context, instance)
@wrap_check_policy
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@wrap_check_policy
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def pause(self, context, instance):
"""Pause the given instance."""
self.update(context,
instance,
task_state=task_states.PAUSING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
self.update(context,
instance,
task_state=task_states.UNPAUSING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance=instance)
@wrap_check_policy
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def suspend(self, context, instance):
"""Suspend the given instance."""
self.update(context,
instance,
task_state=task_states.SUSPENDING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
self.update(context,
instance,
task_state=task_states.RESUMING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
bdms = self.get_instance_bdms(context, instance)
for bdm in bdms:
if bdm['volume_id']:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.check_attached(context, volume)
# TODO(ndipanov): This check can be generalized as a decorator to
# check for valid combinations of src and dests - for now check
# if it's booted from volume only
if self.is_volume_backed_instance(context, instance, None):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance['uuid'],
reason=reason)
self.update(context,
instance,
task_state=task_states.RESCUING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
self.update(context,
instance,
task_state=task_states.UNRESCUING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
self.update(context,
instance,
task_state=task_states.UPDATING_PASSWORD,
expected_task_state=None)
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@wrap_check_policy
@check_instance_lock
def inject_file(self, context, instance, path, file_contents):
"""Write a file to the given instance."""
self.compute_rpcapi.inject_file(context, instance=instance, path=path,
file_contents=file_contents)
@wrap_check_policy
@check_instance_host
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
@wrap_check_policy
def lock(self, context, instance):
"""Lock the given instance."""
context = context.elevated()
instance_uuid = instance['uuid']
LOG.debug(_('Locking'), context=context, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid, locked=True)
@wrap_check_policy
def unlock(self, context, instance):
"""Unlock the given instance."""
context = context.elevated()
instance_uuid = instance['uuid']
LOG.debug(_('Unlocking'), context=context, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid, locked=False)
@wrap_check_policy
def get_lock(self, context, instance):
"""Return the boolean state of given instance's lock."""
return self.get(context, instance['uuid'])['locked']
@wrap_check_policy
@check_instance_lock
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@wrap_check_policy
@check_instance_lock
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def attach_volume(self, context, instance, volume_id, device=None):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
# change the logic in the manager for what constitutes
# a valid device.
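# An illustrative value that would pass this check is something like
# '/dev/vdb' (assumed example); anything match_device() rejects fails
# fast with InvalidDevicePath.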
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
# NOTE(vish): This is done on the compute host because we want
# to avoid a race where two devices are requested at
# the same time. When db access is removed from
# compute, the bdm will be created here and we will
# have to make sure that they are assigned atomically.
device = self.compute_rpcapi.reserve_block_device_name(
context, device=device, instance=instance, volume_id=volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
self.volume_api.reserve_volume(context, volume_id)
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device)
return device
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance. This method is separated to make
it easier for cells version to override.
"""
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume['id'])
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if volume['attach_status'] == 'detached':
msg = _("Volume must be attached in order to detach.")
raise exception.InvalidVolume(reason=msg)
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if volume['instance_uuid'] != instance['uuid']:
raise exception.VolumeUnattached(volume_id=volume['id'])
self._detach_volume(context, instance, volume)
@wrap_check_policy
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,
requested_ip=requested_ip)
@wrap_check_policy
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self.compute_rpcapi.detach_interface(context, instance=instance,
port_id=port_id)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
rv = self.db.instance_metadata_get(context, instance['uuid'])
return dict(rv.iteritems())
def get_all_instance_metadata(self, context, search_filts):
"""Get all metadata."""
def _filter_metadata(instance, search_filt, input_metadata):
uuids = search_filt.get('resource_id', [])
keys_filter = search_filt.get('key', [])
values_filter = search_filt.get('value', [])
output_metadata = {}
if uuids and instance.get('uuid') not in uuids:
return {}
for (k, v) in input_metadata.iteritems():
# Both keys and value defined -- AND
if ((keys_filter and values_filter) and
(k not in keys_filter) and (v not in values_filter)):
continue
# Only keys or value is defined
elif ((keys_filter and k not in keys_filter) or
(values_filter and v not in values_filter)):
continue
output_metadata[k] = v
return output_metadata
formatted_metadata_list = []
instances = self._get_instances_by_filters(context, filters={},
sort_key='created_at',
sort_dir='desc')
for instance in instances:
try:
check_policy(context, 'get_all_instance_metadata', instance)
metadata = instance.get('metadata', {})
for filt in search_filts:
# By chaining the input to the output, the filters are
# ANDed together
metadata = _filter_metadata(instance, filt, metadata)
for (k, v) in metadata.iteritems():
formatted_metadata_list.append({'key': k, 'value': v,
'instance_id': instance.get('uuid')})
except exception.PolicyNotAuthorized:
# failed policy check - not allowed to
# read this metadata
continue
return formatted_metadata_list
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
self.db.instance_metadata_delete(context, instance['uuid'], key)
instance['metadata'] = {}
notifications.send_update(context, instance, instance)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff={key: ['-']})
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig = self.get_instance_metadata(context, instance)
if delete:
_metadata = metadata
else:
_metadata = orig.copy()
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
metadata = self.db.instance_metadata_update(context, instance['uuid'],
_metadata, True)
instance['metadata'] = metadata
notifications.send_update(context, instance, instance)
diff = utils.diff_dict(orig, _metadata)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff=diff)
return _metadata
def get_instance_faults(self, context, instances):
"""Get all faults for a list of instance uuids."""
if not instances:
return {}
for instance in instances:
check_policy(context, 'get_instance_faults', instance)
uuids = [instance['uuid'] for instance in instances]
return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def get_instance_bdms(self, context, instance, legacy=True):
"""Get all bdm tables for specified instance."""
bdms = self.db.block_device_mapping_get_all_by_instance(context,
instance['uuid'])
if legacy:
return block_device.legacy_mapping(bdms)
return bdms
def is_volume_backed_instance(self, context, instance, bdms):
if not instance['image_ref']:
return True
if bdms is None:
bdms = self.get_instance_bdms(context, instance)
for bdm in bdms:
if ((block_device.strip_dev(bdm['device_name']) ==
block_device.strip_dev(instance['root_device_name']))
and
(bdm['volume_id'] is not None or
bdm['snapshot_id'] is not None)):
return True
return False
@check_instance_state(vm_state=[vm_states.ACTIVE])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug(_("Going to try to live migrate instance to %s"),
host_name or "another host", instance=instance)
instance = self.update(context, instance,
task_state=task_states.MIGRATING,
expected_task_state=None)
self.compute_task_api.migrate_server(context, instance,
scheduler_hint={'host': host_name},
live=True, rebuild=False, flavor=None,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def evacuate(self, context, instance, host, on_shared_storage,
admin_password=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
raising an exception.
"""
LOG.debug(_('vm evacuation scheduled'))
inst_host = instance['host']
service = self.db.service_get_by_compute_host(context, inst_host)
if self.servicegroup_api.service_is_up(service):
msg = (_('Instance compute service state on %s '
'expected to be down, but it was up.') % inst_host)
LOG.error(msg)
raise exception.ComputeServiceUnavailable(msg)
instance = self.update(context, instance, expected_task_state=None,
task_state=task_states.REBUILDING)
self._record_action_start(context, instance, instance_actions.EVACUATE)
return self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=admin_password,
injected_files=None,
image_ref=None,
orig_image_ref=None,
orig_sys_metadata=None,
bdms=None,
recreate=True,
on_shared_storage=on_shared_storage,
host=host)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
return self.db.migration_get_all_by_filters(context, filters)
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
def __init__(self, rpcapi=None):
self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
super(HostAPI, self).__init__()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Raise HostNotFound if compute host doesn't exist."""
service = self.db.service_get_by_compute_host(context, host_name)
if not service:
raise exception.HostNotFound(host=host_name)
if must_be_up and not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host_name)
return service['host']
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
host_name = self._assert_host_exists(context, host_name,
must_be_up=True)
return self.rpcapi.get_host_uptime(context, host=host_name)
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.host_power_action(context, action=action,
host=host_name)
def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
the evacuation of guest VMs.
"""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
def service_get_all(self, context, filters=None, set_zones=False):
"""Returns a list of services, optionally filtering the results.
If specified, 'filters' should be a dictionary containing service
attributes and matching values. For example, to get a list of services for
the 'compute' topic, use filters={'topic': 'compute'}.
"""
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
services = self.db.service_get_all(context, disabled=disabled)
if set_zones or 'availability_zone' in filters:
services = availability_zones.set_availability_zones(context,
services)
ret_services = []
for service in services:
for key, val in filters.iteritems():
if service[key] != val:
break
else:
# All filters matched.
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, context, host_name):
"""Get service entry for the given compute hostname."""
return self.db.service_get_by_compute_host(context, host_name)
def service_update(self, context, host_name, binary, params_to_update):
"""Enable / Disable a service.
For compute services, this stops new builds and migrations going to
the host.
"""
service = db.service_get_by_args(context, host_name, binary)
return db.service_update(context, service['id'], params_to_update)
def instance_get_all_by_host(self, context, host_name):
"""Return all instances on the given host."""
return self.db.instance_get_all_by_host(context, host_name)
def task_log_get_all(self, context, task_name, period_beginning,
period_ending, host=None, state=None):
"""Return the task logs within a given range, optionally
filtering by host and/or state.
"""
return self.db.task_log_get_all(context, task_name,
period_beginning,
period_ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Return compute node entry for particular integer ID."""
return self.db.compute_node_get(context, int(compute_id))
def compute_node_get_all(self, context):
return self.db.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.db.compute_node_search_by_hypervisor(context,
hypervisor_match)
def compute_node_statistics(self, context):
return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
"""Sub-set of the Compute Manager API for managing instance actions."""
def actions_get(self, context, instance):
return self.db.actions_get(context, instance['uuid'])
def action_get_by_request_id(self, context, instance, request_id):
return self.db.action_get_by_request_id(context, instance['uuid'],
request_id)
def action_events_get(self, context, instance, action_id):
return self.db.action_events_get(context, action_id)
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
def __init__(self, **kwargs):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(AggregateAPI, self).__init__(**kwargs)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
aggregate_payload = {}
values = {"name": aggregate_name}
aggregate_payload.update(values)
metadata = None
if availability_zone:
metadata = {'availability_zone': availability_zone}
aggregate_payload.update({'meta_data': metadata})
compute_utils.notify_about_aggregate_update(context,
"create.start",
aggregate_payload)
aggregate = self.db.aggregate_create(context, values,
metadata=metadata)
aggregate = self._get_aggregate_info(context, aggregate)
# To maintain the same API result as before.
del aggregate['hosts']
del aggregate['metadata']
aggregate_payload.update({'aggregate_id': aggregate['id']})
compute_utils.notify_about_aggregate_update(context,
"create.end",
aggregate_payload)
return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
aggregate = self.db.aggregate_get(context, aggregate_id)
return self._get_aggregate_info(context, aggregate)
def get_aggregate_list(self, context):
"""Get all the aggregates."""
aggregates = self.db.aggregate_get_all(context)
return [self._get_aggregate_info(context, a) for a in aggregates]
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def update_aggregate(self, context, aggregate_id, values):
"""Update the properties of an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
aggregate_payload.update({'meta_data': values})
compute_utils.notify_about_aggregate_update(context,
"updateprop.start",
aggregate_payload)
aggregate = self.db.aggregate_update(context, aggregate_id, values)
compute_utils.notify_about_aggregate_update(context,
"updateprop.end",
aggregate_payload)
return self._get_aggregate_info(context, aggregate)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def update_aggregate_metadata(self, context, aggregate_id, metadata):
"""Updates the aggregate metadata."""
aggregate_payload = {'aggregate_id': aggregate_id}
aggregate_payload.update({'meta_data': metadata})
compute_utils.notify_about_aggregate_update(context,
"updatemetadata.start",
aggregate_payload)
# If a key is set to None, it gets removed from the aggregate metadata.
for key in metadata.keys():
if not metadata[key]:
try:
self.db.aggregate_metadata_delete(context,
aggregate_id, key)
metadata.pop(key)
except exception.AggregateMetadataNotFound as e:
LOG.warn(e.format_message())
self.db.aggregate_metadata_add(context, aggregate_id, metadata)
compute_utils.notify_about_aggregate_update(context,
"updatemetadata.end",
aggregate_payload)
return self.get_aggregate(context, aggregate_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def delete_aggregate(self, context, aggregate_id):
"""Deletes the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
compute_utils.notify_about_aggregate_update(context,
"delete.start",
aggregate_payload)
hosts = self.db.aggregate_host_get_all(context, aggregate_id)
if len(hosts) > 0:
raise exception.InvalidAggregateAction(action='delete',
aggregate_id=aggregate_id,
reason='not empty')
self.db.aggregate_delete(context, aggregate_id)
compute_utils.notify_about_aggregate_update(context,
"delete.end",
aggregate_payload)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"addhost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
self.db.service_get_by_compute_host(context, host_name)
aggregate = self.db.aggregate_get(context, aggregate_id)
self.db.aggregate_host_add(context, aggregate_id, host_name)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate['name']})
compute_utils.notify_about_aggregate_update(context,
"addhost.end",
aggregate_payload)
return self.get_aggregate(context, aggregate_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"removehost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
self.db.service_get_by_compute_host(context, host_name)
aggregate = self.db.aggregate_get(context, aggregate_id)
self.db.aggregate_host_delete(context, aggregate_id, host_name)
self.compute_rpcapi.remove_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
compute_utils.notify_about_aggregate_update(context,
"removehost.end",
aggregate_payload)
return self.get_aggregate(context, aggregate_id)
def _get_aggregate_info(self, context, aggregate):
"""Builds a dictionary with aggregate props, metadata and hosts."""
metadata = self.db.aggregate_metadata_get(context, aggregate['id'])
hosts = self.db.aggregate_host_get_all(context, aggregate['id'])
result = dict(aggregate.iteritems())
# metadetails was not originally included here. We need to pull it
# back out to maintain API stability.
del result['metadetails']
result["metadata"] = metadata
result["hosts"] = hosts
return result
class KeypairAPI(base.Base):
"""Subset of the Compute Manager API for managing key pairs."""
def _validate_new_key_pair(self, context, user_id, key_name):
safe_chars = "_- " + string.digits + string.ascii_letters
clean_value = "".join(x for x in key_name if x in safe_chars)
if clean_value != key_name:
raise exception.InvalidKeypair(
_("Keypair name contains unsafe characters"))
if not 0 < len(key_name) < 256:
raise exception.InvalidKeypair(
_('Keypair name must be between 1 and 255 characters long'))
count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
def import_key_pair(self, context, user_id, key_name, public_key):
"""Import a key pair using an existing public key."""
self._validate_new_key_pair(context, user_id, key_name)
fingerprint = crypto.generate_fingerprint(public_key)
keypair = {'user_id': user_id,
'name': key_name,
'fingerprint': fingerprint,
'public_key': public_key}
self.db.key_pair_create(context, keypair)
return keypair
def create_key_pair(self, context, user_id, key_name):
"""Create a new key pair."""
self._validate_new_key_pair(context, user_id, key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
keypair = {'user_id': user_id,
'name': key_name,
'fingerprint': fingerprint,
'public_key': public_key,
'private_key': private_key}
self.db.key_pair_create(context, keypair)
return keypair
def delete_key_pair(self, context, user_id, key_name):
"""Delete a keypair by name."""
self.db.key_pair_destroy(context, user_id, key_name)
def _get_key_pair(self, key_pair):
return {'name': key_pair['name'],
'public_key': key_pair['public_key'],
'fingerprint': key_pair['fingerprint']}
def get_key_pairs(self, context, user_id):
"""List key pairs."""
key_pairs = self.db.key_pair_get_all_by_user(context, user_id)
return [self._get_key_pair(k) for k in key_pairs]
def get_key_pair(self, context, user_id, key_name):
"""Get a keypair by name."""
key_pair = self.db.key_pair_get(context, user_id, key_name)
return self._get_key_pair(key_pair)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""
Sub-set of the Compute API related to managing security groups
and security group rules
"""
# The nova security group api does not use a uuid for the id.
id_is_uuid = False
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
def validate_property(self, value, property, allowed):
"""
Validate given security group property.
:param value: the value to validate, as a string or unicode
:param property: the property, either 'name' or 'description'
:param allowed: the range of characters allowed
"""
try:
val = value.strip()
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
if not val:
msg = _("Security group %s cannot be empty.") % property
self.raise_invalid_property(msg)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
msg = (_("Value (%(value)s) for parameter Group%(property)s is "
"invalid. Content limited to '%(allowed)s'.") %
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
if len(val) > 255:
msg = _("Security group %s should not be greater "
"than 255 characters.") % property
self.raise_invalid_property(msg)
def ensure_default(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.audit(_("Create Security Group %s"), name, context=context)
try:
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
group_ref = self.db.security_group_update(context,
security_group['id'],
group)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
self.ensure_default(context)
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name)
elif id:
return self.db.security_group_get(context, id)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
# Get reservations
try:
reservations = QUOTAS.reserve(context, security_groups=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"security group"))
LOG.audit(_("Delete security group %s"), security_group['name'],
context=context)
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
def is_associated_with_server(self, security_group, instance_uuid):
"""Check if the security group is already associated
with the instance. If Yes, return True.
"""
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
if (instance_uuid == inst['uuid']):
return True
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
def get_rule(self, context, id):
self.ensure_default(context)
try:
return self.db.security_group_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
        Note: the Nova security group API doesn't support adding multiple
        security group rules at once but the EC2 one does. Therefore,
        this function is written to support both.
"""
count = QUOTAS.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
QUOTAS.limit_check(context, security_group_rules=projected)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = _("Authorize security group ingress %s")
LOG.audit(msg, name, context=context)
rules = [self.db.security_group_rule_create(context, v) for v in vals]
self.trigger_rules_refresh(context, id=id)
return rules
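    # Illustrative call (not part of the original module; the rule dict keys and
    # the security_group_api instance are hypothetical): because ``vals`` is a
    # list, a single Nova-style rule and a batch of EC2-style rules both go
    # through the same quota check and refresh path.
    #
    #     rule = {'protocol': 'tcp', 'from_port': 22, 'to_port': 22,
    #             'cidr': '0.0.0.0/0', 'parent_group_id': group_id}
    #     rules = security_group_api.add_rules(context, group_id,
    #                                          group_name, [rule])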
def remove_rules(self, context, security_group, rule_ids):
msg = _("Revoke security group ingress %s")
LOG.audit(msg, security_group['name'], context=context)
for rule_id in rule_ids:
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
def remove_default_rules(self, context, rule_ids):
for rule_id in rule_ids:
self.db.security_group_default_rule_destroy(context, rule_id)
def add_default_rules(self, context, vals):
rules = [self.db.security_group_default_rule_create(context, v)
for v in vals]
return rules
def default_rule_exists(self, context, values):
"""Indicates whether the specified rule values are already
defined in the default security group rules.
"""
for rule in self.db.security_group_default_rule_list(context):
is_duplicate = True
keys = ('cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
is_duplicate = False
break
if is_duplicate:
return rule.get('id') or True
return False
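    # Example of the return contract above (illustrative only): with a stored
    # default rule {'id': 7, 'protocol': 'tcp', 'from_port': 22, 'to_port': 22,
    # 'cidr': '10.0.0.0/8'}, passing values with the same cidr/port/protocol
    # returns 7 (the duplicate's id), while non-matching values return False.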
def get_all_default_rules(self, context):
try:
rules = self.db.security_group_default_rule_list(context)
except Exception:
msg = 'cannot get default security group rules'
raise exception.SecurityGroupDefaultRuleNotFound(msg)
return rules
def get_default_rule(self, context, id):
try:
return self.db.security_group_default_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
self.raise_invalid_property(msg)
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
security_group = self.db.security_group_get(
context, id, columns_to_join=['instances'])
for instance in security_group['instances']:
if instance['host'] is not None:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
# First, we get the security group rules that reference these groups as
# the grantee..
security_group_rules = set()
for group_id in group_ids:
security_group_rules.update(
self.db.security_group_rule_get_by_security_group_grantee(
context,
group_id))
# ..then we distill the rules into the groups to which they belong..
security_groups = set()
for rule in security_group_rules:
security_group = self.db.security_group_get(
context, rule['parent_group_id'],
columns_to_join=['instances'])
security_groups.add(security_group)
# ..then we find the instances that are members of these groups..
instances = {}
for security_group in security_groups:
for instance in security_group['instances']:
if instance['uuid'] not in instances:
instances[instance['uuid']] = instance
# ..then we send a request to refresh the rules for each instance.
for instance in instances.values():
if instance['host']:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
if detailed:
return self.db.security_group_get_by_instance(context,
instance_uuid)
instance = self.db.instance_get_by_uuid(context, instance_uuid)
groups = instance.get('security_groups')
if groups:
return [{'name': group['name']} for group in groups]
def populate_security_groups(self, instance, security_groups):
instance['security_groups'] = security_groups
| apache-2.0 | 9,132,316,229,346,046,000 | 42.558882 | 79 | 0.566899 | false |
ArcherSys/ArcherSys | Lib/lib2to3/tests/pytree_idempotency.py | 1 | 7469 | #!/usr/bin/env python3
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Main program for testing the infrastructure."""
from __future__ import print_function
__author__ = "Guido van Rossum <[email protected]>"
# Support imports (need to be imported first)
from . import support
# Python imports
import os
import sys
import logging
# Local imports
from .. import pytree
import pgen2
from pgen2 import driver
logging.basicConfig()
def main():
gr = driver.load_grammar("Grammar.txt")
dr = driver.Driver(gr, convert=pytree.convert)
fn = "example.py"
tree = dr.parse_file(fn, debug=True)
if not diff(fn, tree):
print("No diffs.")
if not sys.argv[1:]:
return # Pass a dummy argument to run the complete test suite below
problems = []
# Process every imported module
for name in sys.modules:
mod = sys.modules[name]
if mod is None or not hasattr(mod, "__file__"):
continue
fn = mod.__file__
if fn.endswith(".pyc"):
fn = fn[:-1]
if not fn.endswith(".py"):
continue
print("Parsing", fn, file=sys.stderr)
tree = dr.parse_file(fn, debug=True)
if diff(fn, tree):
problems.append(fn)
# Process every single module on sys.path (but not in packages)
for dir in sys.path:
try:
names = os.listdir(dir)
except OSError:
continue
print("Scanning", dir, "...", file=sys.stderr)
for name in names:
if not name.endswith(".py"):
continue
print("Parsing", name, file=sys.stderr)
fn = os.path.join(dir, name)
try:
tree = dr.parse_file(fn, debug=True)
except pgen2.parse.ParseError as err:
print("ParseError:", err)
else:
if diff(fn, tree):
problems.append(fn)
# Show summary of problem files
if not problems:
print("No problems. Congratulations!")
else:
print("Problems in following files:")
for fn in problems:
print("***", fn)
def diff(fn, tree):
f = open("@", "w")
try:
f.write(str(tree))
finally:
f.close()
try:
return os.system("diff -u %s @" % fn)
finally:
os.remove("@")
if __name__ == "__main__":
main()
| mit | -5,725,509,200,969,174,000 | 24.934028 | 75 | 0.56286 | false |
deepmind/rlax | rlax/_src/policy_gradients.py | 1 | 10472 | # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions implementing policy gradient losses.
Policy gradient algorithms directly update the policy of an agent based on
a stochastic estimate of the direction of steepest ascent in a score function
representing the expected return of that policy. This subpackage provides a
number of utility functions for implementing policy gradient algorithms for
discrete and continuous policies.
"""
from typing import Optional, Tuple
import chex
import jax
import jax.numpy as jnp
from rlax._src import distributions
from rlax._src import losses
Array = chex.Array
Scalar = chex.Scalar
def _clip_by_l2_norm(x: Array, max_norm: float) -> Array:
"""Clip gradients to maximum l2 norm `max_norm`."""
# Compute the sum of squares and find out where things are zero.
sum_sq = jnp.sum(jnp.vdot(x, x))
nonzero = sum_sq > 0
# Compute the norm wherever sum_sq > 0 and leave it <= 0 otherwise. This makes
  # use of the "double where" trick; see
# https://jax.readthedocs.io/en/latest/faq.html#gradients-contain-nan-where-using-where
# for more info. In short this is necessary because although norm ends up
# computed correctly where nonzero is true if we ignored this we'd end up with
# nans on the off-branches which would leak through when computed gradients in
# the backward pass.
sum_sq_ones = jnp.where(nonzero, sum_sq, jnp.ones_like(sum_sq))
norm = jnp.where(nonzero, jnp.sqrt(sum_sq_ones), sum_sq)
# Normalize by max_norm. Whenever norm < max_norm we're left with x (this
# happens trivially for indices where nonzero is false). Otherwise we're left
# with the desired x * max_norm / norm.
return (x * max_norm) / jnp.maximum(norm, max_norm)
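# Illustrative behaviour (not part of the original module): vectors inside the
# max_norm ball, including the all-zeros vector handled by the double-where
# above, pass through unchanged, while larger vectors are rescaled onto it.
#
#   _clip_by_l2_norm(jnp.array([3., 4.]), 10.)  # -> [3., 4.]  (norm 5 <= 10)
#   _clip_by_l2_norm(jnp.array([6., 8.]), 5.)   # -> [3., 4.]  (norm 10 -> 5)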
def dpg_loss(
a_t: Array,
dqda_t: Array,
dqda_clipping: Optional[Scalar] = None,
use_stop_gradient: bool = True,
) -> Array:
"""Calculates the deterministic policy gradient (DPG) loss.
See "Deterministic Policy Gradient Algorithms" by Silver, Lever, Heess,
Degris, Wierstra, Riedmiller (http://proceedings.mlr.press/v32/silver14.pdf).
Args:
a_t: continuous-valued action at time t.
dqda_t: gradient of Q(s,a) wrt. a, evaluated at time t.
dqda_clipping: clips the gradient to have norm <= `dqda_clipping`.
use_stop_gradient: bool indicating whether or not to apply stop gradient
to targets.
Returns:
DPG loss.
"""
chex.assert_rank([a_t, dqda_t], 1)
chex.assert_type([a_t, dqda_t], float)
if dqda_clipping is not None:
dqda_t = _clip_by_l2_norm(dqda_t, dqda_clipping)
target_tm1 = dqda_t + a_t
target_tm1 = jax.lax.select(use_stop_gradient,
jax.lax.stop_gradient(target_tm1), target_tm1)
return losses.l2_loss(target_tm1 - a_t)
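# Illustrative usage (hedged; the values are made up): dqda_t would typically
# come from differentiating a learned critic Q(s, a) with respect to the
# action. Shapes follow the chex assertions above (rank-1, float).
#
#   a_t = jnp.array([0.1, -0.2])
#   dqda_t = jnp.array([0.5, 0.3])
#   per_dim_loss = dpg_loss(a_t, dqda_t, dqda_clipping=1.0)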
def policy_gradient_loss(
logits_t: Array,
a_t: Array,
adv_t: Array,
w_t: Array,
use_stop_gradient: bool = True,
) -> Array:
"""Calculates the policy gradient loss.
See "Simple Gradient-Following Algorithms for Connectionist RL" by Williams.
(http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
a_t: a sequence of actions sampled from the preferences `logits_t`.
adv_t: the observed or estimated advantages from executing actions `a_t`.
w_t: a per timestep weighting for the loss.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
Loss whose gradient corresponds to a policy gradient update.
"""
chex.assert_rank([logits_t, a_t, adv_t, w_t], [2, 1, 1, 1])
chex.assert_type([logits_t, a_t, adv_t, w_t], [float, int, float, float])
log_pi_a_t = distributions.softmax().logprob(a_t, logits_t)
adv_t = jax.lax.select(use_stop_gradient, jax.lax.stop_gradient(adv_t), adv_t)
loss_per_timestep = -log_pi_a_t * adv_t
return jnp.mean(loss_per_timestep * w_t)
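# Illustrative usage (hedged; the values are made up): two timesteps with three
# discrete actions each, sampled actions, advantages and per-step weights.
#
#   logits_t = jnp.array([[1.0, 0.5, -0.5], [0.2, 0.1, 0.3]])
#   a_t = jnp.array([0, 2])
#   adv_t = jnp.array([1.5, -0.3])
#   w_t = jnp.ones(2)
#   loss = policy_gradient_loss(logits_t, a_t, adv_t, w_t)  # scalar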
def entropy_loss(
logits_t: Array,
w_t: Array,
) -> Array:
"""Calculates the entropy regularization loss.
See "Function Optimization using Connectionist RL Algorithms" by Williams.
(https://www.tandfonline.com/doi/abs/10.1080/09540099108946587)
Args:
logits_t: a sequence of unnormalized action preferences.
w_t: a per timestep weighting for the loss.
Returns:
Entropy loss.
"""
chex.assert_rank([logits_t, w_t], [2, 1])
chex.assert_type([logits_t, w_t], float)
entropy_per_timestep = distributions.softmax().entropy(logits_t)
return -jnp.mean(entropy_per_timestep * w_t)
def _compute_advantages(logits_t: Array,
q_t: Array,
                        use_stop_gradient=True) -> Tuple[Array, Array]:
  """Computes the softmax policy and advantages from logits and action values."""
policy_t = jax.nn.softmax(logits_t, axis=1)
# Avoid computing gradients for action_values.
q_t = jax.lax.select(use_stop_gradient, jax.lax.stop_gradient(q_t), q_t)
baseline_t = jnp.sum(policy_t * q_t, axis=1)
adv_t = q_t - jnp.expand_dims(baseline_t, 1)
return policy_t, adv_t
def qpg_loss(
logits_t: Array,
q_t: Array,
use_stop_gradient: bool = True,
) -> Array:
"""Computes the QPG (Q-based Policy Gradient) loss.
See "Actor-Critic Policy Optimization in Partially Observable Multiagent
Environments" by Srinivasan, Lanctot.
(https://papers.nips.cc/paper/7602-actor-critic-policy-optimization-in-partially-observable-multiagent-environments.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
q_t: the observed or estimated action value from executing actions `a_t` at
time t.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
QPG Loss.
"""
chex.assert_rank([logits_t, q_t], 2)
chex.assert_type([logits_t, q_t], float)
policy_t, advantage_t = _compute_advantages(logits_t, q_t)
advantage_t = jax.lax.select(use_stop_gradient,
jax.lax.stop_gradient(advantage_t), advantage_t)
policy_advantages = -policy_t * advantage_t
loss = jnp.mean(jnp.sum(policy_advantages, axis=1), axis=0)
return loss
def rm_loss(
logits_t: Array,
q_t: Array,
use_stop_gradient: bool = True,
) -> Array:
"""Computes the RMPG (Regret Matching Policy Gradient) loss.
The gradient of this loss adapts the Regret Matching rule by weighting the
standard PG update with thresholded regret.
See "Actor-Critic Policy Optimization in Partially Observable Multiagent
Environments" by Srinivasan, Lanctot.
(https://papers.nips.cc/paper/7602-actor-critic-policy-optimization-in-partially-observable-multiagent-environments.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
q_t: the observed or estimated action value from executing actions `a_t` at
time t.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
RM Loss.
"""
chex.assert_rank([logits_t, q_t], 2)
chex.assert_type([logits_t, q_t], float)
policy_t, advantage_t = _compute_advantages(logits_t, q_t)
action_regret_t = jax.nn.relu(advantage_t)
action_regret_t = jax.lax.select(use_stop_gradient,
jax.lax.stop_gradient(action_regret_t),
action_regret_t)
policy_regret = -policy_t * action_regret_t
loss = jnp.mean(jnp.sum(policy_regret, axis=1), axis=0)
return loss
def rpg_loss(
logits_t: Array,
q_t: Array,
use_stop_gradient: bool = True,
) -> Array:
"""Computes the RPG (Regret Policy Gradient) loss.
The gradient of this loss adapts the Regret Matching rule by weighting the
standard PG update with regret.
See "Actor-Critic Policy Optimization in Partially Observable Multiagent
Environments" by Srinivasan, Lanctot.
(https://papers.nips.cc/paper/7602-actor-critic-policy-optimization-in-partially-observable-multiagent-environments.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
q_t: the observed or estimated action value from executing actions `a_t` at
time t.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
RPG Loss.
"""
chex.assert_rank([logits_t, q_t], 2)
chex.assert_type([logits_t, q_t], float)
_, adv_t = _compute_advantages(logits_t, q_t, use_stop_gradient)
regrets_t = jnp.sum(jax.nn.relu(adv_t), axis=1)
total_regret_t = jnp.mean(regrets_t, axis=0)
return total_regret_t
def clipped_surrogate_pg_loss(
prob_ratios_t: Array,
adv_t: Array,
epsilon: Scalar,
use_stop_gradient=True) -> Array:
"""Computes the clipped surrogate policy gradient loss.
L_clipₜ(θ) = - min(rₜ(θ)Âₜ, clip(rₜ(θ), 1-ε, 1+ε)Âₜ)
Where rₜ(θ) = π_θ(aₜ| sₜ) / π_θ_old(aₜ| sₜ) and Âₜ are the advantages.
See Proximal Policy Optimization Algorithms, Schulman et al.:
https://arxiv.org/abs/1707.06347
Args:
prob_ratios_t: Ratio of action probabilities for actions a_t:
rₜ(θ) = π_θ(aₜ| sₜ) / π_θ_old(aₜ| sₜ)
adv_t: the observed or estimated advantages from executing actions a_t.
    epsilon: Scalar value corresponding to how much to clip the objective.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
Loss whose gradient corresponds to a clipped surrogate policy gradient
update.
"""
chex.assert_rank([prob_ratios_t, adv_t], [1, 1])
chex.assert_type([prob_ratios_t, adv_t], [float, float])
adv_t = jax.lax.select(use_stop_gradient, jax.lax.stop_gradient(adv_t), adv_t)
clipped_ratios_t = jnp.clip(prob_ratios_t, 1. - epsilon, 1. + epsilon)
clipped_objective = jnp.fmin(prob_ratios_t * adv_t, clipped_ratios_t * adv_t)
return -jnp.mean(clipped_objective)
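# Illustrative usage (hedged; the values are made up): ratios inside
# [1 - epsilon, 1 + epsilon] are used as-is, ratios outside are clipped before
# being weighted by the advantages.
#
#   prob_ratios_t = jnp.array([1.0, 1.5, 0.4])
#   adv_t = jnp.array([1.0, 1.0, -1.0])
#   loss = clipped_surrogate_pg_loss(prob_ratios_t, adv_t, epsilon=0.2)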
| apache-2.0 | 380,960,590,882,449,540 | 34.569966 | 122 | 0.690942 | false |
openstack/vitrage | vitrage/api/controllers/v1/alarm.py | 1 | 2404 | # Copyright 2016 - Nokia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import pecan
from oslo_log import log
from osprofiler import profiler
from pecan.core import abort
from vitrage.api.controllers.v1.alarm_base import BaseAlarmsController
from vitrage.api.controllers.v1 import count
from vitrage.api.controllers.v1 import history
from vitrage.api.policy import enforce
from vitrage.common.constants import VertexProperties as Vprops
LOG = log.getLogger(__name__)
# noinspection PyBroadException
@profiler.trace_cls("alarm controller",
info={}, hide_args=False, trace_private=False)
class AlarmsController(BaseAlarmsController):
count = count.AlarmCountsController()
history = history.HistoryController()
@pecan.expose('json')
def get_all(self, **kwargs):
kwargs['only_active_alarms'] = True
LOG.info('returns alarms list with vitrage id %s',
kwargs.get(Vprops.VITRAGE_ID))
return self._get_alarms(**kwargs)
@pecan.expose('json')
def get(self, vitrage_id):
enforce("get alarm",
pecan.request.headers,
pecan.request.enforcer,
{})
LOG.info('returns show alarm with vitrage id %s', vitrage_id)
return self._show_alarm(vitrage_id)
@staticmethod
def _show_alarm(vitrage_id):
try:
alarm_json = pecan.request.client.call(pecan.request.context,
'show_alarm',
vitrage_id=vitrage_id)
LOG.info(alarm_json)
if not alarm_json:
abort(404, "Failed to find alarm %s" % vitrage_id)
return json.loads(alarm_json)
except Exception:
LOG.exception('Failed to load JSON.')
abort(404, 'Failed to show alarm.')
| apache-2.0 | -3,731,942,575,143,812,600 | 30.631579 | 75 | 0.649334 | false |
neerajvashistha/pa-dude | lib/python2.7/site-packages/sphinx/theming.py | 1 | 8278 | # -*- coding: utf-8 -*-
"""
sphinx.theming
~~~~~~~~~~~~~~
Theming support for HTML builders.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import shutil
import zipfile
import tempfile
from os import path
from six import string_types, iteritems
from six.moves import configparser
try:
import pkg_resources
except ImportError:
pkg_resources = False
from sphinx import package_dir
from sphinx.errors import ThemeError
NODEFAULT = object()
THEMECONF = 'theme.conf'
class Theme(object):
"""
Represents the theme chosen in the configuration.
"""
themes = {}
@classmethod
def init_themes(cls, confdir, theme_path, warn=None):
"""Search all theme paths for available themes."""
cls.themepath = list(theme_path)
cls.themepath.append(path.join(package_dir, 'themes'))
for themedir in cls.themepath[::-1]:
themedir = path.join(confdir, themedir)
if not path.isdir(themedir):
continue
for theme in os.listdir(themedir):
if theme.lower().endswith('.zip'):
try:
zfile = zipfile.ZipFile(path.join(themedir, theme))
if THEMECONF not in zfile.namelist():
continue
tname = theme[:-4]
tinfo = zfile
except Exception:
if warn:
warn('file %r on theme path is not a valid '
'zipfile or contains no theme' % theme)
continue
else:
if not path.isfile(path.join(themedir, theme, THEMECONF)):
continue
tname = theme
tinfo = None
cls.themes[tname] = (path.join(themedir, theme), tinfo)
@classmethod
def load_extra_theme(cls, name):
themes = ['alabaster']
try:
import sphinx_rtd_theme
themes.append('sphinx_rtd_theme')
except ImportError:
pass
if name in themes:
if name == 'alabaster':
import alabaster
themedir = alabaster.get_path()
                # alabaster theme also requires the 'alabaster' extension; it
                # will be loaded in the sphinx.application module.
elif name == 'sphinx_rtd_theme':
themedir = sphinx_rtd_theme.get_html_theme_path()
else:
raise NotImplementedError('Programming Error')
else:
for themedir in load_theme_plugins():
if path.isfile(path.join(themedir, name, THEMECONF)):
break
else:
# specified theme is not found
return
cls.themepath.append(themedir)
cls.themes[name] = (path.join(themedir, name), None)
return
def __init__(self, name, warn=None):
if name not in self.themes:
self.load_extra_theme(name)
if name not in self.themes:
if name == 'sphinx_rtd_theme':
raise ThemeError('sphinx_rtd_theme is no longer a hard dependency '
'since version 1.4.0. Please install it manually.'
'(pip install sphinx_rtd_theme)')
else:
raise ThemeError('no theme named %r found '
'(missing theme.conf?)' % name)
self.name = name
# Do not warn yet -- to be compatible with old Sphinxes, people *have*
# to use "default".
# if name == 'default' and warn:
# warn("'default' html theme has been renamed to 'classic'. "
# "Please change your html_theme setting either to "
# "the new 'alabaster' default theme, or to 'classic' "
# "to keep using the old default.")
tdir, tinfo = self.themes[name]
if tinfo is None:
# already a directory, do nothing
self.themedir = tdir
self.themedir_created = False
else:
# extract the theme to a temp directory
self.themedir = tempfile.mkdtemp('sxt')
self.themedir_created = True
for name in tinfo.namelist():
if name.endswith('/'):
continue
dirname = path.dirname(name)
if not path.isdir(path.join(self.themedir, dirname)):
os.makedirs(path.join(self.themedir, dirname))
fp = open(path.join(self.themedir, name), 'wb')
fp.write(tinfo.read(name))
fp.close()
self.themeconf = configparser.RawConfigParser()
self.themeconf.read(path.join(self.themedir, THEMECONF))
try:
inherit = self.themeconf.get('theme', 'inherit')
except configparser.NoOptionError:
raise ThemeError('theme %r doesn\'t have "inherit" setting' % name)
# load inherited theme automatically #1794, #1884, #1885
self.load_extra_theme(inherit)
if inherit == 'none':
self.base = None
elif inherit not in self.themes:
raise ThemeError('no theme named %r found, inherited by %r' %
(inherit, name))
else:
self.base = Theme(inherit, warn=warn)
def get_confstr(self, section, name, default=NODEFAULT):
"""Return the value for a theme configuration setting, searching the
base theme chain.
"""
try:
return self.themeconf.get(section, name)
except (configparser.NoOptionError, configparser.NoSectionError):
if self.base is not None:
return self.base.get_confstr(section, name, default)
if default is NODEFAULT:
raise ThemeError('setting %s.%s occurs in none of the '
'searched theme configs' % (section, name))
else:
return default
def get_options(self, overrides):
"""Return a dictionary of theme options and their values."""
chain = [self.themeconf]
base = self.base
while base is not None:
chain.append(base.themeconf)
base = base.base
options = {}
for conf in reversed(chain):
try:
options.update(conf.items('options'))
except configparser.NoSectionError:
pass
for option, value in iteritems(overrides):
if option not in options:
raise ThemeError('unsupported theme option %r given' % option)
options[option] = value
return options
def get_dirchain(self):
"""Return a list of theme directories, beginning with this theme's,
then the base theme's, then that one's base theme's, etc.
"""
chain = [self.themedir]
base = self.base
while base is not None:
chain.append(base.themedir)
base = base.base
return chain
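    # Illustrative chain (not part of the original module): for a theme whose
    # theme.conf sets ``inherit = classic`` (and classic itself inherits from
    # basic), get_dirchain() would return [theme_dir, classic_dir, basic_dir],
    # and get_confstr()/get_options() fall back along the same order.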
def cleanup(self):
"""Remove temporary directories."""
if self.themedir_created:
try:
shutil.rmtree(self.themedir)
except Exception:
pass
if self.base:
self.base.cleanup()
def load_theme_plugins():
"""load plugins by using``sphinx_themes`` section in setuptools entry_points.
This API will return list of directory that contain some theme directory.
"""
if not pkg_resources:
return []
theme_paths = []
for plugin in pkg_resources.iter_entry_points('sphinx_themes'):
func_or_path = plugin.load()
try:
path = func_or_path()
except Exception:
path = func_or_path
if isinstance(path, string_types):
theme_paths.append(path)
else:
            raise ThemeError('Plugin %r does not respond correctly.' %
plugin.module_name)
return theme_paths
| mit | 4,504,715,810,136,291,300 | 33.781513 | 88 | 0.539381 | false |
SylvainA/os-event-catcher | openstack/common/rpc/common.py | 1 | 18537 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import sys
import traceback
from oslo.config import cfg
import six
from openstack.common.gettextutils import _, _LE
from openstack.common import importutils
from openstack.common import jsonutils
from openstack.common import local
from openstack.common import log as logging
from openstack.common import versionutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_RPC_ENVELOPE_VERSION = '2.0'
'''RPC Envelope Version.
This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently. For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc. This version number is handled
in the rpc proxy and dispatcher modules.
This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg().
The current message format (version 2.0) is very simple. It is::
{
'oslo.version': <RPC Envelope Version as a String>,
'oslo.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
without a message envelope.
So, the current message envelope just includes the envelope version. It may
eventually contain additional information, such as a signature for the message
payload.
We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
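# Illustrative envelope (not part of the original module): for a payload such
# as {'method': 'ping', 'args': {}}, serialize_msg() below produces, key order
# aside,
#   {'oslo.version': '2.0', 'oslo.message': '{"method": "ping", "args": {}}'}
# and deserialize_msg() unwraps it again, raising
# UnsupportedRpcEnvelopeVersion for incompatible envelope versions.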
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'
_REMOTE_POSTFIX = '_Remote'
class RPCException(Exception):
msg_fmt = _("An unknown RPC related exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
message = self.msg_fmt
super(RPCException, self).__init__(message)
class RemoteError(RPCException):
"""Signifies that a remote class has raised an exception.
Contains a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception
contains all of the relevant info.
"""
msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
def __init__(self, exc_type=None, value=None, traceback=None):
self.exc_type = exc_type
self.value = value
self.traceback = traceback
super(RemoteError, self).__init__(exc_type=exc_type,
value=value,
traceback=traceback)
class Timeout(RPCException):
"""Signifies that a timeout has occurred.
This exception is raised if the rpc_response_timeout is reached while
waiting for a response from the remote side.
"""
msg_fmt = _('Timeout while waiting on RPC response - '
'topic: "%(topic)s", RPC method: "%(method)s" '
'info: "%(info)s"')
def __init__(self, info=None, topic=None, method=None):
"""Initiates Timeout object.
:param info: Extra info to convey to the user
:param topic: The topic that the rpc call was sent to
        :param method: The name of the rpc method being
called
"""
self.info = info
self.topic = topic
self.method = method
super(Timeout, self).__init__(
None,
info=info or _('<unknown>'),
topic=topic or _('<unknown>'),
method=method or _('<unknown>'))
class DuplicateMessageError(RPCException):
msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
class InvalidRPCConnectionReuse(RPCException):
msg_fmt = _("Invalid reuse of an RPC connection.")
class UnsupportedRpcVersion(RPCException):
msg_fmt = _("Specified RPC version, %(version)s, not supported by "
"this endpoint.")
class UnsupportedRpcEnvelopeVersion(RPCException):
msg_fmt = _("Specified RPC envelope version, %(version)s, "
"not supported by this endpoint.")
class RpcVersionCapError(RPCException):
msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
class Connection(object):
"""A connection, returned by rpc.create_connection().
This class represents a connection to the message bus used for rpc.
An instance of this class should never be created by users of the rpc API.
Use rpc.create_connection() instead.
"""
def close(self):
"""Close the connection.
This method must be called when the connection will no longer be used.
        It will ensure that any resources associated with the connection, such
        as a network connection, are cleaned up.
"""
raise NotImplementedError()
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer on this connection.
A consumer is associated with a message queue on the backend message
bus. The consumer will read messages from the queue, unpack them, and
dispatch them to the proxy object. The contents of the message pulled
off of the queue will determine which method gets called on the proxy
object.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic. For example, all instances of nova-compute consume
from a queue called "compute". In that case, the
messages will get distributed amongst the consumers in a
round-robin fashion if fanout=False. If fanout=True,
every consumer associated with this topic will get a
copy of every message.
:param proxy: The object that will handle all incoming messages.
:param fanout: Whether or not this is a fanout topic. See the
documentation for the topic parameter for some
additional comments on this.
"""
raise NotImplementedError()
def create_worker(self, topic, proxy, pool_name):
"""Create a worker on this connection.
A worker is like a regular consumer of messages directed to a
topic, except that it is part of a set of such consumers (the
"pool") which may run in parallel. Every pool of workers will
receive a given message, but only one worker in the pool will
be asked to process it. Load is distributed across the members
of the pool in round-robin fashion.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic.
:param proxy: The object that will handle all incoming messages.
:param pool_name: String containing the name of the pool of workers
"""
raise NotImplementedError()
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
"""Register as a member of a group of consumers.
Uses given topic from the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
:param callback: Callable to be invoked for each message.
:type callback: callable accepting one argument
:param pool_name: The name of the consumer pool.
:type pool_name: str
:param topic: The routing topic for desired messages.
:type topic: str
:param exchange_name: The name of the message exchange where
the client should attach. Defaults to
the configured exchange.
:type exchange_name: str
"""
raise NotImplementedError()
def consume_in_thread(self):
"""Spawn a thread to handle incoming messages.
Spawn a thread that will be responsible for handling all incoming
messages for consumers that were set up on this connection.
Message dispatching inside of this is expected to be implemented in a
non-blocking manner. An example implementation would be having this
thread pull messages in for all of the consumers, but utilize a thread
pool for dispatching the messages to the proxy objects.
"""
raise NotImplementedError()
def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging."""
SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']
def _fix_passwords(d):
"""Sanitizes the password fields in the dictionary."""
for k in six.iterkeys(d):
if k.lower().find('password') != -1:
d[k] = '<SANITIZED>'
elif k.lower() in SANITIZE:
d[k] = '<SANITIZED>'
elif isinstance(d[k], list):
for e in d[k]:
if isinstance(e, dict):
_fix_passwords(e)
elif isinstance(d[k], dict):
_fix_passwords(d[k])
return d
return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_LE("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
# NOTE(matiu): With cells, it's possible to re-raise remote, remote
# exceptions. Lets turn it back into the original exception type.
cls_name = str(failure.__class__.__name__)
mod_name = str(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
def deserialize_remote_exception(conf, data):
failure = jsonutils.loads(str(data))
trace = failure.get('tb', [])
message = failure.get('message', "") + "\n" + "\n".join(trace)
name = failure.get('class')
module = failure.get('module')
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
if module not in conf.allowed_rpc_exception_modules:
return RemoteError(name, failure.get('message'), trace)
try:
mod = importutils.import_module(module)
klass = getattr(mod, name)
if not issubclass(klass, Exception):
raise TypeError("Can only deserialize Exceptions")
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError):
return RemoteError(name, failure.get('message'), trace)
ex_type = type(failure)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
{'__str__': str_override, '__unicode__': str_override})
new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
try:
# NOTE(ameade): Dynamically create a new exception type and swap it in
# as the new type for the exception. This only works on user defined
# Exceptions and not core python exceptions. This is important because
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
except TypeError:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
return failure
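# Illustrative round trip (hedged; InstanceNotFound is a hypothetical
# nova-style exception): if the remote side serialized InstanceNotFound and its
# module is listed in allowed_rpc_exception_modules, the caller gets back a
# dynamically created InstanceNotFound_Remote subclass whose str() includes the
# remote traceback; otherwise a generic RemoteError is returned instead.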
class CommonRpcContext(object):
def __init__(self, **kwargs):
self.values = kwargs
def __getattr__(self, key):
try:
return self.values[key]
except KeyError:
raise AttributeError(key)
def to_dict(self):
return copy.deepcopy(self.values)
@classmethod
def from_dict(cls, values):
return cls(**values)
def deepcopy(self):
return self.from_dict(self.to_dict())
def update_store(self):
local.store.context = self
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
# TODO(russellb) This method is a bit of a nova-ism. It makes
# some assumptions about the data in the request context sent
# across rpc, while the rest of this class does not. We could get
# rid of this if we changed the nova code that uses this to
# convert the RpcContext back to its native RequestContext doing
# something like nova.context.RequestContext.from_dict(ctxt.to_dict())
context = self.deepcopy()
context.values['is_admin'] = True
context.values.setdefault('roles', [])
if 'admin' not in context.values['roles']:
context.values['roles'].append('admin')
if read_deleted is not None:
context.values['read_deleted'] = read_deleted
return context
class ClientException(Exception):
"""Encapsulates actual exception expected to be hit by a RPC proxy object.
Merely instantiating it records the current exception information, which
will be passed back to the RPC client without exceptional logging.
"""
def __init__(self):
self._exc_info = sys.exc_info()
def catch_client_exception(exceptions, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if type(e) in exceptions:
raise ClientException()
else:
raise
def client_exceptions(*exceptions):
"""Decorator for manager methods that raise expected exceptions.
Marking a Manager method with this decorator allows the declaration
of expected exceptions that the RPC layer should not consider fatal,
and not log as if they were generated in a real error scenario. Note
that this will cause listed exceptions to be wrapped in a
ClientException, which is used internally by the RPC layer.
"""
def outer(func):
def inner(*args, **kwargs):
return catch_client_exception(exceptions, func, *args, **kwargs)
return inner
return outer
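# Illustrative usage (hedged; Manager and NotFound are hypothetical): expected
# exceptions are declared on the manager method so the RPC layer wraps them in
# ClientException instead of logging them as server faults.
#
#   class Manager(object):
#       @client_exceptions(NotFound)
#       def get_thing(self, context, thing_id):
#           ...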
# TODO(sirp): we should deprecate this in favor of
# using `versionutils.is_compatible` directly
def version_is_compatible(imp_version, version):
"""Determine whether versions are compatible.
:param imp_version: The version implemented
:param version: The version requested by an incoming message.
"""
return versionutils.is_compatible(version, imp_version)
def serialize_msg(raw_msg):
# NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
# information about this format.
msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
_MESSAGE_KEY: jsonutils.dumps(raw_msg)}
return msg
def deserialize_msg(msg):
# NOTE(russellb): Hang on to your hats, this road is about to
# get a little bumpy.
#
# Robustness Principle:
# "Be strict in what you send, liberal in what you accept."
#
# At this point we have to do a bit of guessing about what it
# is we just received. Here is the set of possibilities:
#
# 1) We received a dict. This could be 2 things:
#
# a) Inspect it to see if it looks like a standard message envelope.
# If so, great!
#
# b) If it doesn't look like a standard message envelope, it could either
# be a notification, or a message from before we added a message
# envelope (referred to as version 1.0).
# Just return the message as-is.
#
# 2) It's any other non-dict type. Just return it and hope for the best.
# This case covers return values from rpc.call() from before message
# envelopes were used. (messages to call a method were always a dict)
if not isinstance(msg, dict):
# See #2 above.
return msg
base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
if not all(map(lambda key: key in msg, base_envelope_keys)):
# See #1.b above.
return msg
# At this point we think we have the message envelope
# format we were expecting. (#1.a above)
if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
return raw_msg
| apache-2.0 | 7,062,735,833,508,406,000 | 35.490157 | 79 | 0.644657 | false |
rwl/PyCIM | CIM14/CDPSM/Balanced/IEC61970/Wires/EnergySource.py | 1 | 2528 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CDPSM.Balanced.IEC61970.Core.ConductingEquipment import ConductingEquipment
class EnergySource(ConductingEquipment):
"""A generic equivalent for an energy supplier on a transmission or distribution voltage level.
"""
def __init__(self, x=0.0, voltageMagnitude=0.0, voltageAngle=0.0, nominalVoltage=0.0, *args, **kw_args):
"""Initialises a new 'EnergySource' instance.
@param x: Positive sequence Thevenin reactance.
@param voltageMagnitude: Phase-to-phase open circuit voltage magnitude.
@param voltageAngle: Phase angle of a-phase open circuit.
@param nominalVoltage: Phase-to-phase nominal voltage.
"""
#: Positive sequence Thevenin reactance.
self.x = x
#: Phase-to-phase open circuit voltage magnitude.
self.voltageMagnitude = voltageMagnitude
#: Phase angle of a-phase open circuit.
self.voltageAngle = voltageAngle
#: Phase-to-phase nominal voltage.
self.nominalVoltage = nominalVoltage
super(EnergySource, self).__init__(*args, **kw_args)
_attrs = ["x", "voltageMagnitude", "voltageAngle", "nominalVoltage"]
_attr_types = {"x": float, "voltageMagnitude": float, "voltageAngle": float, "nominalVoltage": float}
_defaults = {"x": 0.0, "voltageMagnitude": 0.0, "voltageAngle": 0.0, "nominalVoltage": 0.0}
_enums = {}
_refs = []
_many_refs = []
| mit | -1,110,787,457,818,717,400 | 44.963636 | 108 | 0.714794 | false |
trilan/lemon | lemon/forms.py | 1 | 2292 | from django import forms
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.forms.formsets import formset_factory
from django.forms.models import BaseInlineFormSet
from django.forms.models import ModelFormMetaclass, _get_foreign_key
from django.utils.translation import ugettext_lazy as _
from .fields import ContentTypeChoiceField
class UserChangeForm(forms.ModelForm):
username = forms.RegexField(
label=_('Username'),
max_length=30,
regex=r'^[\w.@+-]+$',
help_text=_(
'Required. 30 characters or fewer. Letters, digits and '
'@/./+/-/_ only.'
),
error_messages={
'invalid': _(
'This value may contain only letters, numbers and @/./+/-/_ '
'characters.'
)
}
)
class Meta:
model = User
class MenuItemForm(forms.ModelForm):
admin_site = None
class Meta(object):
fields = ['content_type', 'name', 'position']
def __init__(self, *args, **kwargs):
qs = ContentType.objects.all()
content_type = ContentTypeChoiceField(
self.admin_site, qs, label=_('content type')
)
self.base_fields['content_type'] = content_type
super(MenuItemForm, self).__init__(*args, **kwargs)
formfield_callback = lambda f: f.formfield()
def contenttype_inlineformset_factory(parent_model, model, admin_site,
formfield_callback,
extra=3, can_order=False,
can_delete=True, max_num=0):
fk = _get_foreign_key(parent_model, model)
Meta = type('Meta', (MenuItemForm.Meta,), {'model': model})
class_name = model.__name__ + 'Form'
form_class_attrs = {
'admin_site': admin_site,
'Meta': Meta,
'formfield_callback': formfield_callback
}
form = ModelFormMetaclass(class_name, (MenuItemForm,), form_class_attrs)
FormSet = formset_factory(form, BaseInlineFormSet, extra=extra,
max_num=max_num,
can_order=can_order, can_delete=can_delete)
FormSet.model = model
FormSet.fk = fk
return FormSet
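# Illustrative usage (hedged; Menu and MenuItem are hypothetical models with a
# foreign key from MenuItem to Menu):
#
#     MenuItemFormSet = contenttype_inlineformset_factory(
#         Menu, MenuItem, admin_site, formfield_callback, extra=1)
#     formset = MenuItemFormSet(instance=menu)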
| bsd-3-clause | 1,137,810,636,647,366,900 | 31.28169 | 77 | 0.58726 | false |
gmsanchez/nmpc_comparison | cstr_startup_colloc.py | 1 | 9778 | # Linear and nonlinear control of startup of a CSTR.
import mpctools as mpc
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
import time
# Define some parameters and then the CSTR model.
Nx = 3
Nu = 2
Nd = 1
# Ny = Nx
Delta = .25
# eps = 1e-6 # Use this as a small number.
T0 = 350
c0 = 1
r = .219
k0 = 7.2e10
E = 8750
U = 54.94
rho = 1000
Cp = .239
dH = -5e4
def ode(x,u,d):
# Grab the states, controls, and disturbance. We would like to write
#
# [c, T, h] = x[0:Nx]
# [Tc, F] = u[0:Nu]
# [F0] = d[0:Nd]
#
# but this doesn't work in Casadi 3.0. So, we're stuck with the following:
c = x[0]
T = x[1]
h = x[2]
Tc = u[0]
F = u[1]
F0 = d[0]
# Now create the ODE.
rate = k0*c*np.exp(-E/T)
dxdt = np.array([
F0*(c0 - c)/(np.pi*r**2*h) - rate,
F0*(T0 - T)/(np.pi*r**2*h)
- dH/(rho*Cp)*rate
+ 2*U/(r*rho*Cp)*(Tc - T),
(F0 - F)/(np.pi*r**2)
])
return dxdt
# Turn into casadi function and simulator.
ode_casadi = mpc.getCasadiFunc(ode,[Nx,Nu,Nd],["x","u","d"],funcname="ode")
ode_rk4_casadi = mpc.getCasadiFunc(ode,[Nx,Nu,Nd],["x","u","d"],
funcname="ode_rk4",rk4=False,Delta=Delta)
cstr = mpc.DiscreteSimulator(ode, Delta, [Nx,Nu,Nd], ["x","u","d"])
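# Illustrative sanity check (not part of the original script): one Delta-step
# simulation from the steady state computed below should stay essentially at
# that steady state, e.g. cstr.sim([cs,Ts,hs], [Tcs,Fs], [F0s]) ~ [cs, Ts, hs].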
# Steady-state values.
cs = .878
Ts = 324.5
hs = .659
Fs = .1
Tcs = 300
F0s = .1
# Update the steady-state values a few times to make sure they don't move.
for i in range(10):
[cs,Ts,hs] = cstr.sim([cs,Ts,hs],[Tcs,Fs],[F0s]).tolist()
xs = np.array([cs,Ts,hs])
us = np.array([Tcs,Fs])
ds = np.array([F0s])
# Now get a linearization at this steady state.
#ss = mpc.util.getLinearizedModel(ode_casadi, [xs,us,ds], ["A","B","Bp"], Delta)
#A = ss["A"]
#B = ss["B"]
#Bp = ss["Bp"]
#C = np.eye(Nx)
# Weighting matrices for controller.
Q = .5*np.diag(xs**-2)
R = 2*np.diag(us**-2)
# model_casadi = mpc.getCasadiFunc(ode,[Nx,Nu,Nd],["x","u","d"],funcname="cstr")
#[K, Pi] = mpc.util.dlqr(A,B,Q,R)
# Define casadi functions.
Fnonlinear = ode_rk4_casadi
# def measurement(x,d):
# return x
# h = mpc.getCasadiFunc(measurement,[Nx,Nd],["x","d"],funcname="h")
#def linmodel(x,u,d):
# Ax = mpc.mtimes(A,x-xs) + xs
# Bu = mpc.mtimes(B,u-us)
# Bpd = mpc.mtimes(Bp,d-ds)
# return Ax + Bu + Bpd
#Flinear = mpc.getCasadiFunc(linmodel,[Nx,Nu,Nd],["x","u","d"],funcname="F")
def stagecost(x,u,xsp,usp,Q,R):
# Return deviation variables.
dx = x - xsp
du = u - usp
# Calculate stage cost.
return mpc.mtimes(dx.T,Q,dx) + mpc.mtimes(du.T,R,du)
largs = ["x","u","x_sp","u_sp","Q","R"]
l = mpc.getCasadiFunc(stagecost,[Nx,Nu,Nx,Nu,(Nx,Nx),(Nu,Nu)],largs,
funcname="l")
def costtogo(x,xsp):
# Deviation variables.
dx = x - xsp
# Calculate cost to go.
return mpc.mtimes(dx.T,10*Q,dx)
Pf = mpc.getCasadiFunc(costtogo,[Nx,Nx],["x","s_xp"],funcname="Pf")
# First see what happens if we try to start up the reactor under no control.
Nsim = 100
x0 = np.array([.05*cs,.75*Ts,.5*hs])
xcl = {}
ucl = {}
xcl["uncont"] = np.zeros((Nsim+1,Nx))
xcl["uncont"][0,:] = x0
ucl["uncont"] = np.tile(us,(Nsim,1))
for t in range(Nsim):
xcl["uncont"][t+1,:] = cstr.sim(xcl["uncont"][t,:],ucl["uncont"][t,:],ds)
# Build a solver for the linear and nonlinear models.
Nt = 15
sp = {"x" : np.tile(xs, (Nt+1,1)), "u" : np.tile(us, (Nt,1))}
#xguesslin = np.zeros((Nt+1,Nx))
#xguesslin[0,:] = x0
#for t in range(Nt):
# xguesslin[t+1,:] = A.dot(xguesslin[t,:] - xs) + xs
#guesslin = {"x" : xguesslin, "u" : np.tile(us,(Nt,1))}
guessnonlin = sp.copy()
# Control bounds.
umax = np.array([.05*Tcs,.15*Fs])
dumax = .2*umax # Maximum for rate-of-change.
bounds = dict(uub=[us + umax],ulb=[us - umax])
ub = {"u" : np.tile(us + umax, (Nt,1)), "Du" : np.tile(dumax, (Nt,1))}
lb = {"u" : np.tile(us - umax, (Nt,1)), "Du" : np.tile(-dumax, (Nt,1))}
N = {"x":Nx, "u":Nu, "p":Nd, "t":Nt, "c":3}
p = np.tile(ds, (Nt,1)) # Parameters for system.
nmpc_commonargs = {
"N" : N,
"Delta": Delta,
"x0" : x0,
"lb" : lb,
"ub" : ub,
"p" : p,
"verbosity" : 0,
"Pf" : Pf,
"l" : l,
"sp" : sp,
"uprev" : us,
"funcargs" : {"l" : largs},
"extrapar" : {"Q" : Q, "R" : R}, # In case we want to tune online.
}
solvers = {}
# solvers["lmpc"] = mpc.nmpc(f=Flinear,guess=guesslin,**nmpc_commonargs)
solvers["nmpc"] = mpc.nmpc(f=Fnonlinear,guess=guessnonlin,**nmpc_commonargs)
# Also build steady-state target finders.
contVars = [0,2]
#sstarg_commonargs = {
# "N" : N,
# "lb" : {"u" : np.tile(us - umax, (1,1))},
# "ub" : {"u" : np.tile(us + umax, (1,1))},
# "verbosity" : 0,
## "h" : h,
# "p" : np.array([ds]),
#}
#sstargs = {}
# sstargs["lmpc"] = mpc.sstarg(f=Flinear,**sstarg_commonargs)
# sstargs["nmpc"] = mpc.sstarg(f=Fnonlinear,**sstarg_commonargs)
# Now simulate the process under control.
tcl = {}
for method in solvers.keys():
xcl[method] = np.zeros((Nsim+1,Nx))
xcl[method][0,:] = x0
tcl[method] = np.zeros((Nsim+1,1))
thisx = x0
ucl[method] = np.zeros((Nsim,Nu))
# ysp = np.tile(xs,(Nsim+1,1))
xsp = np.zeros((Nsim+1,Nx))
usp = np.zeros((Nsim,Nu))
# ysp[int(Nsim/3):int(2*Nsim/3),:] = xs*np.array([.85,.75,1.15])
for t in range(Nsim):
# Figure out setpoints.
# if t == 0 or not np.all(ysp[t,:] == ysp[t-1,:]):
# thisysp = ysp[t,:]
# sstargs[method].fixvar("y",0,thisysp[contVars],contVars)
# sstargs[method].guess["u",0] = us
# sstargs[method].guess["x",0] = thisysp
# sstargs[method].guess["y",0] = thisysp
# sstargs[method].solve()
#
# print "%10s %3d: %s" % ("sstarg",t,sstargs[method].stats["status"])
# if sstargs[method].stats["status"] != "Solve_Succeeded":
# print "***Target finder failed!"
# break
#
# xsp[t,:] = np.squeeze(sstargs[method].var["x",0])
# usp[t,:] = np.squeeze(sstargs[method].var["u",0])
#
# solvers[method].par["x_sp"] = [xsp[t,:]]*(Nt + 1)
# solvers[method].par["u_sp"] = [usp[t,:]]*Nt
# Fix initial condition and solve.
t0 = time.time()
solvers[method].fixvar("x",0,thisx)
solvers[method].solve()
print "%10s %3d: %s" % (method,t,solvers[method].stats["status"])
if solvers[method].stats["status"] != "Solve_Succeeded":
print "***Solver failed!"
break
else:
solvers[method].saveguess()
thisu = np.squeeze(solvers[method].var["u"][0])
ucl[method][t,:] = thisu
t1 = time.time()
tcl[method][t] = t1-t0
thisx = cstr.sim(thisx,thisu,ds)
xcl[method][t+1,:] = thisx
# Update previous u.
solvers[method].par["u_prev",0] = ucl[method][t,:]
# Define plotting function.
def cstrplot(x,u,xsp=None,contVars=[],title=None,colors={},labels={},
markers={},keys=None,bounds=None,ilegend=0):
if keys is None:
keys = x.keys()
for k in keys:
u[k] = np.concatenate((u[k],u[k][-1:,:]))
ylabelsx = ["$c$ (mol/L)", "$T$ (K)", "$h$ (m)"]
ylabelsu = ["$T_c$ (K)", "$F$ (kL/min)"]
gs = gridspec.GridSpec(Nx*Nu,2)
fig = plt.figure(figsize=(10,6),facecolor="none")
leglines = []
leglabels = []
for i in range(Nx):
ax = fig.add_subplot(gs[i*Nu:(i+1)*Nu,0])
for k in keys:
t = np.arange(0,x[k].shape[0])*Delta
args = {"color":colors.get(k,"black"), "label":labels.get(k,k),
"marker":markers.get(k,"")}
[line] = ax.plot(t,x[k][:,i],markeredgecolor="none",**args)
if i == ilegend:
leglines.append(line)
leglabels.append(args["label"])
if i in contVars and xsp is not None:
ax.step(t,xsp[:,i],linestyle="--",color="black",where="post")
ax.set_ylabel(ylabelsx[i])
mpc.plots.zoomaxis(ax,yscale=1.1)
mpc.plots.prettyaxesbox(ax)
mpc.plots.prettyaxesbox(ax,
facecolor="white",front=False)
ax.set_xlabel("Time (min)")
for i in range(Nu):
ax = fig.add_subplot(gs[i*Nx:(i+1)*Nx,1])
for k in keys:
t = np.arange(0,u[k].shape[0])*Delta
args = {"color":colors.get(k,"black"), "label":labels.get(k,k)}
ax.step(t,u[k][:,i],where="post",**args)
if bounds is not None:
for b in set(["uub", "ulb"]).intersection(bounds.keys()):
ax.plot(np.array([t[0],t[-1]]),np.ones((2,))*bounds[b][i],
'--k')
ax.set_ylabel(ylabelsu[i])
mpc.plots.zoomaxis(ax,yscale=1.25)
mpc.plots.prettyaxesbox(ax)
mpc.plots.prettyaxesbox(ax,
facecolor="white",front=False)
ax.set_xlabel("Time (min)")
fig.legend(leglines,leglabels,loc="lower center",ncol=len(keys))
fig.tight_layout(pad=.5,rect=(0,.075,1,1))
if title is not None:
fig.canvas.set_window_title(title)
return fig
x = xcl['nmpc']
u = ucl['nmpc']
ptimes = tcl['nmpc']
# Make plots.
keys = ["uncont", "nmpc"]
colors = {"lmpc":"blue", "nmpc":"green", "uncont":"red"}
labels = {"lmpc":"LMPC", "nmpc":"NMPC", "uncont":"Uncontrolled"}
markers = {"lmpc":"s", "nmpc":"o", "uncont":"^"}
plotbounds = dict([(k,bounds[k][0]) for k in ["ulb","uub"]])
fig = cstrplot(xcl, ucl, colors=colors, contVars=contVars, labels=labels,
keys=keys, markers={}, bounds=plotbounds, ilegend=2)
fig.show()
# mpc.plots.showandsave(fig,"cstr_startup.pdf",facecolor="none")
| gpl-3.0 | 3,749,407,622,004,102,700 | 30.850163 | 80 | 0.54009 | false |
bmazin/ARCONS-pipeline | photonlist/test/testExpTimeWeight.py | 1 | 1192 | '''
Author: Julian van Eyken Date: Jul 9 2013
Test code for per-pixel effective exposure time weighting.
'''
import os.path
import photonlist.photlist as pl
import photonlist.RADecImage as rdi
from util.FileName import FileName
def getSimpleImage(fileName=FileName(run='PAL2012',date='20121211',tstamp='20121212-033323').photonList(),
firstSec=0, integrationTime=5, wvlMin=None, wvlMax=None, doWeighted=True):
'''
Get a simple short-exposure time RA/dec-mapped image, for
the purposes of looking at the per-pixel effective integration
time weighting.
'''
virtualImage = rdi.RADecImage(vPlateScale=0.1)
print 'Loading: ',os.path.basename(fileName)
phList = pl.PhotList(fileName)
baseSaveName,ext=os.path.splitext(os.path.basename(fileName))
imSaveName=baseSaveName+'.tif'
virtualImage.loadImage(phList,doStack=True,savePreStackImage=imSaveName,
firstSec=firstSec,integrationTime=integrationTime,
wvlMin=wvlMin, wvlMax=wvlMax, doWeighted=doWeighted)
virtualImage.display(pclip=True)
return virtualImage
if __name__ == "__main__":
getSimpleImage()
| gpl-2.0 | -3,305,290,240,977,502,700 | 34.058824 | 106 | 0.700503 | false |
dmonroy/chilero.pg | chilero/pg/test.py | 1 | 2550 | import asyncio
import json
import random
import string
import sys
from aiohttp import request
from chilero.web.test import WebTestCase
from .application import Application
TEST_DB_SUFFIX = 'test_{}{}{}'.format(
sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro,
)
class TestCase(WebTestCase):
application = Application
settings = {}
@asyncio.coroutine
def initialize_application(self):
return self.application(
routes=self.routes,
settings=self.settings,
loop=self.loop
)
def _random_string(self, length=12):
return ''.join(
random.choice(string.ascii_lowercase) for i in range(length)
)
@asyncio.coroutine
def _get(self, path, **kwargs):
resp = yield from request('GET', path, loop=self.loop, **kwargs)
return resp
@asyncio.coroutine
def _get_json(self, path, **kwargs):
resp = yield from self._get(path, **kwargs)
jresp = yield from resp.json()
resp.close()
return jresp
@asyncio.coroutine
def _index(self, path):
resp = yield from self._get_json(self.full_url(path))
return resp
@asyncio.coroutine
def _create(self, path, data):
resp = yield from request(
'POST', self.full_url(path), loop=self.loop,
data=json.dumps(data)
)
return resp
@asyncio.coroutine
def _create_and_get(self, path, data, defaults=None):
defaults = defaults or {}
defaults.update(data)
resp = yield from self._create(path, defaults)
if resp.status != 201:
return resp, None
url = resp.headers['Location']
jresp = yield from self._get_json(url)
return resp, jresp['body']
@asyncio.coroutine
def _patch(self, url, **kwargs):
resp = yield from request(
'PATCH', url, loop=self.loop,
data=json.dumps(kwargs)
)
return resp
@asyncio.coroutine
def _delete(self, path, **kwargs):
resp = yield from request(
'DELETE', path, loop=self.loop,
**kwargs
)
return resp
@asyncio.coroutine
def _search(self, path, terms):
resp = self._get_json(
self.full_url(
'{endpoint}?search={keywords}'.format(
endpoint=path,
keywords='+'.join(terms.split())
)
)
)
return resp
| mit | 3,201,463,747,761,111,600 | 24.247525 | 72 | 0.565098 | false |
jc6036/LordsAndLadies | file_filler.py | 1 | 2432 | # Using this to simply fill up a text file with placeholder names
def fill_names(filename, num_of_names, variation, gender = "none"):
if gender == "male":
if variation == "first":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_names + 1):
opened_file.write("Male_Name %d\n" % i)
elif gender == "female":
if variation == "first":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_names + 1):
opened_file.write("Female_Name %d\n" % i)
else:
if variation == "last":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_names + 1):
opened_file.write("Last_Name %d\n" % i)
elif variation == "kingdom":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_names + 1):
opened_file.write("Kingdom_Name %d\n" % i)
def fill_locations(filename, num_of_names):
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_names + 1):
opened_file.write("Test_Location %d\n" % i)
def fill_titles(filename, num_of_titles, fix, job_type):
if job_type == "noble":
if fix == "prefix":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_titles + 1):
opened_file.write("Prefix_Title_Noble %s\n" % str(i))
elif fix == "subfix":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_titles + 1):
opened_file.write("Subfix_Title_Noble %s\n" % str(i))
elif job_type == "common":
if fix == "prefix":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_titles + 1):
opened_file.write("Prefix_Title_Common %s\n" % str(i))
elif fix == "subfix":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_titles + 1):
opened_file.write("Subfix_Title_Common %s\n" % str(i))
fill_names("male_names.txt", 500, "first", "male")
fill_names("female_names.txt", 500, "first", "female")
fill_names("last_names.txt", 250, "last", "none")
| gpl-2.0 | 6,665,909,490,550,261,000 | 34.246377 | 74 | 0.515625 | false |
nschaetti/EchoTorch | echotorch/transforms/text/Character.py | 1 | 3378 | # -*- coding: utf-8 -*-
#
# Imports
import torch
from .Transformer import Transformer
# Transform text to character vectors
class Character(Transformer):
"""
Transform text to character vectors
"""
# Constructor
def __init__(self, uppercase=False, gram_to_ix=None, start_ix=0, fixed_length=-1):
"""
Constructor
"""
# Gram to ix
if gram_to_ix is not None:
self.gram_count = len(gram_to_ix.keys())
self.gram_to_ix = gram_to_ix
else:
self.gram_count = start_ix
self.gram_to_ix = dict()
# end if
# Ix to gram
self.ix_to_gram = dict()
if gram_to_ix is not None:
for gram in gram_to_ix.keys():
self.ix_to_gram[gram_to_ix[gram]] = gram
# end for
# end if
# Properties
self.uppercase = uppercase
self.fixed_length = fixed_length
# Super constructor
super(Character, self).__init__()
# end __init__
##############################################
# Public
##############################################
##############################################
# Properties
##############################################
# Get the number of inputs
@property
def input_dim(self):
"""
Get the number of inputs.
:return: The input size.
"""
return 1
# end input_dim
# Vocabulary size
@property
def voc_size(self):
"""
Vocabulary size
:return:
"""
return self.gram_count
# end voc_size
##############################################
# Private
##############################################
# To upper
def to_upper(self, gram):
"""
To upper
:param gram:
:return:
"""
if not self.uppercase:
return gram.lower()
# end if
return gram
# end to_upper
##############################################
# Override
##############################################
# Convert a string
def __call__(self, text):
"""
Convert a string to a ESN input
:param text: Text to convert
:return: Tensor of word vectors
"""
# Add to voc
for i in range(len(text)):
gram = self.to_upper(text[i])
if gram not in self.gram_to_ix.keys():
self.gram_to_ix[gram] = self.gram_count
self.ix_to_gram[self.gram_count] = gram
self.gram_count += 1
# end if
# end for
# List of character to 2grams
text_idxs = [self.gram_to_ix[self.to_upper(text[i])] for i in range(len(text))]
# To long tensor
text_idxs = torch.LongTensor(text_idxs)
# Check length
if self.fixed_length != -1:
if text_idxs.size(0) > self.fixed_length:
text_idxs = text_idxs[:self.fixed_length]
elif text_idxs.size(0) < self.fixed_length:
zero_idxs = torch.LongTensor(self.fixed_length).fill_(0)
zero_idxs[:text_idxs.size(0)] = text_idxs
text_idxs = zero_idxs
# end if
# end if
return text_idxs, text_idxs.size(0)
# end convert
# end FunctionWord
| gpl-3.0 | 8,788,521,190,926,664,000 | 24.78626 | 87 | 0.450266 | false |
mhils/mitmproxy | mitmproxy/flowfilter.py | 1 | 14034 | """
The following operators are understood:
~q Request
~s Response
Headers:
Patterns are matched against "name: value" strings. Field names are
all-lowercase.
~a Asset content-type in response. Asset content types are:
text/javascript
application/x-javascript
application/javascript
text/css
image/*
application/x-shockwave-flash
~h rex Header line in either request or response
~hq rex Header in request
~hs rex Header in response
~b rex Expression in the body of either request or response
~bq rex Expression in the body of request
~bs rex Expression in the body of response
~t rex Shortcut for content-type header.
~d rex Request domain
~m rex Method
~u rex URL
~c CODE Response code.
rex Equivalent to ~u rex
"""
import functools
import re
import sys
from typing import Callable, ClassVar, Optional, Sequence, Type
import pyparsing as pp
from mitmproxy import flow, http, tcp, websocket
from mitmproxy.net.websocket import check_handshake
def only(*types):
def decorator(fn):
@functools.wraps(fn)
def filter_types(self, flow):
if isinstance(flow, types):
return fn(self, flow)
return False
return filter_types
return decorator
class _Token:
def dump(self, indent=0, fp=sys.stdout):
print("{spacing}{name}{expr}".format(
spacing="\t" * indent,
name=self.__class__.__name__,
expr=getattr(self, "expr", "")
), file=fp)
class _Action(_Token):
code: ClassVar[str]
help: ClassVar[str]
@classmethod
def make(klass, s, loc, toks):
return klass(*toks[1:])
class FErr(_Action):
code = "e"
help = "Match error"
def __call__(self, f):
return True if f.error else False
class FMarked(_Action):
code = "marked"
help = "Match marked flows"
def __call__(self, f):
return f.marked
class FHTTP(_Action):
code = "http"
help = "Match HTTP flows"
@only(http.HTTPFlow)
def __call__(self, f):
return True
class FWebSocket(_Action):
code = "websocket"
help = "Match WebSocket flows (and HTTP-WebSocket handshake flows)"
@only(http.HTTPFlow, websocket.WebSocketFlow)
def __call__(self, f):
m = (
(isinstance(f, http.HTTPFlow) and f.request and check_handshake(f.request.headers))
or isinstance(f, websocket.WebSocketFlow)
)
return m
class FTCP(_Action):
code = "tcp"
help = "Match TCP flows"
@only(tcp.TCPFlow)
def __call__(self, f):
return True
class FReq(_Action):
code = "q"
help = "Match request with no response"
@only(http.HTTPFlow)
def __call__(self, f):
if not f.response:
return True
class FResp(_Action):
code = "s"
help = "Match response"
@only(http.HTTPFlow)
def __call__(self, f):
return bool(f.response)
class _Rex(_Action):
flags = 0
is_binary = True
def __init__(self, expr):
self.expr = expr
if self.is_binary:
expr = expr.encode()
try:
self.re = re.compile(expr, self.flags)
except Exception:
raise ValueError("Cannot compile expression.")
def _check_content_type(rex, message):
return any(
name.lower() == b"content-type" and
rex.search(value)
for name, value in message.headers.fields
)
class FAsset(_Action):
code = "a"
help = "Match asset in response: CSS, Javascript, Flash, images."
ASSET_TYPES = [re.compile(x) for x in [
b"text/javascript",
b"application/x-javascript",
b"application/javascript",
b"text/css",
b"image/.*",
b"application/x-shockwave-flash"
]]
@only(http.HTTPFlow)
def __call__(self, f):
if f.response:
for i in self.ASSET_TYPES:
if _check_content_type(i, f.response):
return True
return False
class FContentType(_Rex):
code = "t"
help = "Content-type header"
@only(http.HTTPFlow)
def __call__(self, f):
if _check_content_type(self.re, f.request):
return True
elif f.response and _check_content_type(self.re, f.response):
return True
return False
class FContentTypeRequest(_Rex):
code = "tq"
help = "Request Content-Type header"
@only(http.HTTPFlow)
def __call__(self, f):
return _check_content_type(self.re, f.request)
class FContentTypeResponse(_Rex):
code = "ts"
help = "Response Content-Type header"
@only(http.HTTPFlow)
def __call__(self, f):
if f.response:
return _check_content_type(self.re, f.response)
return False
class FHead(_Rex):
code = "h"
help = "Header"
flags = re.MULTILINE
@only(http.HTTPFlow)
def __call__(self, f):
if f.request and self.re.search(bytes(f.request.headers)):
return True
if f.response and self.re.search(bytes(f.response.headers)):
return True
return False
class FHeadRequest(_Rex):
code = "hq"
help = "Request header"
flags = re.MULTILINE
@only(http.HTTPFlow)
def __call__(self, f):
if f.request and self.re.search(bytes(f.request.headers)):
return True
class FHeadResponse(_Rex):
code = "hs"
help = "Response header"
flags = re.MULTILINE
@only(http.HTTPFlow)
def __call__(self, f):
if f.response and self.re.search(bytes(f.response.headers)):
return True
class FBod(_Rex):
code = "b"
help = "Body"
flags = re.DOTALL
@only(http.HTTPFlow, websocket.WebSocketFlow, tcp.TCPFlow)
def __call__(self, f):
if isinstance(f, http.HTTPFlow):
if f.request and f.request.raw_content:
if self.re.search(f.request.get_content(strict=False)):
return True
if f.response and f.response.raw_content:
if self.re.search(f.response.get_content(strict=False)):
return True
elif isinstance(f, websocket.WebSocketFlow) or isinstance(f, tcp.TCPFlow):
for msg in f.messages:
if self.re.search(msg.content):
return True
return False
class FBodRequest(_Rex):
code = "bq"
help = "Request body"
flags = re.DOTALL
@only(http.HTTPFlow, websocket.WebSocketFlow, tcp.TCPFlow)
def __call__(self, f):
if isinstance(f, http.HTTPFlow):
if f.request and f.request.raw_content:
if self.re.search(f.request.get_content(strict=False)):
return True
elif isinstance(f, websocket.WebSocketFlow) or isinstance(f, tcp.TCPFlow):
for msg in f.messages:
if msg.from_client and self.re.search(msg.content):
return True
class FBodResponse(_Rex):
code = "bs"
help = "Response body"
flags = re.DOTALL
@only(http.HTTPFlow, websocket.WebSocketFlow, tcp.TCPFlow)
def __call__(self, f):
if isinstance(f, http.HTTPFlow):
if f.response and f.response.raw_content:
if self.re.search(f.response.get_content(strict=False)):
return True
elif isinstance(f, websocket.WebSocketFlow) or isinstance(f, tcp.TCPFlow):
for msg in f.messages:
if not msg.from_client and self.re.search(msg.content):
return True
class FMethod(_Rex):
code = "m"
help = "Method"
flags = re.IGNORECASE
@only(http.HTTPFlow)
def __call__(self, f):
return bool(self.re.search(f.request.data.method))
class FDomain(_Rex):
code = "d"
help = "Domain"
flags = re.IGNORECASE
is_binary = False
@only(http.HTTPFlow, websocket.WebSocketFlow)
def __call__(self, f):
if isinstance(f, websocket.WebSocketFlow):
f = f.handshake_flow
return bool(
self.re.search(f.request.host) or
self.re.search(f.request.pretty_host)
)
class FUrl(_Rex):
code = "u"
help = "URL"
is_binary = False
# FUrl is special, because it can be "naked".
@classmethod
def make(klass, s, loc, toks):
if len(toks) > 1:
toks = toks[1:]
return klass(*toks)
@only(http.HTTPFlow, websocket.WebSocketFlow)
def __call__(self, f):
if isinstance(f, websocket.WebSocketFlow):
f = f.handshake_flow
if not f or not f.request:
return False
return self.re.search(f.request.pretty_url)
class FSrc(_Rex):
code = "src"
help = "Match source address"
is_binary = False
def __call__(self, f):
if not f.client_conn or not f.client_conn.address:
return False
r = "{}:{}".format(f.client_conn.address[0], f.client_conn.address[1])
return f.client_conn.address and self.re.search(r)
class FDst(_Rex):
code = "dst"
help = "Match destination address"
is_binary = False
def __call__(self, f):
if not f.server_conn or not f.server_conn.address:
return False
r = "{}:{}".format(f.server_conn.address[0], f.server_conn.address[1])
return f.server_conn.address and self.re.search(r)
class _Int(_Action):
def __init__(self, num):
self.num = int(num)
class FCode(_Int):
code = "c"
help = "HTTP response code"
@only(http.HTTPFlow)
def __call__(self, f):
if f.response and f.response.status_code == self.num:
return True
class FAnd(_Token):
def __init__(self, lst):
self.lst = lst
def dump(self, indent=0, fp=sys.stdout):
super().dump(indent, fp)
for i in self.lst:
i.dump(indent + 1, fp)
def __call__(self, f):
return all(i(f) for i in self.lst)
class FOr(_Token):
def __init__(self, lst):
self.lst = lst
def dump(self, indent=0, fp=sys.stdout):
super().dump(indent, fp)
for i in self.lst:
i.dump(indent + 1, fp)
def __call__(self, f):
return any(i(f) for i in self.lst)
class FNot(_Token):
def __init__(self, itm):
self.itm = itm[0]
def dump(self, indent=0, fp=sys.stdout):
super().dump(indent, fp)
self.itm.dump(indent + 1, fp)
def __call__(self, f):
return not self.itm(f)
filter_unary: Sequence[Type[_Action]] = [
FAsset,
FErr,
FHTTP,
FMarked,
FReq,
FResp,
FTCP,
FWebSocket,
]
filter_rex: Sequence[Type[_Rex]] = [
FBod,
FBodRequest,
FBodResponse,
FContentType,
FContentTypeRequest,
FContentTypeResponse,
FDomain,
FDst,
FHead,
FHeadRequest,
FHeadResponse,
FMethod,
FSrc,
FUrl,
]
filter_int = [
FCode
]
def _make():
# Order is important - multi-char expressions need to come before narrow
# ones.
parts = []
for cls in filter_unary:
f = pp.Literal(f"~{cls.code}") + pp.WordEnd()
f.setParseAction(cls.make)
parts.append(f)
# This is a bit of a hack to simulate Word(pyparsing_unicode.printables),
# which has a horrible performance with len(pyparsing.pyparsing_unicode.printables) == 1114060
unicode_words = pp.CharsNotIn("()~'\"" + pp.ParserElement.DEFAULT_WHITE_CHARS)
unicode_words.skipWhitespace = True
regex = (
unicode_words
| pp.QuotedString('"', escChar='\\')
| pp.QuotedString("'", escChar='\\')
)
for cls in filter_rex:
f = pp.Literal(f"~{cls.code}") + pp.WordEnd() + regex.copy()
f.setParseAction(cls.make)
parts.append(f)
for cls in filter_int:
f = pp.Literal(f"~{cls.code}") + pp.WordEnd() + pp.Word(pp.nums)
f.setParseAction(cls.make)
parts.append(f)
# A naked rex is a URL rex:
f = regex.copy()
f.setParseAction(FUrl.make)
parts.append(f)
atom = pp.MatchFirst(parts)
expr = pp.infixNotation(
atom,
[(pp.Literal("!").suppress(),
1,
pp.opAssoc.RIGHT,
lambda x: FNot(*x)),
(pp.Literal("&").suppress(),
2,
pp.opAssoc.LEFT,
lambda x: FAnd(*x)),
(pp.Literal("|").suppress(),
2,
pp.opAssoc.LEFT,
lambda x: FOr(*x)),
])
expr = pp.OneOrMore(expr)
return expr.setParseAction(lambda x: FAnd(x) if len(x) != 1 else x)
bnf = _make()
TFilter = Callable[[flow.Flow], bool]
def parse(s: str) -> Optional[TFilter]:
try:
flt = bnf.parseString(s, parseAll=True)[0]
flt.pattern = s
return flt
except pp.ParseException:
return None
except ValueError:
return None
def match(flt, flow):
"""
Matches a flow against a compiled filter expression.
Returns True if matched, False if not.
If flt is a string, it will be compiled as a filter expression.
If the expression is invalid, ValueError is raised.
"""
if isinstance(flt, str):
flt = parse(flt)
if not flt:
raise ValueError("Invalid filter expression.")
if flt:
return flt(flow)
return True
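# Hedged usage sketch (editor's addition, not upstream code). The flow argument is
# assumed to be an http.HTTPFlow instance obtained elsewhere, e.g. from a test helper;
# only parse() and match() from this module are exercised.
def _example_filter_usage(flow_obj):
    # Compile once, then reuse; parse() returns None on a syntax error.
    flt = parse(r"~d example\.com & ~hq cookie")
    if flt is None:
        raise ValueError("Invalid filter expression.")
    # match() also accepts the raw string form and compiles it on the fly.
    return match(flt, flow_obj) and match("~m GET", flow_obj)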
help = []
for a in filter_unary:
help.append(
(f"~{a.code}", a.help)
)
for b in filter_rex:
help.append(
(f"~{b.code} regex", b.help)
)
for c in filter_int:
help.append(
(f"~{c.code} int", c.help)
)
help.sort()
help.extend(
[
("!", "unary not"),
("&", "and"),
("|", "or"),
("(...)", "grouping"),
]
)
| mit | 5,222,736,535,658,669,000 | 23.322357 | 98 | 0.562847 | false |
rwgdrummer/maskgen | hp_tool/hp/ErrorWindow.py | 1 | 2192 | from Tkinter import *
from tkSimpleDialog import Dialog
import json
import csv
import tkFileDialog
class ErrorWindow(Dialog):
"""
Provided a list of error messages, shows them in a simple pop-up window.
"""
def __init__(self, master, errors):
self.errors = errors
self.cancelPressed = True
Dialog.__init__(self, master, title='Validation')
def body(self, master):
frame = Frame(self, bd=2, relief=SUNKEN)
frame.pack(fill=BOTH, expand=TRUE)
yscrollbar = Scrollbar(frame)
yscrollbar.pack(side=RIGHT, fill=Y)
xscrollbar = Scrollbar(frame, orient=HORIZONTAL)
xscrollbar.pack(side=BOTTOM, fill=X)
self.listbox = Listbox(frame, width=80, height=15)
self.listbox.pack(fill=BOTH, expand=1)
if type(self.errors) == str:
with open(self.errors) as j:
self.errors = json.load(j)
if type(self.errors) == dict:
for i in self.errors:
for message in self.errors[i]:
self.listbox.insert(END, message[1])
else:
for i in self.errors:
self.listbox.insert(END, i)
# attach listbox to scrollbar
self.listbox.config(yscrollcommand=yscrollbar.set, xscrollcommand=xscrollbar.set)
yscrollbar.config(command=self.listbox.yview)
xscrollbar.config(command=self.listbox.xview)
def buttonbox(self):
box = Frame(self)
exportButton = Button(self, text='Export', width=10, command=self.export)
exportButton.pack(side=RIGHT, padx=5, pady=5)
w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
w = Button(box, text="Cancel", width=10, command=self.cancel)
w.pack(side=LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
box.pack()
def export(self):
with tkFileDialog.asksaveasfile(mode='w', defaultextension='.txt') as f:
f.write('\n'.join(self.listbox.get(0, END)))
f.write('\n')
def apply(self):
self.cancelPressed = False | bsd-3-clause | -6,608,855,206,167,699,000 | 30.782609 | 89 | 0.605839 | false |
cyphactor/lifecyclemanager | testenv/trac-0.10.4/trac/util/autoreload.py | 1 | 2888 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import sys
import time
import thread
_SLEEP_TIME = 1
def _reloader_thread(modification_callback):
"""When this function is run from the main thread, it will force other
threads to exit when any modules currently loaded change.
    @param modification_callback: Function taking a single argument, the
    modified file, which is called after a modification is detected."""
mtimes = {}
while True:
for filename in filter(None, [getattr(module, "__file__", None)
for module in sys.modules.values()]):
while not os.path.isfile(filename): # Probably in an egg or zip file
filename = os.path.dirname(filename)
if not filename:
break
if not filename: # Couldn't map to physical file, so just ignore
continue
if filename.endswith(".pyc"):
filename = filename[:-1]
mtime = os.stat(filename).st_mtime
if filename not in mtimes:
mtimes[filename] = mtime
continue
if mtime > mtimes[filename]:
modification_callback(filename)
sys.exit(3)
time.sleep(_SLEEP_TIME)
def _restart_with_reloader():
while True:
args = [sys.executable] + sys.argv
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
# This call reinvokes ourself and goes into the other branch of main as
# a new process.
exit_code = os.spawnve(os.P_WAIT, sys.executable,
args, new_environ)
if exit_code != 3:
return exit_code
def main(main_func, modification_callback):
"""Run `main_func` and restart any time modules are changed."""
if os.environ.get("RUN_MAIN"):
        # Launch the actual program as a child thread
thread.start_new_thread(main_func, ())
try:
# Now wait for a file modification and quit
_reloader_thread(modification_callback)
except KeyboardInterrupt:
pass
else:
# Initial invocation just waits around restarting this executable
try:
sys.exit(_restart_with_reloader())
except KeyboardInterrupt:
pass
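# Hedged usage sketch (editor's addition): a caller would typically pass its own
# blocking entry point plus a logging callback, e.g.
#     main(run_server, lambda filename: sys.stderr.write("%s changed, restarting\n" % filename))
# where run_server is the caller's serve-forever function (hypothetical name).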
| gpl-3.0 | -7,681,305,959,908,121,000 | 34.219512 | 80 | 0.608033 | false |
devananda/ironic | ironic/tests/unit/drivers/modules/irmc/test_power.py | 1 | 8573 | # Copyright 2015 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for iRMC Power Driver
"""
import mock
from oslo_utils import uuidutils
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules.irmc import boot as irmc_boot
from ironic.drivers.modules.irmc import common as irmc_common
from ironic.drivers.modules.irmc import power as irmc_power
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_irmc_info()
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
class IRMCPowerInternalMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IRMCPowerInternalMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_irmc')
driver_info = INFO_DICT
self.node = db_utils.create_test_node(
driver='fake_irmc',
driver_info=driver_info,
instance_uuid=uuidutils.generate_uuid())
@mock.patch.object(irmc_boot, 'attach_boot_iso_if_needed')
def test__set_power_state_power_on_ok(
self,
attach_boot_iso_if_needed_mock,
get_irmc_client_mock):
irmc_client = get_irmc_client_mock.return_value
target_state = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
irmc_power._set_power_state(task, target_state)
attach_boot_iso_if_needed_mock.assert_called_once_with(task)
irmc_client.assert_called_once_with(irmc_power.scci.POWER_ON)
def test__set_power_state_power_off_ok(self,
get_irmc_client_mock):
irmc_client = get_irmc_client_mock.return_value
target_state = states.POWER_OFF
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
irmc_power._set_power_state(task, target_state)
irmc_client.assert_called_once_with(irmc_power.scci.POWER_OFF)
@mock.patch.object(irmc_boot, 'attach_boot_iso_if_needed')
def test__set_power_state_power_reboot_ok(
self,
attach_boot_iso_if_needed_mock,
get_irmc_client_mock):
irmc_client = get_irmc_client_mock.return_value
target_state = states.REBOOT
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
irmc_power._set_power_state(task, target_state)
attach_boot_iso_if_needed_mock.assert_called_once_with(task)
irmc_client.assert_called_once_with(irmc_power.scci.POWER_RESET)
def test__set_power_state_invalid_target_state(self,
get_irmc_client_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
irmc_power._set_power_state,
task,
states.ERROR)
def test__set_power_state_scci_exception(self,
get_irmc_client_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_client.side_effect = Exception()
irmc_power.scci.SCCIClientError = Exception
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.IRMCOperationError,
irmc_power._set_power_state,
task,
states.POWER_ON)
class IRMCPowerTestCase(db_base.DbTestCase):
def setUp(self):
super(IRMCPowerTestCase, self).setUp()
driver_info = INFO_DICT
mgr_utils.mock_the_extension_manager(driver="fake_irmc")
self.node = obj_utils.create_test_node(self.context,
driver='fake_irmc',
driver_info=driver_info)
def test_get_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
properties = task.driver.get_properties()
for prop in irmc_common.COMMON_PROPERTIES:
self.assertIn(prop, properties)
@mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test_validate(self, mock_drvinfo):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.power.validate(task)
mock_drvinfo.assert_called_once_with(task.node)
@mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test_validate_fail(self, mock_drvinfo):
side_effect = iter([exception.InvalidParameterValue("Invalid Input")])
mock_drvinfo.side_effect = side_effect
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.validate,
task)
@mock.patch('ironic.drivers.modules.irmc.power.ipmitool.IPMIPower',
spec_set=True, autospec=True)
def test_get_power_state(self, mock_IPMIPower):
ipmi_power = mock_IPMIPower.return_value
ipmi_power.get_power_state.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(states.POWER_ON,
task.driver.power.get_power_state(task))
ipmi_power.get_power_state.assert_called_once_with(task)
@mock.patch.object(irmc_power, '_set_power_state', spec_set=True,
autospec=True)
def test_set_power_state(self, mock_set_power):
mock_set_power.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.power.set_power_state(task, states.POWER_ON)
mock_set_power.assert_called_once_with(task, states.POWER_ON)
@mock.patch.object(irmc_power, '_set_power_state', spec_set=True,
autospec=True)
@mock.patch.object(irmc_power.IRMCPower, 'get_power_state', spec_set=True,
autospec=True)
def test_reboot_reboot(self, mock_get_power, mock_set_power):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
mock_get_power.return_value = states.POWER_ON
task.driver.power.reboot(task)
mock_get_power.assert_called_once_with(
task.driver.power, task)
mock_set_power.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(irmc_power, '_set_power_state', spec_set=True,
autospec=True)
@mock.patch.object(irmc_power.IRMCPower, 'get_power_state', spec_set=True,
autospec=True)
def test_reboot_power_on(self, mock_get_power, mock_set_power):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
mock_get_power.return_value = states.POWER_OFF
task.driver.power.reboot(task)
mock_get_power.assert_called_once_with(
task.driver.power, task)
mock_set_power.assert_called_once_with(task, states.POWER_ON)
| apache-2.0 | -3,511,996,930,661,229,600 | 44.84492 | 78 | 0.606089 | false |
KBNLresearch/iromlab | iromlab/testsru.py | 1 | 1831 | #! /usr/bin/env python
import io
import xml.etree.ElementTree as ETree
from .kbapi import sru
def main():
"""
Script for testing SRU interface outside Iromlab
(not used by main Iromlab application)
"""
catid = "184556155"
# Lookup catalog identifier
#sruSearchString = '"PPN=' + str(catid) + '"'
sruSearchString = 'OaiPmhIdentifier="GGC:AC:' + str(catid) + '"'
print(sruSearchString)
response = sru.search(sruSearchString, "GGC")
if not response:
noGGCRecords = 0
else:
noGGCRecords = response.sru.nr_of_records
if noGGCRecords == 0:
# No matching record found
msg = ("Search for PPN=" + str(catid) + " returned " +
"no matching record in catalog!")
print("PPN not found", msg)
else:
record = next(response.records)
# Title can be in either in:
# 1. title element
# 2. title element with maintitle attribute
# 3. title element with intermediatetitle attribute (3 in combination with 2)
titlesMain = record.titlesMain
titlesIntermediate = record.titlesIntermediate
titles = record.titles
if titlesMain != []:
title = titlesMain[0]
if titlesIntermediate != []:
title = title + ", " + titlesIntermediate[0]
else:
title = titles[0]
print("Title: " + title)
# Write XML
recordData = record.record_data
recordAsString = ETree.tostring(recordData, encoding='UTF-8', method='xml')
try:
with io.open("meta-kbmdo.xml", "wb") as fOut:
fOut.write(recordAsString)
fOut.close()
except IOError:
print("Could not write KB-MDO metadata to file")
if __name__ == "__main__":
main() | apache-2.0 | 1,240,776,010,216,517,400 | 27.625 | 85 | 0.581103 | false |
madsmpedersen/MMPE | datastructures/dual_key_dict.py | 1 | 3481 | '''
Created on 08/11/2013
@author: mmpe
'''
class DualKeyDict(object):
def __init__(self, unique_key_att, additional_key_att):
self._unique_key_att = unique_key_att
self._additional_key_att = additional_key_att
self._dict = {}
self._unique_keys = set()
def __getitem__(self, key):
obj = self._dict[key]
if isinstance(obj, list):
raise AttributeError("More objects associated by key, '%s'. Use 'get' function to get list of objects" % key)
else:
return obj
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return (i for i in self._unique_keys)
def __setitem__(self, key, obj):
self.add(obj)
def __len__(self):
return len(self._unique_keys)
def add(self, obj):
unique_key = getattr(obj, self._unique_key_att)
if unique_key in self._unique_keys:
raise KeyError("Key '%s' already exists in dict" % unique_key)
self._dict[unique_key] = obj
self._unique_keys.add(unique_key)
additional_key = getattr(obj, self._additional_key_att)
if additional_key in self._dict:
existing_obj = self._dict[additional_key]
if isinstance(existing_obj, list):
existing_obj.append(obj)
else:
self._dict[additional_key] = [existing_obj, obj]
else:
self._dict[additional_key] = obj
def get(self, key, default=None, multiple_error=False):
"""
Return <object> or <list of objects> associated by 'key'
        If 'key' does not exist, 'default' is returned
        If multiple_error is true, AttributeError is raised if 'key' associates a <list of objects>
"""
if key in self._dict:
obj = self._dict[key]
if multiple_error and isinstance(obj, list):
raise AttributeError("More objects associated by key, '%s'" % key)
return obj
else:
return default
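    # Usage sketch (editor's addition; attribute names are illustrative):
    #     d = DualKeyDict("id", "name")
    #     d.add(obj)           # obj must expose .id (unique) and .name attributes
    #     d[some_id]           # unique-key lookup, always a single object
    #     d.get(some_name)     # may return one object or a list when names collide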
def keys(self):
"""Return list of unique keys"""
return list(self._unique_keys)
def values(self):
return [self._dict[k] for k in self._unique_keys]
def __str__(self):
return "{%s}" % ",".join(["(%s,%s): %s" % (getattr(obj, self._unique_key_att), getattr(obj, self._additional_key_att), obj) for obj in self.values()])
def remove(self, value):
"""
Value may be:
- unique key
- additional key
- object
"""
obj = self._dict.get(value, value)
unique_key = getattr(obj, self._unique_key_att)
del self._dict[unique_key]
self._unique_keys.remove(unique_key)
additional_key = getattr(obj, self._additional_key_att)
value = self._dict[additional_key]
if isinstance(value, list):
value = [v for v in value if v is not obj]
#value.remove(obj)
if len(value) == 1:
self._dict[additional_key] = value[0]
else:
self._dict[additional_key] = value
else:
del self._dict[additional_key]
return obj
def clear(self):
self._dict.clear()
self._unique_keys.clear()
def copy(self):
copy = DualKeyDict(self._unique_key_att, self._additional_key_att)
copy._unique_keys = self._unique_keys.copy()
copy._dict = self._dict.copy()
return copy
| apache-2.0 | 1,287,717,536,764,929,500 | 29.535088 | 158 | 0.556162 | false |
zjuchenyuan/BioWeb | Lib/Bio/codonalign/codonalphabet.py | 1 | 2100 | # Copyright 2013 by Zheng Ruan ([email protected]).
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code for Codon Alphabet.
CodonAlphabet class is inherited from Alphabet class. It is an
alphabet for CodonSeq class.
"""
import copy
try:
from itertools import izip
except ImportError:
izip = zip
from Bio.Alphabet import IUPAC, Gapped, HasStopCodon, Alphabet
from Bio.Data.CodonTable import generic_by_id
default_codon_table = copy.deepcopy(generic_by_id[1])
def get_codon_alphabet(alphabet, gap="-", stop="*"):
"""Gets alignment alphabet for codon alignment.
Only nucleotide alphabet is accepted. Raise an error when the type of
alphabet is incompatible.
"""
from Bio.Alphabet import NucleotideAlphabet
if isinstance(alphabet, NucleotideAlphabet):
alpha = alphabet
if gap:
alpha = Gapped(alpha, gap_char=gap)
if stop:
alpha = HasStopCodon(alpha, stop_symbol=stop)
else:
        raise TypeError("Only Nucleotide Alphabet is accepted!")
return alpha
default_alphabet = get_codon_alphabet(IUPAC.unambiguous_dna)
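# Editor's note (illustration only): with a nucleotide alphabet this helper returns
# HasStopCodon(Gapped(IUPAC.unambiguous_dna)), i.e. the default_alphabet above;
# passing a non-nucleotide alphabet such as a protein alphabet raises TypeError.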
class CodonAlphabet(Alphabet):
"""Generic Codon Alphabet with a size of three"""
size = 3
letters = None
name = ''
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.names[0])
def get_codon_alphabet(codon_table, gap_char="-"):
letters = list(codon_table.forward_table.keys())
letters.extend(codon_table.stop_codons)
letters.extend(codon_table.start_codons)
if gap_char:
letters.append(gap_char * 3)
generic_codon_alphabet = CodonAlphabet()
generic_codon_alphabet.letters = letters
generic_codon_alphabet.gap_char = '-'
generic_codon_alphabet.names = codon_table.names
return generic_codon_alphabet
default_codon_alphabet = get_codon_alphabet(default_codon_table)
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
| mit | -3,725,978,972,403,675,000 | 28.577465 | 73 | 0.69381 | false |